gcp.container.NodePool
Manages a node pool in a Google Kubernetes Engine (GKE) cluster separately from the cluster control plane. For more information, see the official documentation and the API reference.
Example Usage
Using A Separately Managed Node Pool (Recommended)
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const _default = new gcp.serviceaccount.Account("default", {
accountId: "service-account-id",
displayName: "Service Account",
});
const primary = new gcp.container.Cluster("primary", {
name: "my-gke-cluster",
location: "us-central1",
removeDefaultNodePool: true,
initialNodeCount: 1,
});
const primaryPreemptibleNodes = new gcp.container.NodePool("primary_preemptible_nodes", {
name: "my-node-pool",
cluster: primary.id,
nodeCount: 1,
nodeConfig: {
preemptible: true,
machineType: "e2-medium",
serviceAccount: _default.email,
oauthScopes: ["https://www.googleapis.com/auth/cloud-platform"],
},
});
import pulumi
import pulumi_gcp as gcp
default = gcp.serviceaccount.Account("default",
account_id="service-account-id",
display_name="Service Account")
primary = gcp.container.Cluster("primary",
name="my-gke-cluster",
location="us-central1",
remove_default_node_pool=True,
initial_node_count=1)
primary_preemptible_nodes = gcp.container.NodePool("primary_preemptible_nodes",
name="my-node-pool",
cluster=primary.id,
node_count=1,
node_config={
"preemptible": True,
"machine_type": "e2-medium",
"service_account": default.email,
"oauth_scopes": ["https://www.googleapis.com/auth/cloud-platform"],
})
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/container"
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/serviceaccount"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_default, err := serviceaccount.NewAccount(ctx, "default", &serviceaccount.AccountArgs{
AccountId: pulumi.String("service-account-id"),
DisplayName: pulumi.String("Service Account"),
})
if err != nil {
return err
}
primary, err := container.NewCluster(ctx, "primary", &container.ClusterArgs{
Name: pulumi.String("my-gke-cluster"),
Location: pulumi.String("us-central1"),
RemoveDefaultNodePool: pulumi.Bool(true),
InitialNodeCount: pulumi.Int(1),
})
if err != nil {
return err
}
_, err = container.NewNodePool(ctx, "primary_preemptible_nodes", &container.NodePoolArgs{
Name: pulumi.String("my-node-pool"),
Cluster: primary.ID(),
NodeCount: pulumi.Int(1),
NodeConfig: &container.NodePoolNodeConfigArgs{
Preemptible: pulumi.Bool(true),
MachineType: pulumi.String("e2-medium"),
ServiceAccount: _default.Email,
OauthScopes: pulumi.StringArray{
pulumi.String("https://www.googleapis.com/auth/cloud-platform"),
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
var @default = new Gcp.ServiceAccount.Account("default", new()
{
AccountId = "service-account-id",
DisplayName = "Service Account",
});
var primary = new Gcp.Container.Cluster("primary", new()
{
Name = "my-gke-cluster",
Location = "us-central1",
RemoveDefaultNodePool = true,
InitialNodeCount = 1,
});
var primaryPreemptibleNodes = new Gcp.Container.NodePool("primary_preemptible_nodes", new()
{
Name = "my-node-pool",
Cluster = primary.Id,
NodeCount = 1,
NodeConfig = new Gcp.Container.Inputs.NodePoolNodeConfigArgs
{
Preemptible = true,
MachineType = "e2-medium",
ServiceAccount = @default.Email,
OauthScopes = new[]
{
"https://www.googleapis.com/auth/cloud-platform",
},
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.serviceaccount.Account;
import com.pulumi.gcp.serviceaccount.AccountArgs;
import com.pulumi.gcp.container.Cluster;
import com.pulumi.gcp.container.ClusterArgs;
import com.pulumi.gcp.container.NodePool;
import com.pulumi.gcp.container.NodePoolArgs;
import com.pulumi.gcp.container.inputs.NodePoolNodeConfigArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var default_ = new Account("default", AccountArgs.builder()
.accountId("service-account-id")
.displayName("Service Account")
.build());
var primary = new Cluster("primary", ClusterArgs.builder()
.name("my-gke-cluster")
.location("us-central1")
.removeDefaultNodePool(true)
.initialNodeCount(1)
.build());
var primaryPreemptibleNodes = new NodePool("primaryPreemptibleNodes", NodePoolArgs.builder()
.name("my-node-pool")
.cluster(primary.id())
.nodeCount(1)
.nodeConfig(NodePoolNodeConfigArgs.builder()
.preemptible(true)
.machineType("e2-medium")
.serviceAccount(default_.email())
.oauthScopes("https://www.googleapis.com/auth/cloud-platform")
.build())
.build());
}
}
resources:
default:
type: gcp:serviceaccount:Account
properties:
accountId: service-account-id
displayName: Service Account
primary:
type: gcp:container:Cluster
properties:
name: my-gke-cluster
location: us-central1
removeDefaultNodePool: true
initialNodeCount: 1
primaryPreemptibleNodes:
type: gcp:container:NodePool
name: primary_preemptible_nodes
properties:
name: my-node-pool
cluster: ${primary.id}
nodeCount: 1
nodeConfig:
preemptible: true
machineType: e2-medium
serviceAccount: ${default.email}
oauthScopes:
- https://www.googleapis.com/auth/cloud-platform
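A common variation is to let the cluster autoscaler size the pool instead of fixing nodeCount. The following is a minimal Python sketch, reusing the primary cluster and default service account from the Python example above; the scaling bounds shown are illustrative, not recommended values.
import pulumi_gcp as gcp

# An autoscaled node pool: omit node_count and let the cluster
# autoscaler keep the pool between min_node_count and max_node_count.
autoscaled = gcp.container.NodePool("autoscaled",
    name="my-autoscaled-pool",
    cluster=primary.id,
    autoscaling=gcp.container.NodePoolAutoscalingArgs(
        min_node_count=1,
        max_node_count=5,
    ),
    node_config=gcp.container.NodePoolNodeConfigArgs(
        machine_type="e2-medium",
        service_account=default.email,
        oauth_scopes=["https://www.googleapis.com/auth/cloud-platform"],
    ))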
2 Node Pools, 1 Separately Managed + The Default Node Pool
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const _default = new gcp.serviceaccount.Account("default", {
accountId: "service-account-id",
displayName: "Service Account",
});
const primary = new gcp.container.Cluster("primary", {
name: "marcellus-wallace",
location: "us-central1-a",
initialNodeCount: 3,
nodeLocations: ["us-central1-c"],
nodeConfig: {
serviceAccount: _default.email,
oauthScopes: ["https://www.googleapis.com/auth/cloud-platform"],
guestAccelerators: [{
type: "nvidia-tesla-k80",
count: 1,
}],
},
});
const np = new gcp.container.NodePool("np", {
name: "my-node-pool",
cluster: primary.id,
nodeConfig: {
machineType: "e2-medium",
serviceAccount: _default.email,
oauthScopes: ["https://www.googleapis.com/auth/cloud-platform"],
},
});
import pulumi
import pulumi_gcp as gcp
default = gcp.serviceaccount.Account("default",
account_id="service-account-id",
display_name="Service Account")
primary = gcp.container.Cluster("primary",
name="marcellus-wallace",
location="us-central1-a",
initial_node_count=3,
node_locations=["us-central1-c"],
node_config={
"service_account": default.email,
"oauth_scopes": ["https://www.googleapis.com/auth/cloud-platform"],
"guest_accelerators": [{
"type": "nvidia-tesla-k80",
"count": 1,
}],
})
np = gcp.container.NodePool("np",
name="my-node-pool",
cluster=primary.id,
node_config={
"machine_type": "e2-medium",
"service_account": default.email,
"oauth_scopes": ["https://www.googleapis.com/auth/cloud-platform"],
})
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/container"
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/serviceaccount"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_default, err := serviceaccount.NewAccount(ctx, "default", &serviceaccount.AccountArgs{
AccountId: pulumi.String("service-account-id"),
DisplayName: pulumi.String("Service Account"),
})
if err != nil {
return err
}
primary, err := container.NewCluster(ctx, "primary", &container.ClusterArgs{
Name: pulumi.String("marcellus-wallace"),
Location: pulumi.String("us-central1-a"),
InitialNodeCount: pulumi.Int(3),
NodeLocations: pulumi.StringArray{
pulumi.String("us-central1-c"),
},
NodeConfig: &container.ClusterNodeConfigArgs{
ServiceAccount: _default.Email,
OauthScopes: pulumi.StringArray{
pulumi.String("https://www.googleapis.com/auth/cloud-platform"),
},
GuestAccelerators: container.ClusterNodeConfigGuestAcceleratorArray{
&container.ClusterNodeConfigGuestAcceleratorArgs{
Type: pulumi.String("nvidia-tesla-k80"),
Count: pulumi.Int(1),
},
},
},
})
if err != nil {
return err
}
_, err = container.NewNodePool(ctx, "np", &container.NodePoolArgs{
Name: pulumi.String("my-node-pool"),
Cluster: primary.ID(),
NodeConfig: &container.NodePoolNodeConfigArgs{
MachineType: pulumi.String("e2-medium"),
ServiceAccount: _default.Email,
OauthScopes: pulumi.StringArray{
pulumi.String("https://www.googleapis.com/auth/cloud-platform"),
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
var @default = new Gcp.ServiceAccount.Account("default", new()
{
AccountId = "service-account-id",
DisplayName = "Service Account",
});
var primary = new Gcp.Container.Cluster("primary", new()
{
Name = "marcellus-wallace",
Location = "us-central1-a",
InitialNodeCount = 3,
NodeLocations = new[]
{
"us-central1-c",
},
NodeConfig = new Gcp.Container.Inputs.ClusterNodeConfigArgs
{
ServiceAccount = @default.Email,
OauthScopes = new[]
{
"https://www.googleapis.com/auth/cloud-platform",
},
GuestAccelerators = new[]
{
new Gcp.Container.Inputs.ClusterNodeConfigGuestAcceleratorArgs
{
Type = "nvidia-tesla-k80",
Count = 1,
},
},
},
});
var np = new Gcp.Container.NodePool("np", new()
{
Name = "my-node-pool",
Cluster = primary.Id,
NodeConfig = new Gcp.Container.Inputs.NodePoolNodeConfigArgs
{
MachineType = "e2-medium",
ServiceAccount = @default.Email,
OauthScopes = new[]
{
"https://www.googleapis.com/auth/cloud-platform",
},
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.serviceaccount.Account;
import com.pulumi.gcp.serviceaccount.AccountArgs;
import com.pulumi.gcp.container.Cluster;
import com.pulumi.gcp.container.ClusterArgs;
import com.pulumi.gcp.container.inputs.ClusterNodeConfigArgs;
import com.pulumi.gcp.container.inputs.ClusterNodeConfigGuestAcceleratorArgs;
import com.pulumi.gcp.container.NodePool;
import com.pulumi.gcp.container.NodePoolArgs;
import com.pulumi.gcp.container.inputs.NodePoolNodeConfigArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var default_ = new Account("default", AccountArgs.builder()
.accountId("service-account-id")
.displayName("Service Account")
.build());
var primary = new Cluster("primary", ClusterArgs.builder()
.name("marcellus-wallace")
.location("us-central1-a")
.initialNodeCount(3)
.nodeLocations("us-central1-c")
.nodeConfig(ClusterNodeConfigArgs.builder()
.serviceAccount(default_.email())
.oauthScopes("https://www.googleapis.com/auth/cloud-platform")
.guestAccelerators(ClusterNodeConfigGuestAcceleratorArgs.builder()
.type("nvidia-tesla-k80")
.count(1)
.build())
.build())
.build());
var np = new NodePool("np", NodePoolArgs.builder()
.name("my-node-pool")
.cluster(primary.id())
.nodeConfig(NodePoolNodeConfigArgs.builder()
.machineType("e2-medium")
.serviceAccount(default_.email())
.oauthScopes("https://www.googleapis.com/auth/cloud-platform")
.build())
.build());
}
}
resources:
default:
type: gcp:serviceaccount:Account
properties:
accountId: service-account-id
displayName: Service Account
np:
type: gcp:container:NodePool
properties:
name: my-node-pool
cluster: ${primary.id}
nodeConfig:
machineType: e2-medium
serviceAccount: ${default.email}
oauthScopes:
- https://www.googleapis.com/auth/cloud-platform
primary:
type: gcp:container:Cluster
properties:
name: marcellus-wallace
location: us-central1-a
initialNodeCount: 3
nodeLocations:
- us-central1-c
nodeConfig:
serviceAccount: ${default.email}
oauthScopes:
- https://www.googleapis.com/auth/cloud-platform
guestAccelerators:
- type: nvidia-tesla-k80
count: 1
Create NodePool Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new NodePool(name: string, args: NodePoolArgs, opts?: CustomResourceOptions);
@overload
def NodePool(resource_name: str,
args: NodePoolArgs,
opts: Optional[ResourceOptions] = None)
@overload
def NodePool(resource_name: str,
opts: Optional[ResourceOptions] = None,
cluster: Optional[str] = None,
network_config: Optional[NodePoolNetworkConfigArgs] = None,
name_prefix: Optional[str] = None,
location: Optional[str] = None,
management: Optional[NodePoolManagementArgs] = None,
node_config: Optional[NodePoolNodeConfigArgs] = None,
name: Optional[str] = None,
initial_node_count: Optional[int] = None,
autoscaling: Optional[NodePoolAutoscalingArgs] = None,
max_pods_per_node: Optional[int] = None,
node_count: Optional[int] = None,
node_locations: Optional[Sequence[str]] = None,
placement_policy: Optional[NodePoolPlacementPolicyArgs] = None,
project: Optional[str] = None,
queued_provisioning: Optional[NodePoolQueuedProvisioningArgs] = None,
upgrade_settings: Optional[NodePoolUpgradeSettingsArgs] = None,
version: Optional[str] = None)
func NewNodePool(ctx *Context, name string, args NodePoolArgs, opts ...ResourceOption) (*NodePool, error)
public NodePool(string name, NodePoolArgs args, CustomResourceOptions? opts = null)
public NodePool(String name, NodePoolArgs args)
public NodePool(String name, NodePoolArgs args, CustomResourceOptions options)
type: gcp:container:NodePool
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
This property is required. The unique name of the resource.
- args NodePoolArgs
This property is required. The arguments to resource properties.
- opts CustomResourceOptions
Bag of options to control resource's behavior.
- resource_name str
This property is required. The unique name of the resource.
- args NodePoolArgs
This property is required. The arguments to resource properties.
- opts ResourceOptions
Bag of options to control resource's behavior.
- ctx Context
Context object for the current deployment.
- name string
This property is required. The unique name of the resource.
- args NodePoolArgs
This property is required. The arguments to resource properties.
- opts ResourceOption
Bag of options to control resource's behavior.
- name string
This property is required. The unique name of the resource.
- args NodePoolArgs
This property is required. The arguments to resource properties.
- opts CustomResourceOptions
Bag of options to control resource's behavior.
- name String
This property is required. The unique name of the resource.
- args NodePoolArgs
This property is required. The arguments to resource properties.
- options CustomResourceOptions
Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var nodePoolResource = new Gcp.Container.NodePool("nodePoolResource", new()
{
Cluster = "string",
NetworkConfig = new Gcp.Container.Inputs.NodePoolNetworkConfigArgs
{
AdditionalNodeNetworkConfigs = new[]
{
new Gcp.Container.Inputs.NodePoolNetworkConfigAdditionalNodeNetworkConfigArgs
{
Network = "string",
Subnetwork = "string",
},
},
AdditionalPodNetworkConfigs = new[]
{
new Gcp.Container.Inputs.NodePoolNetworkConfigAdditionalPodNetworkConfigArgs
{
MaxPodsPerNode = 0,
SecondaryPodRange = "string",
Subnetwork = "string",
},
},
CreatePodRange = false,
EnablePrivateNodes = false,
NetworkPerformanceConfig = new Gcp.Container.Inputs.NodePoolNetworkConfigNetworkPerformanceConfigArgs
{
TotalEgressBandwidthTier = "string",
},
PodCidrOverprovisionConfig = new Gcp.Container.Inputs.NodePoolNetworkConfigPodCidrOverprovisionConfigArgs
{
Disabled = false,
},
PodIpv4CidrBlock = "string",
PodRange = "string",
},
NamePrefix = "string",
Location = "string",
Management = new Gcp.Container.Inputs.NodePoolManagementArgs
{
AutoRepair = false,
AutoUpgrade = false,
},
NodeConfig = new Gcp.Container.Inputs.NodePoolNodeConfigArgs
{
AdvancedMachineFeatures = new Gcp.Container.Inputs.NodePoolNodeConfigAdvancedMachineFeaturesArgs
{
ThreadsPerCore = 0,
EnableNestedVirtualization = false,
},
BootDiskKmsKey = "string",
ConfidentialNodes = new Gcp.Container.Inputs.NodePoolNodeConfigConfidentialNodesArgs
{
Enabled = false,
},
ContainerdConfig = new Gcp.Container.Inputs.NodePoolNodeConfigContainerdConfigArgs
{
PrivateRegistryAccessConfig = new Gcp.Container.Inputs.NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigArgs
{
Enabled = false,
CertificateAuthorityDomainConfigs = new[]
{
new Gcp.Container.Inputs.NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigArgs
{
Fqdns = new[]
{
"string",
},
GcpSecretManagerCertificateConfig = new Gcp.Container.Inputs.NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigGcpSecretManagerCertificateConfigArgs
{
SecretUri = "string",
},
},
},
},
},
DiskSizeGb = 0,
DiskType = "string",
EffectiveTaints = new[]
{
new Gcp.Container.Inputs.NodePoolNodeConfigEffectiveTaintArgs
{
Effect = "string",
Key = "string",
Value = "string",
},
},
EnableConfidentialStorage = false,
EphemeralStorageConfig = new Gcp.Container.Inputs.NodePoolNodeConfigEphemeralStorageConfigArgs
{
LocalSsdCount = 0,
},
EphemeralStorageLocalSsdConfig = new Gcp.Container.Inputs.NodePoolNodeConfigEphemeralStorageLocalSsdConfigArgs
{
LocalSsdCount = 0,
},
FastSocket = new Gcp.Container.Inputs.NodePoolNodeConfigFastSocketArgs
{
Enabled = false,
},
GcfsConfig = new Gcp.Container.Inputs.NodePoolNodeConfigGcfsConfigArgs
{
Enabled = false,
},
GuestAccelerators = new[]
{
new Gcp.Container.Inputs.NodePoolNodeConfigGuestAcceleratorArgs
{
Count = 0,
Type = "string",
GpuDriverInstallationConfig = new Gcp.Container.Inputs.NodePoolNodeConfigGuestAcceleratorGpuDriverInstallationConfigArgs
{
GpuDriverVersion = "string",
},
GpuPartitionSize = "string",
GpuSharingConfig = new Gcp.Container.Inputs.NodePoolNodeConfigGuestAcceleratorGpuSharingConfigArgs
{
GpuSharingStrategy = "string",
MaxSharedClientsPerGpu = 0,
},
},
},
Gvnic = new Gcp.Container.Inputs.NodePoolNodeConfigGvnicArgs
{
Enabled = false,
},
HostMaintenancePolicy = new Gcp.Container.Inputs.NodePoolNodeConfigHostMaintenancePolicyArgs
{
MaintenanceInterval = "string",
},
ImageType = "string",
KubeletConfig = new Gcp.Container.Inputs.NodePoolNodeConfigKubeletConfigArgs
{
AllowedUnsafeSysctls = new[]
{
"string",
},
ContainerLogMaxFiles = 0,
ContainerLogMaxSize = "string",
CpuCfsQuota = false,
CpuCfsQuotaPeriod = "string",
CpuManagerPolicy = "string",
ImageGcHighThresholdPercent = 0,
ImageGcLowThresholdPercent = 0,
ImageMaximumGcAge = "string",
ImageMinimumGcAge = "string",
InsecureKubeletReadonlyPortEnabled = "string",
PodPidsLimit = 0,
},
Labels =
{
{ "string", "string" },
},
LinuxNodeConfig = new Gcp.Container.Inputs.NodePoolNodeConfigLinuxNodeConfigArgs
{
CgroupMode = "string",
HugepagesConfig = new Gcp.Container.Inputs.NodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgs
{
HugepageSize1g = 0,
HugepageSize2m = 0,
},
Sysctls =
{
{ "string", "string" },
},
},
LocalNvmeSsdBlockConfig = new Gcp.Container.Inputs.NodePoolNodeConfigLocalNvmeSsdBlockConfigArgs
{
LocalSsdCount = 0,
},
LocalSsdCount = 0,
LocalSsdEncryptionMode = "string",
LoggingVariant = "string",
MachineType = "string",
MaxRunDuration = "string",
Metadata =
{
{ "string", "string" },
},
MinCpuPlatform = "string",
NodeGroup = "string",
OauthScopes = new[]
{
"string",
},
Preemptible = false,
ReservationAffinity = new Gcp.Container.Inputs.NodePoolNodeConfigReservationAffinityArgs
{
ConsumeReservationType = "string",
Key = "string",
Values = new[]
{
"string",
},
},
ResourceLabels =
{
{ "string", "string" },
},
ResourceManagerTags =
{
{ "string", "string" },
},
SandboxConfig = new Gcp.Container.Inputs.NodePoolNodeConfigSandboxConfigArgs
{
SandboxType = "string",
},
SecondaryBootDisks = new[]
{
new Gcp.Container.Inputs.NodePoolNodeConfigSecondaryBootDiskArgs
{
DiskImage = "string",
Mode = "string",
},
},
ServiceAccount = "string",
ShieldedInstanceConfig = new Gcp.Container.Inputs.NodePoolNodeConfigShieldedInstanceConfigArgs
{
EnableIntegrityMonitoring = false,
EnableSecureBoot = false,
},
SoleTenantConfig = new Gcp.Container.Inputs.NodePoolNodeConfigSoleTenantConfigArgs
{
NodeAffinities = new[]
{
new Gcp.Container.Inputs.NodePoolNodeConfigSoleTenantConfigNodeAffinityArgs
{
Key = "string",
Operator = "string",
Values = new[]
{
"string",
},
},
},
},
Spot = false,
StoragePools = new[]
{
"string",
},
Tags = new[]
{
"string",
},
Taints = new[]
{
new Gcp.Container.Inputs.NodePoolNodeConfigTaintArgs
{
Effect = "string",
Key = "string",
Value = "string",
},
},
WorkloadMetadataConfig = new Gcp.Container.Inputs.NodePoolNodeConfigWorkloadMetadataConfigArgs
{
Mode = "string",
},
},
Name = "string",
InitialNodeCount = 0,
Autoscaling = new Gcp.Container.Inputs.NodePoolAutoscalingArgs
{
LocationPolicy = "string",
MaxNodeCount = 0,
MinNodeCount = 0,
TotalMaxNodeCount = 0,
TotalMinNodeCount = 0,
},
MaxPodsPerNode = 0,
NodeCount = 0,
NodeLocations = new[]
{
"string",
},
PlacementPolicy = new Gcp.Container.Inputs.NodePoolPlacementPolicyArgs
{
Type = "string",
PolicyName = "string",
TpuTopology = "string",
},
Project = "string",
QueuedProvisioning = new Gcp.Container.Inputs.NodePoolQueuedProvisioningArgs
{
Enabled = false,
},
UpgradeSettings = new Gcp.Container.Inputs.NodePoolUpgradeSettingsArgs
{
BlueGreenSettings = new Gcp.Container.Inputs.NodePoolUpgradeSettingsBlueGreenSettingsArgs
{
StandardRolloutPolicy = new Gcp.Container.Inputs.NodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicyArgs
{
BatchNodeCount = 0,
BatchPercentage = 0,
BatchSoakDuration = "string",
},
NodePoolSoakDuration = "string",
},
MaxSurge = 0,
MaxUnavailable = 0,
Strategy = "string",
},
Version = "string",
});
example, err := container.NewNodePool(ctx, "nodePoolResource", &container.NodePoolArgs{
Cluster: pulumi.String("string"),
NetworkConfig: &container.NodePoolNetworkConfigArgs{
AdditionalNodeNetworkConfigs: container.NodePoolNetworkConfigAdditionalNodeNetworkConfigArray{
&container.NodePoolNetworkConfigAdditionalNodeNetworkConfigArgs{
Network: pulumi.String("string"),
Subnetwork: pulumi.String("string"),
},
},
AdditionalPodNetworkConfigs: container.NodePoolNetworkConfigAdditionalPodNetworkConfigArray{
&container.NodePoolNetworkConfigAdditionalPodNetworkConfigArgs{
MaxPodsPerNode: pulumi.Int(0),
SecondaryPodRange: pulumi.String("string"),
Subnetwork: pulumi.String("string"),
},
},
CreatePodRange: pulumi.Bool(false),
EnablePrivateNodes: pulumi.Bool(false),
NetworkPerformanceConfig: &container.NodePoolNetworkConfigNetworkPerformanceConfigArgs{
TotalEgressBandwidthTier: pulumi.String("string"),
},
PodCidrOverprovisionConfig: &container.NodePoolNetworkConfigPodCidrOverprovisionConfigArgs{
Disabled: pulumi.Bool(false),
},
PodIpv4CidrBlock: pulumi.String("string"),
PodRange: pulumi.String("string"),
},
NamePrefix: pulumi.String("string"),
Location: pulumi.String("string"),
Management: &container.NodePoolManagementArgs{
AutoRepair: pulumi.Bool(false),
AutoUpgrade: pulumi.Bool(false),
},
NodeConfig: &container.NodePoolNodeConfigArgs{
AdvancedMachineFeatures: &container.NodePoolNodeConfigAdvancedMachineFeaturesArgs{
ThreadsPerCore: pulumi.Int(0),
EnableNestedVirtualization: pulumi.Bool(false),
},
BootDiskKmsKey: pulumi.String("string"),
ConfidentialNodes: &container.NodePoolNodeConfigConfidentialNodesArgs{
Enabled: pulumi.Bool(false),
},
ContainerdConfig: &container.NodePoolNodeConfigContainerdConfigArgs{
PrivateRegistryAccessConfig: &container.NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigArgs{
Enabled: pulumi.Bool(false),
CertificateAuthorityDomainConfigs: container.NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigArray{
&container.NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigArgs{
Fqdns: pulumi.StringArray{
pulumi.String("string"),
},
GcpSecretManagerCertificateConfig: &container.NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigGcpSecretManagerCertificateConfigArgs{
SecretUri: pulumi.String("string"),
},
},
},
},
},
DiskSizeGb: pulumi.Int(0),
DiskType: pulumi.String("string"),
EffectiveTaints: container.NodePoolNodeConfigEffectiveTaintArray{
&container.NodePoolNodeConfigEffectiveTaintArgs{
Effect: pulumi.String("string"),
Key: pulumi.String("string"),
Value: pulumi.String("string"),
},
},
EnableConfidentialStorage: pulumi.Bool(false),
EphemeralStorageConfig: &container.NodePoolNodeConfigEphemeralStorageConfigArgs{
LocalSsdCount: pulumi.Int(0),
},
EphemeralStorageLocalSsdConfig: &container.NodePoolNodeConfigEphemeralStorageLocalSsdConfigArgs{
LocalSsdCount: pulumi.Int(0),
},
FastSocket: &container.NodePoolNodeConfigFastSocketArgs{
Enabled: pulumi.Bool(false),
},
GcfsConfig: &container.NodePoolNodeConfigGcfsConfigArgs{
Enabled: pulumi.Bool(false),
},
GuestAccelerators: container.NodePoolNodeConfigGuestAcceleratorArray{
&container.NodePoolNodeConfigGuestAcceleratorArgs{
Count: pulumi.Int(0),
Type: pulumi.String("string"),
GpuDriverInstallationConfig: &container.NodePoolNodeConfigGuestAcceleratorGpuDriverInstallationConfigArgs{
GpuDriverVersion: pulumi.String("string"),
},
GpuPartitionSize: pulumi.String("string"),
GpuSharingConfig: &container.NodePoolNodeConfigGuestAcceleratorGpuSharingConfigArgs{
GpuSharingStrategy: pulumi.String("string"),
MaxSharedClientsPerGpu: pulumi.Int(0),
},
},
},
Gvnic: &container.NodePoolNodeConfigGvnicArgs{
Enabled: pulumi.Bool(false),
},
HostMaintenancePolicy: &container.NodePoolNodeConfigHostMaintenancePolicyArgs{
MaintenanceInterval: pulumi.String("string"),
},
ImageType: pulumi.String("string"),
KubeletConfig: &container.NodePoolNodeConfigKubeletConfigArgs{
AllowedUnsafeSysctls: pulumi.StringArray{
pulumi.String("string"),
},
ContainerLogMaxFiles: pulumi.Int(0),
ContainerLogMaxSize: pulumi.String("string"),
CpuCfsQuota: pulumi.Bool(false),
CpuCfsQuotaPeriod: pulumi.String("string"),
CpuManagerPolicy: pulumi.String("string"),
ImageGcHighThresholdPercent: pulumi.Int(0),
ImageGcLowThresholdPercent: pulumi.Int(0),
ImageMaximumGcAge: pulumi.String("string"),
ImageMinimumGcAge: pulumi.String("string"),
InsecureKubeletReadonlyPortEnabled: pulumi.String("string"),
PodPidsLimit: pulumi.Int(0),
},
Labels: pulumi.StringMap{
"string": pulumi.String("string"),
},
LinuxNodeConfig: &container.NodePoolNodeConfigLinuxNodeConfigArgs{
CgroupMode: pulumi.String("string"),
HugepagesConfig: &container.NodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgs{
HugepageSize1g: pulumi.Int(0),
HugepageSize2m: pulumi.Int(0),
},
Sysctls: pulumi.StringMap{
"string": pulumi.String("string"),
},
},
LocalNvmeSsdBlockConfig: &container.NodePoolNodeConfigLocalNvmeSsdBlockConfigArgs{
LocalSsdCount: pulumi.Int(0),
},
LocalSsdCount: pulumi.Int(0),
LocalSsdEncryptionMode: pulumi.String("string"),
LoggingVariant: pulumi.String("string"),
MachineType: pulumi.String("string"),
MaxRunDuration: pulumi.String("string"),
Metadata: pulumi.StringMap{
"string": pulumi.String("string"),
},
MinCpuPlatform: pulumi.String("string"),
NodeGroup: pulumi.String("string"),
OauthScopes: pulumi.StringArray{
pulumi.String("string"),
},
Preemptible: pulumi.Bool(false),
ReservationAffinity: &container.NodePoolNodeConfigReservationAffinityArgs{
ConsumeReservationType: pulumi.String("string"),
Key: pulumi.String("string"),
Values: pulumi.StringArray{
pulumi.String("string"),
},
},
ResourceLabels: pulumi.StringMap{
"string": pulumi.String("string"),
},
ResourceManagerTags: pulumi.StringMap{
"string": pulumi.String("string"),
},
SandboxConfig: &container.NodePoolNodeConfigSandboxConfigArgs{
SandboxType: pulumi.String("string"),
},
SecondaryBootDisks: container.NodePoolNodeConfigSecondaryBootDiskArray{
&container.NodePoolNodeConfigSecondaryBootDiskArgs{
DiskImage: pulumi.String("string"),
Mode: pulumi.String("string"),
},
},
ServiceAccount: pulumi.String("string"),
ShieldedInstanceConfig: &container.NodePoolNodeConfigShieldedInstanceConfigArgs{
EnableIntegrityMonitoring: pulumi.Bool(false),
EnableSecureBoot: pulumi.Bool(false),
},
SoleTenantConfig: &container.NodePoolNodeConfigSoleTenantConfigArgs{
NodeAffinities: container.NodePoolNodeConfigSoleTenantConfigNodeAffinityArray{
&container.NodePoolNodeConfigSoleTenantConfigNodeAffinityArgs{
Key: pulumi.String("string"),
Operator: pulumi.String("string"),
Values: pulumi.StringArray{
pulumi.String("string"),
},
},
},
},
Spot: pulumi.Bool(false),
StoragePools: pulumi.StringArray{
pulumi.String("string"),
},
Tags: pulumi.StringArray{
pulumi.String("string"),
},
Taints: container.NodePoolNodeConfigTaintArray{
&container.NodePoolNodeConfigTaintArgs{
Effect: pulumi.String("string"),
Key: pulumi.String("string"),
Value: pulumi.String("string"),
},
},
WorkloadMetadataConfig: &container.NodePoolNodeConfigWorkloadMetadataConfigArgs{
Mode: pulumi.String("string"),
},
},
Name: pulumi.String("string"),
InitialNodeCount: pulumi.Int(0),
Autoscaling: &container.NodePoolAutoscalingArgs{
LocationPolicy: pulumi.String("string"),
MaxNodeCount: pulumi.Int(0),
MinNodeCount: pulumi.Int(0),
TotalMaxNodeCount: pulumi.Int(0),
TotalMinNodeCount: pulumi.Int(0),
},
MaxPodsPerNode: pulumi.Int(0),
NodeCount: pulumi.Int(0),
NodeLocations: pulumi.StringArray{
pulumi.String("string"),
},
PlacementPolicy: &container.NodePoolPlacementPolicyArgs{
Type: pulumi.String("string"),
PolicyName: pulumi.String("string"),
TpuTopology: pulumi.String("string"),
},
Project: pulumi.String("string"),
QueuedProvisioning: &container.NodePoolQueuedProvisioningArgs{
Enabled: pulumi.Bool(false),
},
UpgradeSettings: &container.NodePoolUpgradeSettingsArgs{
BlueGreenSettings: &container.NodePoolUpgradeSettingsBlueGreenSettingsArgs{
StandardRolloutPolicy: &container.NodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicyArgs{
BatchNodeCount: pulumi.Int(0),
BatchPercentage: pulumi.Float64(0),
BatchSoakDuration: pulumi.String("string"),
},
NodePoolSoakDuration: pulumi.String("string"),
},
MaxSurge: pulumi.Int(0),
MaxUnavailable: pulumi.Int(0),
Strategy: pulumi.String("string"),
},
Version: pulumi.String("string"),
})
var nodePoolResource = new NodePool("nodePoolResource", NodePoolArgs.builder()
.cluster("string")
.networkConfig(NodePoolNetworkConfigArgs.builder()
.additionalNodeNetworkConfigs(NodePoolNetworkConfigAdditionalNodeNetworkConfigArgs.builder()
.network("string")
.subnetwork("string")
.build())
.additionalPodNetworkConfigs(NodePoolNetworkConfigAdditionalPodNetworkConfigArgs.builder()
.maxPodsPerNode(0)
.secondaryPodRange("string")
.subnetwork("string")
.build())
.createPodRange(false)
.enablePrivateNodes(false)
.networkPerformanceConfig(NodePoolNetworkConfigNetworkPerformanceConfigArgs.builder()
.totalEgressBandwidthTier("string")
.build())
.podCidrOverprovisionConfig(NodePoolNetworkConfigPodCidrOverprovisionConfigArgs.builder()
.disabled(false)
.build())
.podIpv4CidrBlock("string")
.podRange("string")
.build())
.namePrefix("string")
.location("string")
.management(NodePoolManagementArgs.builder()
.autoRepair(false)
.autoUpgrade(false)
.build())
.nodeConfig(NodePoolNodeConfigArgs.builder()
.advancedMachineFeatures(NodePoolNodeConfigAdvancedMachineFeaturesArgs.builder()
.threadsPerCore(0)
.enableNestedVirtualization(false)
.build())
.bootDiskKmsKey("string")
.confidentialNodes(NodePoolNodeConfigConfidentialNodesArgs.builder()
.enabled(false)
.build())
.containerdConfig(NodePoolNodeConfigContainerdConfigArgs.builder()
.privateRegistryAccessConfig(NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigArgs.builder()
.enabled(false)
.certificateAuthorityDomainConfigs(NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigArgs.builder()
.fqdns("string")
.gcpSecretManagerCertificateConfig(NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigGcpSecretManagerCertificateConfigArgs.builder()
.secretUri("string")
.build())
.build())
.build())
.build())
.diskSizeGb(0)
.diskType("string")
.effectiveTaints(NodePoolNodeConfigEffectiveTaintArgs.builder()
.effect("string")
.key("string")
.value("string")
.build())
.enableConfidentialStorage(false)
.ephemeralStorageConfig(NodePoolNodeConfigEphemeralStorageConfigArgs.builder()
.localSsdCount(0)
.build())
.ephemeralStorageLocalSsdConfig(NodePoolNodeConfigEphemeralStorageLocalSsdConfigArgs.builder()
.localSsdCount(0)
.build())
.fastSocket(NodePoolNodeConfigFastSocketArgs.builder()
.enabled(false)
.build())
.gcfsConfig(NodePoolNodeConfigGcfsConfigArgs.builder()
.enabled(false)
.build())
.guestAccelerators(NodePoolNodeConfigGuestAcceleratorArgs.builder()
.count(0)
.type("string")
.gpuDriverInstallationConfig(NodePoolNodeConfigGuestAcceleratorGpuDriverInstallationConfigArgs.builder()
.gpuDriverVersion("string")
.build())
.gpuPartitionSize("string")
.gpuSharingConfig(NodePoolNodeConfigGuestAcceleratorGpuSharingConfigArgs.builder()
.gpuSharingStrategy("string")
.maxSharedClientsPerGpu(0)
.build())
.build())
.gvnic(NodePoolNodeConfigGvnicArgs.builder()
.enabled(false)
.build())
.hostMaintenancePolicy(NodePoolNodeConfigHostMaintenancePolicyArgs.builder()
.maintenanceInterval("string")
.build())
.imageType("string")
.kubeletConfig(NodePoolNodeConfigKubeletConfigArgs.builder()
.allowedUnsafeSysctls("string")
.containerLogMaxFiles(0)
.containerLogMaxSize("string")
.cpuCfsQuota(false)
.cpuCfsQuotaPeriod("string")
.cpuManagerPolicy("string")
.imageGcHighThresholdPercent(0)
.imageGcLowThresholdPercent(0)
.imageMaximumGcAge("string")
.imageMinimumGcAge("string")
.insecureKubeletReadonlyPortEnabled("string")
.podPidsLimit(0)
.build())
.labels(Map.of("string", "string"))
.linuxNodeConfig(NodePoolNodeConfigLinuxNodeConfigArgs.builder()
.cgroupMode("string")
.hugepagesConfig(NodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgs.builder()
.hugepageSize1g(0)
.hugepageSize2m(0)
.build())
.sysctls(Map.of("string", "string"))
.build())
.localNvmeSsdBlockConfig(NodePoolNodeConfigLocalNvmeSsdBlockConfigArgs.builder()
.localSsdCount(0)
.build())
.localSsdCount(0)
.localSsdEncryptionMode("string")
.loggingVariant("string")
.machineType("string")
.maxRunDuration("string")
.metadata(Map.of("string", "string"))
.minCpuPlatform("string")
.nodeGroup("string")
.oauthScopes("string")
.preemptible(false)
.reservationAffinity(NodePoolNodeConfigReservationAffinityArgs.builder()
.consumeReservationType("string")
.key("string")
.values("string")
.build())
.resourceLabels(Map.of("string", "string"))
.resourceManagerTags(Map.of("string", "string"))
.sandboxConfig(NodePoolNodeConfigSandboxConfigArgs.builder()
.sandboxType("string")
.build())
.secondaryBootDisks(NodePoolNodeConfigSecondaryBootDiskArgs.builder()
.diskImage("string")
.mode("string")
.build())
.serviceAccount("string")
.shieldedInstanceConfig(NodePoolNodeConfigShieldedInstanceConfigArgs.builder()
.enableIntegrityMonitoring(false)
.enableSecureBoot(false)
.build())
.soleTenantConfig(NodePoolNodeConfigSoleTenantConfigArgs.builder()
.nodeAffinities(NodePoolNodeConfigSoleTenantConfigNodeAffinityArgs.builder()
.key("string")
.operator("string")
.values("string")
.build())
.build())
.spot(false)
.storagePools("string")
.tags("string")
.taints(NodePoolNodeConfigTaintArgs.builder()
.effect("string")
.key("string")
.value("string")
.build())
.workloadMetadataConfig(NodePoolNodeConfigWorkloadMetadataConfigArgs.builder()
.mode("string")
.build())
.build())
.name("string")
.initialNodeCount(0)
.autoscaling(NodePoolAutoscalingArgs.builder()
.locationPolicy("string")
.maxNodeCount(0)
.minNodeCount(0)
.totalMaxNodeCount(0)
.totalMinNodeCount(0)
.build())
.maxPodsPerNode(0)
.nodeCount(0)
.nodeLocations("string")
.placementPolicy(NodePoolPlacementPolicyArgs.builder()
.type("string")
.policyName("string")
.tpuTopology("string")
.build())
.project("string")
.queuedProvisioning(NodePoolQueuedProvisioningArgs.builder()
.enabled(false)
.build())
.upgradeSettings(NodePoolUpgradeSettingsArgs.builder()
.blueGreenSettings(NodePoolUpgradeSettingsBlueGreenSettingsArgs.builder()
.standardRolloutPolicy(NodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicyArgs.builder()
.batchNodeCount(0)
.batchPercentage(0)
.batchSoakDuration("string")
.build())
.nodePoolSoakDuration("string")
.build())
.maxSurge(0)
.maxUnavailable(0)
.strategy("string")
.build())
.version("string")
.build());
node_pool_resource = gcp.container.NodePool("nodePoolResource",
cluster="string",
network_config={
"additional_node_network_configs": [{
"network": "string",
"subnetwork": "string",
}],
"additional_pod_network_configs": [{
"max_pods_per_node": 0,
"secondary_pod_range": "string",
"subnetwork": "string",
}],
"create_pod_range": False,
"enable_private_nodes": False,
"network_performance_config": {
"total_egress_bandwidth_tier": "string",
},
"pod_cidr_overprovision_config": {
"disabled": False,
},
"pod_ipv4_cidr_block": "string",
"pod_range": "string",
},
name_prefix="string",
location="string",
management={
"auto_repair": False,
"auto_upgrade": False,
},
node_config={
"advanced_machine_features": {
"threads_per_core": 0,
"enable_nested_virtualization": False,
},
"boot_disk_kms_key": "string",
"confidential_nodes": {
"enabled": False,
},
"containerd_config": {
"private_registry_access_config": {
"enabled": False,
"certificate_authority_domain_configs": [{
"fqdns": ["string"],
"gcp_secret_manager_certificate_config": {
"secret_uri": "string",
},
}],
},
},
"disk_size_gb": 0,
"disk_type": "string",
"effective_taints": [{
"effect": "string",
"key": "string",
"value": "string",
}],
"enable_confidential_storage": False,
"ephemeral_storage_config": {
"local_ssd_count": 0,
},
"ephemeral_storage_local_ssd_config": {
"local_ssd_count": 0,
},
"fast_socket": {
"enabled": False,
},
"gcfs_config": {
"enabled": False,
},
"guest_accelerators": [{
"count": 0,
"type": "string",
"gpu_driver_installation_config": {
"gpu_driver_version": "string",
},
"gpu_partition_size": "string",
"gpu_sharing_config": {
"gpu_sharing_strategy": "string",
"max_shared_clients_per_gpu": 0,
},
}],
"gvnic": {
"enabled": False,
},
"host_maintenance_policy": {
"maintenance_interval": "string",
},
"image_type": "string",
"kubelet_config": {
"allowed_unsafe_sysctls": ["string"],
"container_log_max_files": 0,
"container_log_max_size": "string",
"cpu_cfs_quota": False,
"cpu_cfs_quota_period": "string",
"cpu_manager_policy": "string",
"image_gc_high_threshold_percent": 0,
"image_gc_low_threshold_percent": 0,
"image_maximum_gc_age": "string",
"image_minimum_gc_age": "string",
"insecure_kubelet_readonly_port_enabled": "string",
"pod_pids_limit": 0,
},
"labels": {
"string": "string",
},
"linux_node_config": {
"cgroup_mode": "string",
"hugepages_config": {
"hugepage_size1g": 0,
"hugepage_size2m": 0,
},
"sysctls": {
"string": "string",
},
},
"local_nvme_ssd_block_config": {
"local_ssd_count": 0,
},
"local_ssd_count": 0,
"local_ssd_encryption_mode": "string",
"logging_variant": "string",
"machine_type": "string",
"max_run_duration": "string",
"metadata": {
"string": "string",
},
"min_cpu_platform": "string",
"node_group": "string",
"oauth_scopes": ["string"],
"preemptible": False,
"reservation_affinity": {
"consume_reservation_type": "string",
"key": "string",
"values": ["string"],
},
"resource_labels": {
"string": "string",
},
"resource_manager_tags": {
"string": "string",
},
"sandbox_config": {
"sandbox_type": "string",
},
"secondary_boot_disks": [{
"disk_image": "string",
"mode": "string",
}],
"service_account": "string",
"shielded_instance_config": {
"enable_integrity_monitoring": False,
"enable_secure_boot": False,
},
"sole_tenant_config": {
"node_affinities": [{
"key": "string",
"operator": "string",
"values": ["string"],
}],
},
"spot": False,
"storage_pools": ["string"],
"tags": ["string"],
"taints": [{
"effect": "string",
"key": "string",
"value": "string",
}],
"workload_metadata_config": {
"mode": "string",
},
},
name="string",
initial_node_count=0,
autoscaling={
"location_policy": "string",
"max_node_count": 0,
"min_node_count": 0,
"total_max_node_count": 0,
"total_min_node_count": 0,
},
max_pods_per_node=0,
node_count=0,
node_locations=["string"],
placement_policy={
"type": "string",
"policy_name": "string",
"tpu_topology": "string",
},
project="string",
queued_provisioning={
"enabled": False,
},
upgrade_settings={
"blue_green_settings": {
"standard_rollout_policy": {
"batch_node_count": 0,
"batch_percentage": 0,
"batch_soak_duration": "string",
},
"node_pool_soak_duration": "string",
},
"max_surge": 0,
"max_unavailable": 0,
"strategy": "string",
},
version="string")
const nodePoolResource = new gcp.container.NodePool("nodePoolResource", {
cluster: "string",
networkConfig: {
additionalNodeNetworkConfigs: [{
network: "string",
subnetwork: "string",
}],
additionalPodNetworkConfigs: [{
maxPodsPerNode: 0,
secondaryPodRange: "string",
subnetwork: "string",
}],
createPodRange: false,
enablePrivateNodes: false,
networkPerformanceConfig: {
totalEgressBandwidthTier: "string",
},
podCidrOverprovisionConfig: {
disabled: false,
},
podIpv4CidrBlock: "string",
podRange: "string",
},
namePrefix: "string",
location: "string",
management: {
autoRepair: false,
autoUpgrade: false,
},
nodeConfig: {
advancedMachineFeatures: {
threadsPerCore: 0,
enableNestedVirtualization: false,
},
bootDiskKmsKey: "string",
confidentialNodes: {
enabled: false,
},
containerdConfig: {
privateRegistryAccessConfig: {
enabled: false,
certificateAuthorityDomainConfigs: [{
fqdns: ["string"],
gcpSecretManagerCertificateConfig: {
secretUri: "string",
},
}],
},
},
diskSizeGb: 0,
diskType: "string",
effectiveTaints: [{
effect: "string",
key: "string",
value: "string",
}],
enableConfidentialStorage: false,
ephemeralStorageConfig: {
localSsdCount: 0,
},
ephemeralStorageLocalSsdConfig: {
localSsdCount: 0,
},
fastSocket: {
enabled: false,
},
gcfsConfig: {
enabled: false,
},
guestAccelerators: [{
count: 0,
type: "string",
gpuDriverInstallationConfig: {
gpuDriverVersion: "string",
},
gpuPartitionSize: "string",
gpuSharingConfig: {
gpuSharingStrategy: "string",
maxSharedClientsPerGpu: 0,
},
}],
gvnic: {
enabled: false,
},
hostMaintenancePolicy: {
maintenanceInterval: "string",
},
imageType: "string",
kubeletConfig: {
allowedUnsafeSysctls: ["string"],
containerLogMaxFiles: 0,
containerLogMaxSize: "string",
cpuCfsQuota: false,
cpuCfsQuotaPeriod: "string",
cpuManagerPolicy: "string",
imageGcHighThresholdPercent: 0,
imageGcLowThresholdPercent: 0,
imageMaximumGcAge: "string",
imageMinimumGcAge: "string",
insecureKubeletReadonlyPortEnabled: "string",
podPidsLimit: 0,
},
labels: {
string: "string",
},
linuxNodeConfig: {
cgroupMode: "string",
hugepagesConfig: {
hugepageSize1g: 0,
hugepageSize2m: 0,
},
sysctls: {
string: "string",
},
},
localNvmeSsdBlockConfig: {
localSsdCount: 0,
},
localSsdCount: 0,
localSsdEncryptionMode: "string",
loggingVariant: "string",
machineType: "string",
maxRunDuration: "string",
metadata: {
string: "string",
},
minCpuPlatform: "string",
nodeGroup: "string",
oauthScopes: ["string"],
preemptible: false,
reservationAffinity: {
consumeReservationType: "string",
key: "string",
values: ["string"],
},
resourceLabels: {
string: "string",
},
resourceManagerTags: {
string: "string",
},
sandboxConfig: {
sandboxType: "string",
},
secondaryBootDisks: [{
diskImage: "string",
mode: "string",
}],
serviceAccount: "string",
shieldedInstanceConfig: {
enableIntegrityMonitoring: false,
enableSecureBoot: false,
},
soleTenantConfig: {
nodeAffinities: [{
key: "string",
operator: "string",
values: ["string"],
}],
},
spot: false,
storagePools: ["string"],
tags: ["string"],
taints: [{
effect: "string",
key: "string",
value: "string",
}],
workloadMetadataConfig: {
mode: "string",
},
},
name: "string",
initialNodeCount: 0,
autoscaling: {
locationPolicy: "string",
maxNodeCount: 0,
minNodeCount: 0,
totalMaxNodeCount: 0,
totalMinNodeCount: 0,
},
maxPodsPerNode: 0,
nodeCount: 0,
nodeLocations: ["string"],
placementPolicy: {
type: "string",
policyName: "string",
tpuTopology: "string",
},
project: "string",
queuedProvisioning: {
enabled: false,
},
upgradeSettings: {
blueGreenSettings: {
standardRolloutPolicy: {
batchNodeCount: 0,
batchPercentage: 0,
batchSoakDuration: "string",
},
nodePoolSoakDuration: "string",
},
maxSurge: 0,
maxUnavailable: 0,
strategy: "string",
},
version: "string",
});
type: gcp:container:NodePool
properties:
autoscaling:
locationPolicy: string
maxNodeCount: 0
minNodeCount: 0
totalMaxNodeCount: 0
totalMinNodeCount: 0
cluster: string
initialNodeCount: 0
location: string
management:
autoRepair: false
autoUpgrade: false
maxPodsPerNode: 0
name: string
namePrefix: string
networkConfig:
additionalNodeNetworkConfigs:
- network: string
subnetwork: string
additionalPodNetworkConfigs:
- maxPodsPerNode: 0
secondaryPodRange: string
subnetwork: string
createPodRange: false
enablePrivateNodes: false
networkPerformanceConfig:
totalEgressBandwidthTier: string
podCidrOverprovisionConfig:
disabled: false
podIpv4CidrBlock: string
podRange: string
nodeConfig:
advancedMachineFeatures:
enableNestedVirtualization: false
threadsPerCore: 0
bootDiskKmsKey: string
confidentialNodes:
enabled: false
containerdConfig:
privateRegistryAccessConfig:
certificateAuthorityDomainConfigs:
- fqdns:
- string
gcpSecretManagerCertificateConfig:
secretUri: string
enabled: false
diskSizeGb: 0
diskType: string
effectiveTaints:
- effect: string
key: string
value: string
enableConfidentialStorage: false
ephemeralStorageConfig:
localSsdCount: 0
ephemeralStorageLocalSsdConfig:
localSsdCount: 0
fastSocket:
enabled: false
gcfsConfig:
enabled: false
guestAccelerators:
- count: 0
gpuDriverInstallationConfig:
gpuDriverVersion: string
gpuPartitionSize: string
gpuSharingConfig:
gpuSharingStrategy: string
maxSharedClientsPerGpu: 0
type: string
gvnic:
enabled: false
hostMaintenancePolicy:
maintenanceInterval: string
imageType: string
kubeletConfig:
allowedUnsafeSysctls:
- string
containerLogMaxFiles: 0
containerLogMaxSize: string
cpuCfsQuota: false
cpuCfsQuotaPeriod: string
cpuManagerPolicy: string
imageGcHighThresholdPercent: 0
imageGcLowThresholdPercent: 0
imageMaximumGcAge: string
imageMinimumGcAge: string
insecureKubeletReadonlyPortEnabled: string
podPidsLimit: 0
labels:
string: string
linuxNodeConfig:
cgroupMode: string
hugepagesConfig:
hugepageSize1g: 0
hugepageSize2m: 0
sysctls:
string: string
localNvmeSsdBlockConfig:
localSsdCount: 0
localSsdCount: 0
localSsdEncryptionMode: string
loggingVariant: string
machineType: string
maxRunDuration: string
metadata:
string: string
minCpuPlatform: string
nodeGroup: string
oauthScopes:
- string
preemptible: false
reservationAffinity:
consumeReservationType: string
key: string
values:
- string
resourceLabels:
string: string
resourceManagerTags:
string: string
sandboxConfig:
sandboxType: string
secondaryBootDisks:
- diskImage: string
mode: string
serviceAccount: string
shieldedInstanceConfig:
enableIntegrityMonitoring: false
enableSecureBoot: false
soleTenantConfig:
nodeAffinities:
- key: string
operator: string
values:
- string
spot: false
storagePools:
- string
tags:
- string
taints:
- effect: string
key: string
value: string
workloadMetadataConfig:
mode: string
nodeCount: 0
nodeLocations:
- string
placementPolicy:
policyName: string
tpuTopology: string
type: string
project: string
queuedProvisioning:
enabled: false
upgradeSettings:
blueGreenSettings:
nodePoolSoakDuration: string
standardRolloutPolicy:
batchNodeCount: 0
batchPercentage: 0
batchSoakDuration: string
maxSurge: 0
maxUnavailable: 0
strategy: string
version: string
NodePool Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
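For instance, both of the following Python declarations pass the same autoscaling input; the resource and cluster names are placeholders.
import pulumi_gcp as gcp

# As an argument class...
pool_a = gcp.container.NodePool("pool-a",
    cluster="my-cluster",
    autoscaling=gcp.container.NodePoolAutoscalingArgs(
        min_node_count=1,
        max_node_count=3,
    ))

# ...or as a dictionary literal; both forms are accepted.
pool_b = gcp.container.NodePool("pool-b",
    cluster="my-cluster",
    autoscaling={
        "min_node_count": 1,
        "max_node_count": 3,
    })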
The NodePool resource accepts the following input properties:
- Cluster
This property is required. Changes to this property will trigger replacement.
The cluster to create the node pool for. The cluster must be present in the location provided for clusters. May be specified in the format projects/{{project}}/locations/{{location}}/clusters/{{cluster}} or as just the name of the cluster.
- Autoscaling NodePoolAutoscaling
Configuration required by the cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
- InitialNodeCount
Changes to this property will trigger replacement.
The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field (see the sketch after this list).
- Location
Changes to this property will trigger replacement.
The location (region or zone) of the cluster.
- Management NodePoolManagement
Node management configuration, in which auto-repair and auto-upgrade are configured. Structure is documented below.
- MaxPodsPerNode
Changes to this property will trigger replacement.
The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based", that is, node pools belonging to clusters that do not have IP aliasing enabled. See the official documentation for more information.
- Name
Changes to this property will trigger replacement.
The name of the node pool. If left blank, the provider will auto-generate a unique name.
- NamePrefix
Changes to this property will trigger replacement.
Creates a unique name for the node pool beginning with the specified prefix. Conflicts with name.
- NetworkConfig NodePoolNetworkConfig
The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool or enabling private nodes. Structure is documented below.
- NodeConfig NodePoolNodeConfig
Changes to this property will trigger replacement.
Parameters used in creating the node pool. See gcp.container.Cluster for schema.
- NodeCount int
The number of nodes per instance group. This field can be used to update the number of nodes per instance group but should not be used alongside autoscaling.
- NodeLocations List<string>
The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level node_locations will be used. Note: node_locations will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.
- PlacementPolicy NodePoolPlacementPolicy
Changes to this property will trigger replacement.
Specifies a custom placement policy for the nodes.
- Project
Changes to this property will trigger replacement.
The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
- QueuedProvisioning NodePoolQueuedProvisioning
Changes to this property will trigger replacement.
Specifies node pool-level settings of queued provisioning. Structure is documented below.
- UpgradeSettings NodePoolUpgradeSettings
Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
- Version string
The Kubernetes version for the nodes in this pool. Note that if this field and auto_upgrade are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions, as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions data source's version_prefix field to approximate fuzzy versions in a provider-compatible way.
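As referenced in the InitialNodeCount warning above, drift from manual resizes can be suppressed with the ignoreChanges resource option. A minimal Python sketch, with placeholder names; the property path uses the engine-level camelCase name:
import pulumi
import pulumi_gcp as gcp

# Ignore drift in initialNodeCount caused by manual resizes, so the
# pool is not destroyed and recreated on the next provider run.
pool = gcp.container.NodePool("pool",
    cluster="my-cluster",
    initial_node_count=3,
    opts=pulumi.ResourceOptions(ignore_changes=["initialNodeCount"]))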
- Cluster
This property is required. Changes to this property will trigger replacement.
The cluster to create the node pool for. The cluster must be present in the location provided for clusters. May be specified in the format projects/{{project}}/locations/{{location}}/clusters/{{cluster}} or as just the name of the cluster.
- Autoscaling NodePoolAutoscalingArgs
Configuration required by the cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
- InitialNodeCount
Changes to this property will trigger replacement.
The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field.
- Location
Changes to this property will trigger replacement.
The location (region or zone) of the cluster.
- Management NodePoolManagementArgs
Node management configuration, in which auto-repair and auto-upgrade are configured. Structure is documented below.
- MaxPodsPerNode
Changes to this property will trigger replacement.
The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based", that is, node pools belonging to clusters that do not have IP aliasing enabled. See the official documentation for more information.
- Name
Changes to this property will trigger replacement.
The name of the node pool. If left blank, the provider will auto-generate a unique name.
- NamePrefix
Changes to this property will trigger replacement.
Creates a unique name for the node pool beginning with the specified prefix. Conflicts with name.
- NetworkConfig NodePoolNetworkConfigArgs
The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool or enabling private nodes. Structure is documented below.
- NodeConfig NodePoolNodeConfigArgs
Changes to this property will trigger replacement.
Parameters used in creating the node pool. See gcp.container.Cluster for schema.
- NodeCount int
The number of nodes per instance group. This field can be used to update the number of nodes per instance group but should not be used alongside autoscaling.
- NodeLocations []string
The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level node_locations will be used. Note: node_locations will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.
- PlacementPolicy NodePoolPlacementPolicyArgs
Changes to this property will trigger replacement.
Specifies a custom placement policy for the nodes.
- Project
Changes to this property will trigger replacement.
The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
- QueuedProvisioning NodePoolQueuedProvisioningArgs
Changes to this property will trigger replacement.
Specifies node pool-level settings of queued provisioning. Structure is documented below.
- UpgradeSettings NodePoolUpgradeSettingsArgs
Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
- Version string
The Kubernetes version for the nodes in this pool. Note that if this field and auto_upgrade are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions, as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions data source's version_prefix field to approximate fuzzy versions in a provider-compatible way.
- cluster
This property is required. Changes to this property will trigger replacement.
- The cluster to create the node pool for. Cluster must be present in
location
provided for clusters. May be specified in the formatprojects/{{project}}/locations/{{location}}/clusters/{{cluster}}
or as just the name of the cluster. - autoscaling
Node
Pool Autoscaling - Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
- initial
Node Count Changes to this property will trigger replacement.
- The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field.
- location
Changes to this property will trigger replacement.
- The location (region or zone) of the cluster.
- management
Node
Pool Management - Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.
- max
Pods Per Node Changes to this property will trigger replacement.
- The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
- name
Changes to this property will trigger replacement.
- The name of the node pool. If left blank, the provider will auto-generate a unique name.
- name
Prefix Changes to this property will trigger replacement.
- Creates a unique name for the node pool beginning
with the specified prefix. Conflicts with
name
. - network
Config NodePool Network Config - The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool, or enabling private nodes. Structure is documented below.
- node
Config Changes to this property will trigger replacement.
Pool Node Config - Parameters used in creating the node pool. See gcp.container.Cluster for schema.
- node
Count Integer - The number of nodes per instance group. This field can be used to
update the number of nodes per instance group but should not be used alongside
autoscaling
. - node
Locations List<String> - The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level
node_locations
will be used. Note:
node_locations
will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster. - placement
Policy Changes to this property will trigger replacement.
Pool Placement Policy - Specifies a custom placement policy for the nodes.
- project
Changes to this property will trigger replacement.
- The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
- queued
Provisioning Changes to this property will trigger replacement.
Pool Queued Provisioning - Specifies node pool-level settings of queued provisioning. Structure is documented below.
- upgrade
Settings NodePool Upgrade Settings - Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
- version String
- The Kubernetes version for the nodes in this pool. Note that if this field
and
auto_upgrade
are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions
data source's version_prefix
field to approximate fuzzy versions in a provider-compatible way.
- cluster
This property is required. Changes to this property will trigger replacement.
- The cluster to create the node pool for. Cluster must be present in
location
provided for clusters. May be specified in the format projects/{{project}}/locations/{{location}}/clusters/{{cluster}}
or as just the name of the cluster. - autoscaling
Node
Pool Autoscaling - Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
- initial
Node Count Changes to this property will trigger replacement.
- The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field.
- location
Changes to this property will trigger replacement.
- The location (region or zone) of the cluster.
- management
Node
Pool Management - Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.
- max
Pods Per Node Changes to this property will trigger replacement.
- The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
- name
Changes to this property will trigger replacement.
- The name of the node pool. If left blank, the provider will auto-generate a unique name.
- name
Prefix Changes to this property will trigger replacement.
- Creates a unique name for the node pool beginning
with the specified prefix. Conflicts with
name
. - network
Config NodePool Network Config - The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool, or enabling private nodes. Structure is documented below.
- node
Config Changes to this property will trigger replacement.
Pool Node Config - Parameters used in creating the node pool. See gcp.container.Cluster for schema.
- node
Count number - The number of nodes per instance group. This field can be used to
update the number of nodes per instance group but should not be used alongside
autoscaling
. - node
Locations string[] - The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level
node_locations
will be used. Note:
node_locations
will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster. - placement
Policy Changes to this property will trigger replacement.
Pool Placement Policy - Specifies a custom placement policy for the nodes.
- project
Changes to this property will trigger replacement.
- The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
- queued
Provisioning Changes to this property will trigger replacement.
Pool Queued Provisioning - Specifies node pool-level settings of queued provisioning. Structure is documented below.
- upgrade
Settings NodePool Upgrade Settings - Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
- version string
- The Kubernetes version for the nodes in this pool. Note that if this field
and
auto_upgrade
are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions
data source's version_prefix
field to approximate fuzzy versions in a provider-compatible way.
- cluster
This property is required. Changes to this property will trigger replacement.
- The cluster to create the node pool for. Cluster must be present in
location
provided for clusters. May be specified in the format projects/{{project}}/locations/{{location}}/clusters/{{cluster}}
or as just the name of the cluster. - autoscaling
Node
Pool Autoscaling Args - Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
- initial_
node_ count Changes to this property will trigger replacement.
- The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field.
- location
Changes to this property will trigger replacement.
- The location (region or zone) of the cluster.
- management
Node
Pool Management Args - Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.
- max_
pods_ per_ node Changes to this property will trigger replacement.
- The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
- name
Changes to this property will trigger replacement.
- The name of the node pool. If left blank, the provider will auto-generate a unique name.
- name_
prefix Changes to this property will trigger replacement.
- Creates a unique name for the node pool beginning
with the specified prefix. Conflicts with
name
. - network_
config NodePool Network Config Args - The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool, or enabling private nodes. Structure is documented below.
- node_
config Changes to this property will trigger replacement.
Pool Node Config Args - Parameters used in creating the node pool. See gcp.container.Cluster for schema.
- node_
count int - The number of nodes per instance group. This field can be used to
update the number of nodes per instance group but should not be used alongside
autoscaling
. - node_
locations Sequence[str] - The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level
node_locations
will be used. Note:
node_locations
will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster. - placement_
policy Changes to this property will trigger replacement.
Pool Placement Policy Args - Specifies a custom placement policy for the nodes.
- project
Changes to this property will trigger replacement.
- The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
- queued_
provisioning Changes to this property will trigger replacement.
Pool Queued Provisioning Args - Specifies node pool-level settings of queued provisioning. Structure is documented below.
- upgrade_
settings NodePool Upgrade Settings Args - Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
- version str
- The Kubernetes version for the nodes in this pool. Note that if this field
and
auto_upgrade
are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions
data source's version_prefix
field to approximate fuzzy versions in a provider-compatible way.
- cluster
This property is required. Changes to this property will trigger replacement.
- The cluster to create the node pool for. Cluster must be present in
location
provided for clusters. May be specified in the format projects/{{project}}/locations/{{location}}/clusters/{{cluster}}
or as just the name of the cluster. - autoscaling Property Map
- Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
- initial
Node Count Changes to this property will trigger replacement.
- The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field.
- location
Changes to this property will trigger replacement.
- The location (region or zone) of the cluster.
- management Property Map
- Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.
- max
Pods Per Node Changes to this property will trigger replacement.
- The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
- name
Changes to this property will trigger replacement.
- The name of the node pool. If left blank, the provider will auto-generate a unique name.
- name
Prefix Changes to this property will trigger replacement.
- Creates a unique name for the node pool beginning
with the specified prefix. Conflicts with
name
. - network
Config Property Map - The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool, or enabling private nodes. Structure is documented below.
- node
Config Changes to this property will trigger replacement.
- Parameters used in creating the node pool. See gcp.container.Cluster for schema.
- node
Count Number - The number of nodes per instance group. This field can be used to
update the number of nodes per instance group but should not be used alongside
autoscaling
. - node
Locations List<String> - The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level
node_locations
will be used. Note:
node_locations
will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster. - placement
Policy Changes to this property will trigger replacement.
- Specifies a custom placement policy for the nodes.
- project
Changes to this property will trigger replacement.
- The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
- queued
Provisioning Changes to this property will trigger replacement.
- Specifies node pool-level settings of queued provisioning. Structure is documented below.
- upgrade
Settings Property Map - Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
- version String
- The Kubernetes version for the nodes in this pool. Note that if this field
and
auto_upgrade
are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions
data source's version_prefix
field to approximate fuzzy versions in a provider-compatible way.
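For illustration, a minimal TypeScript sketch combining two practices noted above: pinning an explicit node version through the gcp.container.getEngineVersions data source's version_prefix, and ignoring drift on initialNodeCount with the ignoreChanges resource option. The cluster name, location, and version prefix are assumptions for the example.
import * as gcp from "@pulumi/gcp";
// Resolve an explicit node version from a fuzzy prefix, so the provider
// never sees spurious diffs from a fuzzy `version` value.
// The "1.27." prefix is illustrative; pick one valid for your cluster.
const engineVersions = gcp.container.getEngineVersionsOutput({
    location: "us-central1",
    versionPrefix: "1.27.",
});
const pinnedPool = new gcp.container.NodePool("pinned-pool", {
    cluster: "my-gke-cluster", // assumes an existing cluster in this location
    location: "us-central1",
    initialNodeCount: 1,
    version: engineVersions.latestNodeVersion,
    nodeConfig: {
        machineType: "e2-medium",
    },
}, {
    // Manual resizes can change initialNodeCount server-side; ignoring it
    // prevents the discrepancy from forcing destroy/recreate on later runs.
    ignoreChanges: ["initialNodeCount"],
});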
Outputs
All input properties are implicitly available as output properties. Additionally, the NodePool resource produces the following output properties:
- Id string
- The provider-assigned unique ID for this managed resource.
- Instance
Group List<string>Urls - The resource URLs of the managed instance groups associated with this node pool.
- Managed
Instance List<string>Group Urls - List of instance group URLs which have been assigned to this node pool.
- Operation string
- Id string
- The provider-assigned unique ID for this managed resource.
- Instance
Group []stringUrls - The resource URLs of the managed instance groups associated with this node pool.
- Managed
Instance []stringGroup Urls - List of instance group URLs which have been assigned to this node pool.
- Operation string
- id String
- The provider-assigned unique ID for this managed resource.
- instance
Group List<String>Urls - The resource URLs of the managed instance groups associated with this node pool.
- managed
Instance List<String>Group Urls - List of instance group URLs which have been assigned to this node pool.
- operation String
- id string
- The provider-assigned unique ID for this managed resource.
- instance
Group string[]Urls - The resource URLs of the managed instance groups associated with this node pool.
- managed
Instance string[]Group Urls - List of instance group URLs which have been assigned to this node pool.
- operation string
- id str
- The provider-assigned unique ID for this managed resource.
- instance_
group_ Sequence[str]urls - The resource URLs of the managed instance groups associated with this node pool.
- managed_
instance_ Sequence[str]group_ urls - List of instance group URLs which have been assigned to this node pool.
- operation str
- id String
- The provider-assigned unique ID for this managed resource.
- instance
Group List<String>Urls - The resource URLs of the managed instance groups associated with this node pool.
- managed
Instance List<String>Group Urls - List of instance group URLs which have been assigned to this node pool.
- operation String
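These outputs can be read like any other Pulumi output. A minimal sketch, assuming the pinnedPool resource from the earlier example:
// Export the instance group URLs that GKE assigned to the pool.
export const instanceGroupUrls = pinnedPool.instanceGroupUrls;
export const managedInstanceGroupUrls = pinnedPool.managedInstanceGroupUrls;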
Look up Existing NodePool Resource
Get an existing NodePool resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: NodePoolState, opts?: CustomResourceOptions): NodePool
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
autoscaling: Optional[NodePoolAutoscalingArgs] = None,
cluster: Optional[str] = None,
initial_node_count: Optional[int] = None,
instance_group_urls: Optional[Sequence[str]] = None,
location: Optional[str] = None,
managed_instance_group_urls: Optional[Sequence[str]] = None,
management: Optional[NodePoolManagementArgs] = None,
max_pods_per_node: Optional[int] = None,
name: Optional[str] = None,
name_prefix: Optional[str] = None,
network_config: Optional[NodePoolNetworkConfigArgs] = None,
node_config: Optional[NodePoolNodeConfigArgs] = None,
node_count: Optional[int] = None,
node_locations: Optional[Sequence[str]] = None,
operation: Optional[str] = None,
placement_policy: Optional[NodePoolPlacementPolicyArgs] = None,
project: Optional[str] = None,
queued_provisioning: Optional[NodePoolQueuedProvisioningArgs] = None,
upgrade_settings: Optional[NodePoolUpgradeSettingsArgs] = None,
version: Optional[str] = None) -> NodePool
func GetNodePool(ctx *Context, name string, id IDInput, state *NodePoolState, opts ...ResourceOption) (*NodePool, error)
public static NodePool Get(string name, Input<string> id, NodePoolState? state, CustomResourceOptions? opts = null)
public static NodePool get(String name, Output<String> id, NodePoolState state, CustomResourceOptions options)
resources:
  _:
    type: gcp:container:NodePool
    get:
      id: ${id}
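For example, a TypeScript sketch of looking up an existing node pool; the ID format shown is an assumption, so match whatever your stack or a previous import actually reports for the resource:
import * as gcp from "@pulumi/gcp";
// Adopt the state of an already-provisioned node pool by provider ID.
const existing = gcp.container.NodePool.get(
    "existing-pool",
    "my-project/us-central1/my-gke-cluster/my-node-pool",
);
export const existingNodeCount = existing.nodeCount;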
- name
This property is required. - The unique name of the resulting resource.
- id
This property is required. - The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
This property is required. - The unique name of the resulting resource.
- id
This property is required. - The unique provider ID of the resource to lookup.
- name
This property is required. - The unique name of the resulting resource.
- id
This property is required. - The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
This property is required. - The unique name of the resulting resource.
- id
This property is required. - The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
This property is required. - The unique name of the resulting resource.
- id
This property is required. - The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- Autoscaling
Node
Pool Autoscaling - Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
- Cluster
Changes to this property will trigger replacement.
- The cluster to create the node pool for. Cluster must be present in
location
provided for clusters. May be specified in the format projects/{{project}}/locations/{{location}}/clusters/{{cluster}}
or as just the name of the cluster. - Initial
Node Count Changes to this property will trigger replacement.
- The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field.
- Instance
Group List<string>Urls - The resource URLs of the managed instance groups associated with this node pool.
- Location
Changes to this property will trigger replacement.
- The location (region or zone) of the cluster.
- Managed
Instance List<string>Group Urls - List of instance group URLs which have been assigned to this node pool.
- Management
Node
Pool Management - Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.
- Max
Pods Per Node Changes to this property will trigger replacement.
- The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
- Name
Changes to this property will trigger replacement.
- The name of the node pool. If left blank, the provider will auto-generate a unique name.
- Name
Prefix Changes to this property will trigger replacement.
- Creates a unique name for the node pool beginning
with the specified prefix. Conflicts with
name
. - Network
Config NodePool Network Config - The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool, or enabling private nodes. Structure is documented below.
- Node
Config Changes to this property will trigger replacement.
Pool Node Config - Parameters used in creating the node pool. See gcp.container.Cluster for schema.
- Node
Count int - The number of nodes per instance group. This field can be used to
update the number of nodes per instance group but should not be used alongside
autoscaling
. - Node
Locations List<string> - The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level
node_locations
will be used. Note:
node_locations
will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster. - Operation string
- Placement
Policy Changes to this property will trigger replacement.
Pool Placement Policy - Specifies a custom placement policy for the nodes.
- Project
Changes to this property will trigger replacement.
- The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
- Queued
Provisioning Changes to this property will trigger replacement.
Pool Queued Provisioning - Specifies node pool-level settings of queued provisioning. Structure is documented below.
- Upgrade
Settings NodePool Upgrade Settings - Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
- Version string
- The Kubernetes version for the nodes in this pool. Note that if this field
and
auto_upgrade
are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions
data source's version_prefix
field to approximate fuzzy versions in a provider-compatible way.
- Autoscaling
Node
Pool Autoscaling Args - Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
- Cluster
Changes to this property will trigger replacement.
- The cluster to create the node pool for. Cluster must be present in
location
provided for clusters. May be specified in the format projects/{{project}}/locations/{{location}}/clusters/{{cluster}}
or as just the name of the cluster. - Initial
Node Count Changes to this property will trigger replacement.
- The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field.
- Instance
Group []stringUrls - The resource URLs of the managed instance groups associated with this node pool.
- Location
Changes to this property will trigger replacement.
- The location (region or zone) of the cluster.
- Managed
Instance []stringGroup Urls - List of instance group URLs which have been assigned to this node pool.
- Management
Node
Pool Management Args - Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.
- Max
Pods Per Node Changes to this property will trigger replacement.
- The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
- Name
Changes to this property will trigger replacement.
- The name of the node pool. If left blank, the provider will auto-generate a unique name.
- Name
Prefix Changes to this property will trigger replacement.
- Creates a unique name for the node pool beginning
with the specified prefix. Conflicts with
name
. - Network
Config NodePool Network Config Args - The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool, or enabling private nodes. Structure is documented below.
- Node
Config Changes to this property will trigger replacement.
Pool Node Config Args - Parameters used in creating the node pool. See gcp.container.Cluster for schema.
- Node
Count int - The number of nodes per instance group. This field can be used to
update the number of nodes per instance group but should not be used alongside
autoscaling
. - Node
Locations []string - The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level
node_locations
will be used. Note:
node_locations
will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster. - Operation string
- Placement
Policy Changes to this property will trigger replacement.
Pool Placement Policy Args - Specifies a custom placement policy for the nodes.
- Project
Changes to this property will trigger replacement.
- The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
- Queued
Provisioning Changes to this property will trigger replacement.
Pool Queued Provisioning Args - Specifies node pool-level settings of queued provisioning. Structure is documented below.
- Upgrade
Settings NodePool Upgrade Settings Args - Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
- Version string
- The Kubernetes version for the nodes in this pool. Note that if this field
and
auto_upgrade
are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions
data source's version_prefix
field to approximate fuzzy versions in a provider-compatible way.
- autoscaling
Node
Pool Autoscaling - Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
- cluster
Changes to this property will trigger replacement.
- The cluster to create the node pool for. Cluster must be present in
location
provided for clusters. May be specified in the format projects/{{project}}/locations/{{location}}/clusters/{{cluster}}
or as just the name of the cluster. - initial
Node Count Changes to this property will trigger replacement.
- The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field.
- instance
Group List<String>Urls - The resource URLs of the managed instance groups associated with this node pool.
- location
Changes to this property will trigger replacement.
- The location (region or zone) of the cluster.
- managed
Instance List<String>Group Urls - List of instance group URLs which have been assigned to this node pool.
- management
Node
Pool Management - Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.
- max
Pods Per Node Changes to this property will trigger replacement.
- The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
- name
Changes to this property will trigger replacement.
- The name of the node pool. If left blank, the provider will auto-generate a unique name.
- name
Prefix Changes to this property will trigger replacement.
- Creates a unique name for the node pool beginning
with the specified prefix. Conflicts with
name
. - network
Config NodePool Network Config - The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool, or enabling private nodes. Structure is documented below.
- node
Config Changes to this property will trigger replacement.
Pool Node Config - Parameters used in creating the node pool. See gcp.container.Cluster for schema.
- node
Count Integer - The number of nodes per instance group. This field can be used to
update the number of nodes per instance group but should not be used alongside
autoscaling
. - node
Locations List<String> - The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level
node_locations
will be used. Note:
node_locations
will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster. - operation String
- placement
Policy Changes to this property will trigger replacement.
Pool Placement Policy - Specifies a custom placement policy for the nodes.
- project
Changes to this property will trigger replacement.
- The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
- queued
Provisioning Changes to this property will trigger replacement.
Pool Queued Provisioning - Specifies node pool-level settings of queued provisioning. Structure is documented below.
- upgrade
Settings NodePool Upgrade Settings - Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
- version String
- The Kubernetes version for the nodes in this pool. Note that if this field
and
auto_upgrade
are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions
data source's version_prefix
field to approximate fuzzy versions in a provider-compatible way.
- autoscaling
Node
Pool Autoscaling - Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
- cluster
Changes to this property will trigger replacement.
- The cluster to create the node pool for. Cluster must be present in
location
provided for clusters. May be specified in the format projects/{{project}}/locations/{{location}}/clusters/{{cluster}}
or as just the name of the cluster. - initial
Node Count Changes to this property will trigger replacement.
- The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field.
- instance
Group string[]Urls - The resource URLs of the managed instance groups associated with this node pool.
- location
Changes to this property will trigger replacement.
- The location (region or zone) of the cluster.
- managed
Instance string[]Group Urls - List of instance group URLs which have been assigned to this node pool.
- management
Node
Pool Management - Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.
- max
Pods Per Node Changes to this property will trigger replacement.
- The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
- name
Changes to this property will trigger replacement.
- The name of the node pool. If left blank, the provider will auto-generate a unique name.
- name
Prefix Changes to this property will trigger replacement.
- Creates a unique name for the node pool beginning
with the specified prefix. Conflicts with
name
. - network
Config NodePool Network Config - The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool, or enabling private nodes. Structure is documented below.
- node
Config Changes to this property will trigger replacement.
Pool Node Config - Parameters used in creating the node pool. See gcp.container.Cluster for schema.
- node
Count number - The number of nodes per instance group. This field can be used to
update the number of nodes per instance group but should not be used alongside
autoscaling
. - node
Locations string[] - The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level
node_locations
will be used. Note:
node_locations
will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster. - operation string
- placement
Policy Changes to this property will trigger replacement.
Pool Placement Policy - Specifies a custom placement policy for the nodes.
- project
Changes to this property will trigger replacement.
- The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
- queued
Provisioning Changes to this property will trigger replacement.
Pool Queued Provisioning - Specifies node pool-level settings of queued provisioning. Structure is documented below.
- upgrade
Settings NodePool Upgrade Settings - Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
- version string
- The Kubernetes version for the nodes in this pool. Note that if this field
and
auto_upgrade
are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions
data source's version_prefix
field to approximate fuzzy versions in a provider-compatible way.
- autoscaling
Node
Pool Autoscaling Args - Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
- cluster
Changes to this property will trigger replacement.
- The cluster to create the node pool for. Cluster must be present in
location
provided for clusters. May be specified in the format projects/{{project}}/locations/{{location}}/clusters/{{cluster}}
or as just the name of the cluster. - initial_
node_ count Changes to this property will trigger replacement.
- The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field.
- instance_
group_ Sequence[str]urls - The resource URLs of the managed instance groups associated with this node pool.
- location
Changes to this property will trigger replacement.
- The location (region or zone) of the cluster.
- managed_
instance_ Sequence[str]group_ urls - List of instance group URLs which have been assigned to this node pool.
- management
Node
Pool Management Args - Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.
- max_
pods_ per_ node Changes to this property will trigger replacement.
- The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
- name
Changes to this property will trigger replacement.
- The name of the node pool. If left blank, the provider will auto-generate a unique name.
- name_
prefix Changes to this property will trigger replacement.
- Creates a unique name for the node pool beginning
with the specified prefix. Conflicts with
name
. - network_
config NodePool Network Config Args - The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool, or enabling private nodes. Structure is documented below.
- node_
config Changes to this property will trigger replacement.
Pool Node Config Args - Parameters used in creating the node pool. See gcp.container.Cluster for schema.
- node_
count int - The number of nodes per instance group. This field can be used to
update the number of nodes per instance group but should not be used alongside
autoscaling
. - node_
locations Sequence[str] - The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level
node_locations
will be used. Note:
node_locations
will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster. - operation str
- placement_
policy Changes to this property will trigger replacement.
Pool Placement Policy Args - Specifies a custom placement policy for the nodes.
- project
Changes to this property will trigger replacement.
- The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
- queued_
provisioning Changes to this property will trigger replacement.
Pool Queued Provisioning Args - Specifies node pool-level settings of queued provisioning. Structure is documented below.
- upgrade_
settings NodePool Upgrade Settings Args - Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
- version str
- The Kubernetes version for the nodes in this pool. Note that if this field
and
auto_upgrade
are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions
data source's version_prefix
field to approximate fuzzy versions in a provider-compatible way.
- autoscaling Property Map
- Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
- cluster
Changes to this property will trigger replacement.
- The cluster to create the node pool for. Cluster must be present in
location
provided for clusters. May be specified in the format projects/{{project}}/locations/{{location}}/clusters/{{cluster}}
or as just the name of the cluster. - initial
Node Count Changes to this property will trigger replacement.
- The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field.
- instance
Group List<String>Urls - The resource URLs of the managed instance groups associated with this node pool.
- location
Changes to this property will trigger replacement.
- The location (region or zone) of the cluster.
- managed
Instance List<String>Group Urls - List of instance group URLs which have been assigned to this node pool.
- management Property Map
- Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.
- max
Pods Per Node Changes to this property will trigger replacement.
- The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
- name
Changes to this property will trigger replacement.
- The name of the node pool. If left blank, the provider will auto-generate a unique name.
- name
Prefix Changes to this property will trigger replacement.
- Creates a unique name for the node pool beginning
with the specified prefix. Conflicts with
name
. - network
Config Property Map - The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool, or enabling private nodes. Structure is documented below.
- node
Config Changes to this property will trigger replacement.
- Parameters used in creating the node pool. See gcp.container.Cluster for schema.
- node
Count Number - The number of nodes per instance group. This field can be used to
update the number of nodes per instance group but should not be used alongside
autoscaling
. - node
Locations List<String> - The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level
node_locations
will be used. Note:
node_locations
will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster. - operation String
- placement
Policy Changes to this property will trigger replacement.
- Specifies a custom placement policy for the nodes.
- project
Changes to this property will trigger replacement.
- The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
- queued
Provisioning Changes to this property will trigger replacement.
- Specifies node pool-level settings of queued provisioning. Structure is documented below.
- upgrade
Settings Property Map - Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
- version String
- The Kubernetes version for the nodes in this pool. Note that if this field
and
auto_upgrade
are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions
data source's version_prefix
field to approximate fuzzy versions in a provider-compatible way.
Supporting Types
NodePoolAutoscaling, NodePoolAutoscalingArgs
- Location
Policy string - Location policy specifies the algorithm used when
scaling up the node pool. Location policy is supported only in 1.24.1+ clusters.
- "BALANCED" - A best-effort policy that aims to balance the sizes of available zones.
- "ANY" - Instructs the cluster autoscaler to prioritize utilization of unused reservations, and reduce preemption risk for Spot VMs.
- Max
Node intCount - Maximum number of nodes per zone in the NodePool. Must be >= min_node_count. Cannot be used with total limits.
- Min
Node intCount - Minimum number of nodes per zone in the NodePool.
Must be >=0 and <=
max_node_count
. Cannot be used with total limits. - Total
Max intNode Count - Total maximum number of nodes in the NodePool. Must be >= total_min_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
- Total
Min intNode Count - Total minimum number of nodes in the NodePool.
Must be >=0 and <=
total_max_node_count
. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
- Location
Policy string - Location policy specifies the algorithm used when
scaling up the node pool. Location policy is supported only in 1.24.1+ clusters.
- "BALANCED" - A best-effort policy that aims to balance the sizes of available zones.
- "ANY" - Instructs the cluster autoscaler to prioritize utilization of unused reservations, and reduce preemption risk for Spot VMs.
- Max
Node intCount - Maximum number of nodes per zone in the NodePool. Must be >= min_node_count. Cannot be used with total limits.
- Min
Node intCount - Minimum number of nodes per zone in the NodePool.
Must be >=0 and <=
max_node_count
. Cannot be used with total limits. - Total
Max intNode Count - Total maximum number of nodes in the NodePool. Must be >= total_min_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
- Total
Min intNode Count - Total minimum number of nodes in the NodePool.
Must be >=0 and <=
total_max_node_count
. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
- location
Policy String - Location policy specifies the algorithm used when
scaling up the node pool. Location policy is supported only in 1.24.1+ clusters.
- "BALANCED" - A best-effort policy that aims to balance the sizes of available zones.
- "ANY" - Instructs the cluster autoscaler to prioritize utilization of unused reservations, and reduce preemption risk for Spot VMs.
- max
Node IntegerCount - Maximum number of nodes per zone in the NodePool. Must be >= min_node_count. Cannot be used with total limits.
- min
Node IntegerCount - Minimum number of nodes per zone in the NodePool.
Must be >=0 and <=
max_node_count
. Cannot be used with total limits. - total
Max IntegerNode Count - Total maximum number of nodes in the NodePool. Must be >= total_min_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
- total
Min IntegerNode Count - Total minimum number of nodes in the NodePool.
Must be >=0 and <=
total_max_node_count
. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
- location
Policy string - Location policy specifies the algorithm used when
scaling up the node pool. Location policy is supported only in 1.24.1+ clusters.
- "BALANCED" - A best-effort policy that aims to balance the sizes of available zones.
- "ANY" - Instructs the cluster autoscaler to prioritize utilization of unused reservations, and reduce preemption risk for Spot VMs.
- max
Node numberCount - Maximum number of nodes per zone in the NodePool. Must be >= min_node_count. Cannot be used with total limits.
- min
Node numberCount - Minimum number of nodes per zone in the NodePool.
Must be >=0 and <=
max_node_count
. Cannot be used with total limits. - total
Max numberNode Count - Total maximum number of nodes in the NodePool. Must be >= total_min_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
- total
Min numberNode Count - Total minimum number of nodes in the NodePool.
Must be >=0 and <=
total_max_node_count
. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
- location_
policy str - Location policy specifies the algorithm used when
scaling up the node pool. Location policy is supported only in 1.24.1+ clusters.
- "BALANCED" - A best-effort policy that aims to balance the sizes of available zones.
- "ANY" - Instructs the cluster autoscaler to prioritize utilization of unused reservations, and reduce preemption risk for Spot VMs.
- max_
node_ intcount - Maximum number of nodes per zone in the NodePool. Must be >= min_node_count. Cannot be used with total limits.
- min_
node_ intcount - Minimum number of nodes per zone in the NodePool.
Must be >=0 and <=
max_node_count
. Cannot be used with total limits. - total_
max_ intnode_ count - Total maximum number of nodes in the NodePool. Must be >= total_min_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
- total_
min_ intnode_ count - Total minimum number of nodes in the NodePool.
Must be >=0 and <=
total_max_node_count
. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
- location
Policy String - Location policy specifies the algorithm used when
scaling up the node pool. Location policy is supported only in 1.24.1+ clusters.
- "BALANCED" - A best-effort policy that aims to balance the sizes of available zones.
- "ANY" - Instructs the cluster autoscaler to prioritize utilization of unused reservations, and reduce preemption risk for Spot VMs.
- max
Node NumberCount - Maximum number of nodes per zone in the NodePool. Must be >= min_node_count. Cannot be used with total limits.
- min
Node NumberCount - Minimum number of nodes per zone in the NodePool.
Must be >=0 and <=
max_node_count
. Cannot be used with total limits. - total
Max NumberNode Count - Total maximum number of nodes in the NodePool. Must be >= total_min_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
- total
Min NumberNode Count - Total minimum number of nodes in the NodePool.
Must be >=0 and <=
total_max_node_count
. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
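A minimal TypeScript sketch of the total (pool-wide) limits described above. Since total limits cannot be combined with per-zone limits, and nodeCount should not be set alongside autoscaling, only the total fields appear; the cluster name and location are assumptions.
import * as gcp from "@pulumi/gcp";
const autoscaledPool = new gcp.container.NodePool("autoscaled-pool", {
    cluster: "my-gke-cluster", // assumed existing cluster
    location: "us-central1",
    autoscaling: {
        totalMinNodeCount: 1,
        totalMaxNodeCount: 10,
        // "ANY" prioritizes unused reservations and reduces Spot preemption risk.
        locationPolicy: "ANY",
    },
    nodeConfig: {
        machineType: "e2-medium",
    },
});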
NodePoolManagement, NodePoolManagementArgs
- Auto
Repair bool - Whether the nodes will be automatically repaired. Enabled by default.
- Auto
Upgrade bool - Whether the nodes will be automatically upgraded. Enabled by default.
- Auto
Repair bool - Whether the nodes will be automatically repaired. Enabled by default.
- Auto
Upgrade bool - Whether the nodes will be automatically upgraded. Enabled by default.
- auto
Repair Boolean - Whether the nodes will be automatically repaired. Enabled by default.
- auto
Upgrade Boolean - Whether the nodes will be automatically upgraded. Enabled by default.
- auto
Repair boolean - Whether the nodes will be automatically repaired. Enabled by default.
- auto
Upgrade boolean - Whether the nodes will be automatically upgraded. Enabled by default.
- auto_
repair bool - Whether the nodes will be automatically repaired. Enabled by default.
- auto_
upgrade bool - Whether the nodes will be automatically upgraded. Enabled by default.
- auto
Repair Boolean - Whether the nodes will be automatically repaired. Enabled by default.
- auto
Upgrade Boolean - Whether the nodes will be automatically upgraded. Enabled by default.
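A minimal TypeScript sketch, assuming an existing cluster: auto-upgrade is disabled because the pool pins an explicit version (the version string is illustrative; the two settings would otherwise fight, per the note above), while auto-repair keeps its default-on behavior explicit.
import * as gcp from "@pulumi/gcp";
const managedPool = new gcp.container.NodePool("managed-pool", {
    cluster: "my-gke-cluster", // assumed existing cluster
    location: "us-central1",
    nodeCount: 1,
    version: "1.27.8-gke.1067004", // illustrative explicit version
    management: {
        autoRepair: true,   // default is enabled; stated here for clarity
        autoUpgrade: false, // disabled so the pinned version stays in control
    },
});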
NodePoolNetworkConfig, NodePoolNetworkConfigArgs
- Additional
Node Network Configs Changes to this property will trigger replacement.
Pool Network Config Additional Node Network Config> - Specifies the additional node networks for this node pool. Each node network corresponds to an additional interface. Structure is documented below.
- Additional
Pod Network Configs Changes to this property will trigger replacement.
Pool Network Config Additional Pod Network Config> - Specifies the additional pod networks for this node pool. Each pod network corresponds to an additional alias IP range for the node. Structure is documented below.
- Create
Pod Range Changes to this property will trigger replacement.
- Whether to create a new range for pod IPs in this node pool. Defaults are provided for
pod_range
and pod_ipv4_cidr_block
if they are not specified. - Enable
Private boolNodes - Whether nodes have internal IP addresses only.
- Network
Performance NodeConfig Pool Network Config Network Performance Config - Network bandwidth tier configuration. Structure is documented below.
- Pod
Cidr Overprovision Config Changes to this property will trigger replacement.
Pool Network Config Pod Cidr Overprovision Config - Configuration for node-pool level pod cidr overprovision. If not set, the cluster level setting will be inherited. Structure is documented below.
- Pod
Ipv4Cidr Block Changes to this property will trigger replacement.
- The IP address range for pod IPs in this node pool. Only applicable if createPodRange is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use.
- Pod
Range Changes to this property will trigger replacement.
- The ID of the secondary range for pod IPs. If
create_pod_range
is true, this ID is used for the new range. Ifcreate_pod_range
is false, uses an existing secondary range with this ID.
- AdditionalNodeNetworkConfigs []NodePoolNetworkConfigAdditionalNodeNetworkConfig - The additional node networks for this node pool. Each node network corresponds to an additional network interface. Structure is documented below. Changes to this property will trigger replacement.
- AdditionalPodNetworkConfigs []NodePoolNetworkConfigAdditionalPodNetworkConfig - The additional pod networks for this node pool. Each pod network corresponds to an additional alias IP range for the node. Structure is documented below. Changes to this property will trigger replacement.
- CreatePodRange bool - Whether to create a new range for pod IPs in this node pool. Defaults are provided for pod_range and pod_ipv4_cidr_block if they are not specified. Changes to this property will trigger replacement.
- EnablePrivateNodes bool - Whether nodes have internal IP addresses only.
- NetworkPerformanceConfig NodePoolNetworkConfigNetworkPerformanceConfig - Network bandwidth tier configuration. Structure is documented below.
- PodCidrOverprovisionConfig NodePoolNetworkConfigPodCidrOverprovisionConfig - Configuration for node-pool-level pod CIDR overprovisioning. If not set, the cluster-level setting is inherited. Structure is documented below. Changes to this property will trigger replacement.
- PodIpv4CidrBlock string - The IP address range for pod IPs in this node pool. Only applicable if createPodRange is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use. Changes to this property will trigger replacement.
- PodRange string - The ID of the secondary range for pod IPs. If create_pod_range is true, this ID is used for the new range. If create_pod_range is false, an existing secondary range with this ID is used. Changes to this property will trigger replacement.
- additionalNodeNetworkConfigs List<NodePoolNetworkConfigAdditionalNodeNetworkConfig> - The additional node networks for this node pool. Each node network corresponds to an additional network interface. Structure is documented below. Changes to this property will trigger replacement.
- additionalPodNetworkConfigs List<NodePoolNetworkConfigAdditionalPodNetworkConfig> - The additional pod networks for this node pool. Each pod network corresponds to an additional alias IP range for the node. Structure is documented below. Changes to this property will trigger replacement.
- createPodRange Boolean - Whether to create a new range for pod IPs in this node pool. Defaults are provided for pod_range and pod_ipv4_cidr_block if they are not specified. Changes to this property will trigger replacement.
- enablePrivateNodes Boolean - Whether nodes have internal IP addresses only.
- networkPerformanceConfig NodePoolNetworkConfigNetworkPerformanceConfig - Network bandwidth tier configuration. Structure is documented below.
- podCidrOverprovisionConfig NodePoolNetworkConfigPodCidrOverprovisionConfig - Configuration for node-pool-level pod CIDR overprovisioning. If not set, the cluster-level setting is inherited. Structure is documented below. Changes to this property will trigger replacement.
- podIpv4CidrBlock String - The IP address range for pod IPs in this node pool. Only applicable if createPodRange is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use. Changes to this property will trigger replacement.
- podRange String - The ID of the secondary range for pod IPs. If create_pod_range is true, this ID is used for the new range. If create_pod_range is false, an existing secondary range with this ID is used. Changes to this property will trigger replacement.
- additionalNodeNetworkConfigs NodePoolNetworkConfigAdditionalNodeNetworkConfig[] - The additional node networks for this node pool. Each node network corresponds to an additional network interface. Structure is documented below. Changes to this property will trigger replacement.
- additionalPodNetworkConfigs NodePoolNetworkConfigAdditionalPodNetworkConfig[] - The additional pod networks for this node pool. Each pod network corresponds to an additional alias IP range for the node. Structure is documented below. Changes to this property will trigger replacement.
- createPodRange boolean - Whether to create a new range for pod IPs in this node pool. Defaults are provided for pod_range and pod_ipv4_cidr_block if they are not specified. Changes to this property will trigger replacement.
- enablePrivateNodes boolean - Whether nodes have internal IP addresses only.
- networkPerformanceConfig NodePoolNetworkConfigNetworkPerformanceConfig - Network bandwidth tier configuration. Structure is documented below.
- podCidrOverprovisionConfig NodePoolNetworkConfigPodCidrOverprovisionConfig - Configuration for node-pool-level pod CIDR overprovisioning. If not set, the cluster-level setting is inherited. Structure is documented below. Changes to this property will trigger replacement.
- podIpv4CidrBlock string - The IP address range for pod IPs in this node pool. Only applicable if createPodRange is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use. Changes to this property will trigger replacement.
- podRange string - The ID of the secondary range for pod IPs. If create_pod_range is true, this ID is used for the new range. If create_pod_range is false, an existing secondary range with this ID is used. Changes to this property will trigger replacement.
- additional_node_network_configs Sequence[NodePoolNetworkConfigAdditionalNodeNetworkConfig] - The additional node networks for this node pool. Each node network corresponds to an additional network interface. Structure is documented below. Changes to this property will trigger replacement.
- additional_pod_network_configs Sequence[NodePoolNetworkConfigAdditionalPodNetworkConfig] - The additional pod networks for this node pool. Each pod network corresponds to an additional alias IP range for the node. Structure is documented below. Changes to this property will trigger replacement.
- create_pod_range bool - Whether to create a new range for pod IPs in this node pool. Defaults are provided for pod_range and pod_ipv4_cidr_block if they are not specified. Changes to this property will trigger replacement.
- enable_private_nodes bool - Whether nodes have internal IP addresses only.
- network_performance_config NodePoolNetworkConfigNetworkPerformanceConfig - Network bandwidth tier configuration. Structure is documented below.
- pod_cidr_overprovision_config NodePoolNetworkConfigPodCidrOverprovisionConfig - Configuration for node-pool-level pod CIDR overprovisioning. If not set, the cluster-level setting is inherited. Structure is documented below. Changes to this property will trigger replacement.
- pod_ipv4_cidr_block str - The IP address range for pod IPs in this node pool. Only applicable if createPodRange is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use. Changes to this property will trigger replacement.
- pod_range str - The ID of the secondary range for pod IPs. If create_pod_range is true, this ID is used for the new range. If create_pod_range is false, an existing secondary range with this ID is used. Changes to this property will trigger replacement.
- additionalNodeNetworkConfigs List<Property Map> - The additional node networks for this node pool. Each node network corresponds to an additional network interface. Structure is documented below. Changes to this property will trigger replacement.
- additionalPodNetworkConfigs List<Property Map> - The additional pod networks for this node pool. Each pod network corresponds to an additional alias IP range for the node. Structure is documented below. Changes to this property will trigger replacement.
- createPodRange Boolean - Whether to create a new range for pod IPs in this node pool. Defaults are provided for pod_range and pod_ipv4_cidr_block if they are not specified. Changes to this property will trigger replacement.
- enablePrivateNodes Boolean - Whether nodes have internal IP addresses only.
- networkPerformanceConfig Property Map - Network bandwidth tier configuration. Structure is documented below.
- podCidrOverprovisionConfig Property Map - Configuration for node-pool-level pod CIDR overprovisioning. If not set, the cluster-level setting is inherited. Structure is documented below. Changes to this property will trigger replacement.
- podIpv4CidrBlock String - The IP address range for pod IPs in this node pool. Only applicable if createPodRange is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use. Changes to this property will trigger replacement.
- podRange String - The ID of the secondary range for pod IPs. If create_pod_range is true, this ID is used for the new range. If create_pod_range is false, an existing secondary range with this ID is used. Changes to this property will trigger replacement.
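A minimal TypeScript sketch of the common pod-range fields above (all names are placeholders):

```typescript
import * as gcp from "@pulumi/gcp";

// Creates a dedicated pod range sized by netmask (see the podIpv4CidrBlock
// rules above) and keeps nodes on internal IPs only.
const privatePool = new gcp.container.NodePool("private-pool", {
    cluster: "my-gke-cluster", // placeholder cluster name
    nodeCount: 1,
    networkConfig: {
        createPodRange: true,
        podIpv4CidrBlock: "/16", // let GKE pick a range of this size
        enablePrivateNodes: true,
    },
});
```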
NodePoolNetworkConfigAdditionalNodeNetworkConfig, NodePoolNetworkConfigAdditionalNodeNetworkConfigArgs
- Network string - Name of the VPC where the additional interface belongs. Changes to this property will trigger replacement.
- Subnetwork string - Name of the subnetwork where the additional interface belongs. Changes to this property will trigger replacement.
- Network string - Name of the VPC where the additional interface belongs. Changes to this property will trigger replacement.
- Subnetwork string - Name of the subnetwork where the additional interface belongs. Changes to this property will trigger replacement.
- network String - Name of the VPC where the additional interface belongs. Changes to this property will trigger replacement.
- subnetwork String - Name of the subnetwork where the additional interface belongs. Changes to this property will trigger replacement.
- network string - Name of the VPC where the additional interface belongs. Changes to this property will trigger replacement.
- subnetwork string - Name of the subnetwork where the additional interface belongs. Changes to this property will trigger replacement.
- network str - Name of the VPC where the additional interface belongs. Changes to this property will trigger replacement.
- subnetwork str - Name of the subnetwork where the additional interface belongs. Changes to this property will trigger replacement.
- network String - Name of the VPC where the additional interface belongs. Changes to this property will trigger replacement.
- subnetwork String - Name of the subnetwork where the additional interface belongs. Changes to this property will trigger replacement.
NodePoolNetworkConfigAdditionalPodNetworkConfig, NodePoolNetworkConfigAdditionalPodNetworkConfigArgs
- MaxPodsPerNode int - The maximum number of pods per node which use this pod network. Changes to this property will trigger replacement.
- SecondaryPodRange string - The name of the secondary range on the subnet which provides IP addresses for this pod range. Changes to this property will trigger replacement.
- Subnetwork string - Name of the subnetwork where the additional pod network belongs. Changes to this property will trigger replacement.
- MaxPodsPerNode int - The maximum number of pods per node which use this pod network. Changes to this property will trigger replacement.
- SecondaryPodRange string - The name of the secondary range on the subnet which provides IP addresses for this pod range. Changes to this property will trigger replacement.
- Subnetwork string - Name of the subnetwork where the additional pod network belongs. Changes to this property will trigger replacement.
- maxPodsPerNode Integer - The maximum number of pods per node which use this pod network. Changes to this property will trigger replacement.
- secondaryPodRange String - The name of the secondary range on the subnet which provides IP addresses for this pod range. Changes to this property will trigger replacement.
- subnetwork String - Name of the subnetwork where the additional pod network belongs. Changes to this property will trigger replacement.
- maxPodsPerNode number - The maximum number of pods per node which use this pod network. Changes to this property will trigger replacement.
- secondaryPodRange string - The name of the secondary range on the subnet which provides IP addresses for this pod range. Changes to this property will trigger replacement.
- subnetwork string - Name of the subnetwork where the additional pod network belongs. Changes to this property will trigger replacement.
- max_pods_per_node int - The maximum number of pods per node which use this pod network. Changes to this property will trigger replacement.
- secondary_pod_range str - The name of the secondary range on the subnet which provides IP addresses for this pod range. Changes to this property will trigger replacement.
- subnetwork str - Name of the subnetwork where the additional pod network belongs. Changes to this property will trigger replacement.
- maxPodsPerNode Number - The maximum number of pods per node which use this pod network. Changes to this property will trigger replacement.
- secondaryPodRange String - The name of the secondary range on the subnet which provides IP addresses for this pod range. Changes to this property will trigger replacement.
- subnetwork String - Name of the subnetwork where the additional pod network belongs. Changes to this property will trigger replacement.
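A TypeScript sketch covering both additional network shapes above; every network, subnetwork, and secondary-range name is hypothetical:

```typescript
import * as gcp from "@pulumi/gcp";

// One extra node interface plus one extra pod network on that subnet.
const multiNicPool = new gcp.container.NodePool("multi-nic-pool", {
    cluster: "my-gke-cluster", // placeholder cluster name
    nodeCount: 1,
    networkConfig: {
        additionalNodeNetworkConfigs: [{
            network: "extra-vpc",
            subnetwork: "extra-subnet",
        }],
        additionalPodNetworkConfigs: [{
            subnetwork: "extra-subnet",
            secondaryPodRange: "extra-pod-range",
            maxPodsPerNode: 32,
        }],
    },
});
```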
NodePoolNetworkConfigNetworkPerformanceConfig, NodePoolNetworkConfigNetworkPerformanceConfigArgs
- TotalEgressBandwidthTier string - Specifies the total network bandwidth tier for the NodePool. This property is required.
- TotalEgressBandwidthTier string - Specifies the total network bandwidth tier for the NodePool. This property is required.
- totalEgressBandwidthTier String - Specifies the total network bandwidth tier for the NodePool. This property is required.
- totalEgressBandwidthTier string - Specifies the total network bandwidth tier for the NodePool. This property is required.
- total_egress_bandwidth_tier str - Specifies the total network bandwidth tier for the NodePool. This property is required.
- totalEgressBandwidthTier String - Specifies the total network bandwidth tier for the NodePool. This property is required.
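A TypeScript sketch of the bandwidth tier setting; "TIER_1" is the high-bandwidth tier name GKE documents, but confirm the accepted values for your provider version:

```typescript
import * as gcp from "@pulumi/gcp";

// Request the higher egress bandwidth tier for this pool's nodes.
const fastEgressPool = new gcp.container.NodePool("fast-egress-pool", {
    cluster: "my-gke-cluster", // placeholder cluster name
    nodeCount: 1,
    networkConfig: {
        networkPerformanceConfig: { totalEgressBandwidthTier: "TIER_1" },
    },
});
```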
NodePoolNetworkConfigPodCidrOverprovisionConfig, NodePoolNetworkConfigPodCidrOverprovisionConfigArgs
- Disabled bool - Whether pod CIDR overprovisioning is disabled. This property is required.
- Disabled bool - Whether pod CIDR overprovisioning is disabled. This property is required.
- disabled Boolean - Whether pod CIDR overprovisioning is disabled. This property is required.
- disabled boolean - Whether pod CIDR overprovisioning is disabled. This property is required.
- disabled bool - Whether pod CIDR overprovisioning is disabled. This property is required.
- disabled Boolean - Whether pod CIDR overprovisioning is disabled. This property is required.
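A TypeScript sketch of opting a single pool out of pod CIDR overprovisioning, overriding the cluster-level setting it would otherwise inherit:

```typescript
import * as gcp from "@pulumi/gcp";

// Disable pod CIDR overprovisioning for this pool only.
const leanPool = new gcp.container.NodePool("lean-pool", {
    cluster: "my-gke-cluster", // placeholder cluster name
    nodeCount: 1,
    networkConfig: {
        podCidrOverprovisionConfig: { disabled: true },
    },
});
```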
NodePoolNodeConfig, NodePoolNodeConfigArgs
- AdvancedMachineFeatures NodePoolNodeConfigAdvancedMachineFeatures - Specifies options for controlling advanced machine features. Changes to this property will trigger replacement.
- BootDiskKmsKey string - The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. Changes to this property will trigger replacement.
- ConfidentialNodes NodePoolNodeConfigConfidentialNodes - Configuration for the confidential nodes feature, which makes nodes run on confidential VMs. Warning: this configuration can't be changed (or added/removed) after pool creation without deleting and recreating the entire pool. Changes to this property will trigger replacement.
- ContainerdConfig NodePoolNodeConfigContainerdConfig - Parameters for containerd configuration.
- DiskSizeGb int - Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.
- DiskType string - Type of the disk attached to each node, such as pd-standard, pd-balanced, or pd-ssd.
- EffectiveTaints List<NodePoolNodeConfigEffectiveTaint> - List of Kubernetes taints applied to each node.
- EnableConfidentialStorage bool - If enabled, boot disks are configured with confidential mode. Changes to this property will trigger replacement.
- EphemeralStorageConfig NodePoolNodeConfigEphemeralStorageConfig - Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. Changes to this property will trigger replacement.
- EphemeralStorageLocalSsdConfig NodePoolNodeConfigEphemeralStorageLocalSsdConfig - Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. Changes to this property will trigger replacement.
- FastSocket NodePoolNodeConfigFastSocket - Enable or disable NCCL Fast Socket in the node pool.
- GcfsConfig NodePoolNodeConfigGcfsConfig - GCFS configuration for this node.
- GuestAccelerators List<NodePoolNodeConfigGuestAccelerator> - List of the type and count of accelerator cards attached to the instance. Changes to this property will trigger replacement.
- Gvnic NodePoolNodeConfigGvnic - Enable or disable gvnic in the node pool. Changes to this property will trigger replacement.
- HostMaintenancePolicy NodePoolNodeConfigHostMaintenancePolicy - The maintenance policy for the hosts on which the GKE VMs run. Changes to this property will trigger replacement.
- ImageType string - The image type to use for this node. Note that for a given image type, the latest version of it will be used.
- KubeletConfig NodePoolNodeConfigKubeletConfig - Node kubelet configs.
- Labels Dictionary<string, string> - The map of Kubernetes labels (key/value pairs) to be applied to each node. These will be added in addition to any default label(s) that Kubernetes may apply to the node.
- LinuxNodeConfig NodePoolNodeConfigLinuxNodeConfig - Parameters that can be configured on Linux nodes.
- LocalNvmeSsdBlockConfig NodePoolNodeConfigLocalNvmeSsdBlockConfig - Parameters for raw-block local NVMe SSDs. Changes to this property will trigger replacement.
- LocalSsdCount int - The number of local SSD disks to be attached to the node. Changes to this property will trigger replacement.
- LocalSsdEncryptionMode string - LocalSsdEncryptionMode specifies the method used for encrypting the local SSDs attached to the node. Changes to this property will trigger replacement.
- LoggingVariant string - Type of logging agent that is used as the default value for node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT.
- MachineType string - The name of a Google Compute Engine machine type.
- MaxRunDuration string - The runtime of each node in the node pool in seconds, terminated by 's'. Example: "3600s". Changes to this property will trigger replacement.
- Metadata Dictionary<string, string> - The metadata key/value pairs assigned to instances in the cluster. Changes to this property will trigger replacement.
- MinCpuPlatform string - Minimum CPU platform to be used by this instance. The instance may be scheduled on the specified or newer CPU platform. Changes to this property will trigger replacement.
- NodeGroup string - Setting this field will assign instances of this pool to run on the specified node group. This is useful for running workloads on sole tenant nodes. Changes to this property will trigger replacement.
- OauthScopes List<string> - The set of Google API scopes to be made available on all of the node VMs. Changes to this property will trigger replacement.
- Preemptible bool - Whether the nodes are created as preemptible VM instances. Changes to this property will trigger replacement.
- ReservationAffinity NodePoolNodeConfigReservationAffinity - The configuration of the desired reservation which instances could take capacity from. Structure is documented below. Changes to this property will trigger replacement.
- ResourceLabels Dictionary<string, string> - The GCE resource labels (a map of key/value pairs) to be applied to the node pool.
- ResourceManagerTags Dictionary<string, string> - A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty.
- SandboxConfig NodePoolNodeConfigSandboxConfig - Sandbox configuration for this node. Changes to this property will trigger replacement.
- SecondaryBootDisks List<NodePoolNodeConfigSecondaryBootDisk> - Secondary boot disks for preloading data or container images. Changes to this property will trigger replacement.
- ServiceAccount string - The Google Cloud Platform Service Account to be used by the node VMs. Changes to this property will trigger replacement.
- ShieldedInstanceConfig NodePoolNodeConfigShieldedInstanceConfig - Shielded Instance options. Changes to this property will trigger replacement.
- SoleTenantConfig NodePoolNodeConfigSoleTenantConfig - Node affinity options for sole tenant node pools. Changes to this property will trigger replacement.
- Spot bool - Whether the nodes are created as spot VM instances. Changes to this property will trigger replacement.
- StoragePools List<string> - The list of Storage Pools where boot disks are provisioned. Changes to this property will trigger replacement.
- Tags List<string> - The list of instance tags applied to all nodes.
- Taints List<NodePoolNodeConfigTaint> - List of Kubernetes taints to be applied to each node.
- WorkloadMetadataConfig NodePoolNodeConfigWorkloadMetadataConfig - The workload metadata configuration for this node.
- AdvancedMachineFeatures NodePoolNodeConfigAdvancedMachineFeatures - Specifies options for controlling advanced machine features. Changes to this property will trigger replacement.
- BootDiskKmsKey string - The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. Changes to this property will trigger replacement.
- ConfidentialNodes NodePoolNodeConfigConfidentialNodes - Configuration for the confidential nodes feature, which makes nodes run on confidential VMs. Warning: this configuration can't be changed (or added/removed) after pool creation without deleting and recreating the entire pool. Changes to this property will trigger replacement.
- ContainerdConfig NodePoolNodeConfigContainerdConfig - Parameters for containerd configuration.
- DiskSizeGb int - Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.
- DiskType string - Type of the disk attached to each node, such as pd-standard, pd-balanced, or pd-ssd.
- EffectiveTaints []NodePoolNodeConfigEffectiveTaint - List of Kubernetes taints applied to each node.
- EnableConfidentialStorage bool - If enabled, boot disks are configured with confidential mode. Changes to this property will trigger replacement.
- EphemeralStorageConfig NodePoolNodeConfigEphemeralStorageConfig - Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. Changes to this property will trigger replacement.
- EphemeralStorageLocalSsdConfig NodePoolNodeConfigEphemeralStorageLocalSsdConfig - Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. Changes to this property will trigger replacement.
- FastSocket NodePoolNodeConfigFastSocket - Enable or disable NCCL Fast Socket in the node pool.
- GcfsConfig NodePoolNodeConfigGcfsConfig - GCFS configuration for this node.
- GuestAccelerators []NodePoolNodeConfigGuestAccelerator - List of the type and count of accelerator cards attached to the instance. Changes to this property will trigger replacement.
- Gvnic NodePoolNodeConfigGvnic - Enable or disable gvnic in the node pool. Changes to this property will trigger replacement.
- HostMaintenancePolicy NodePoolNodeConfigHostMaintenancePolicy - The maintenance policy for the hosts on which the GKE VMs run. Changes to this property will trigger replacement.
- ImageType string - The image type to use for this node. Note that for a given image type, the latest version of it will be used.
- KubeletConfig NodePoolNodeConfigKubeletConfig - Node kubelet configs.
- Labels map[string]string - The map of Kubernetes labels (key/value pairs) to be applied to each node. These will be added in addition to any default label(s) that Kubernetes may apply to the node.
- LinuxNodeConfig NodePoolNodeConfigLinuxNodeConfig - Parameters that can be configured on Linux nodes.
- LocalNvmeSsdBlockConfig NodePoolNodeConfigLocalNvmeSsdBlockConfig - Parameters for raw-block local NVMe SSDs. Changes to this property will trigger replacement.
- LocalSsdCount int - The number of local SSD disks to be attached to the node. Changes to this property will trigger replacement.
- LocalSsdEncryptionMode string - LocalSsdEncryptionMode specifies the method used for encrypting the local SSDs attached to the node. Changes to this property will trigger replacement.
- LoggingVariant string - Type of logging agent that is used as the default value for node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT.
- MachineType string - The name of a Google Compute Engine machine type.
- MaxRunDuration string - The runtime of each node in the node pool in seconds, terminated by 's'. Example: "3600s". Changes to this property will trigger replacement.
- Metadata map[string]string - The metadata key/value pairs assigned to instances in the cluster. Changes to this property will trigger replacement.
- MinCpuPlatform string - Minimum CPU platform to be used by this instance. The instance may be scheduled on the specified or newer CPU platform. Changes to this property will trigger replacement.
- NodeGroup string - Setting this field will assign instances of this pool to run on the specified node group. This is useful for running workloads on sole tenant nodes. Changes to this property will trigger replacement.
- OauthScopes []string - The set of Google API scopes to be made available on all of the node VMs. Changes to this property will trigger replacement.
- Preemptible bool - Whether the nodes are created as preemptible VM instances. Changes to this property will trigger replacement.
- ReservationAffinity NodePoolNodeConfigReservationAffinity - The configuration of the desired reservation which instances could take capacity from. Structure is documented below. Changes to this property will trigger replacement.
- ResourceLabels map[string]string - The GCE resource labels (a map of key/value pairs) to be applied to the node pool.
- ResourceManagerTags map[string]string - A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty.
- SandboxConfig NodePoolNodeConfigSandboxConfig - Sandbox configuration for this node. Changes to this property will trigger replacement.
- SecondaryBootDisks []NodePoolNodeConfigSecondaryBootDisk - Secondary boot disks for preloading data or container images. Changes to this property will trigger replacement.
- ServiceAccount string - The Google Cloud Platform Service Account to be used by the node VMs. Changes to this property will trigger replacement.
- ShieldedInstanceConfig NodePoolNodeConfigShieldedInstanceConfig - Shielded Instance options. Changes to this property will trigger replacement.
- SoleTenantConfig NodePoolNodeConfigSoleTenantConfig - Node affinity options for sole tenant node pools. Changes to this property will trigger replacement.
- Spot bool - Whether the nodes are created as spot VM instances. Changes to this property will trigger replacement.
- StoragePools []string - The list of Storage Pools where boot disks are provisioned. Changes to this property will trigger replacement.
- Tags []string - The list of instance tags applied to all nodes.
- Taints []NodePoolNodeConfigTaint - List of Kubernetes taints to be applied to each node.
- WorkloadMetadataConfig NodePoolNodeConfigWorkloadMetadataConfig - The workload metadata configuration for this node.
- advancedMachineFeatures NodePoolNodeConfigAdvancedMachineFeatures - Specifies options for controlling advanced machine features. Changes to this property will trigger replacement.
- bootDiskKmsKey String - The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. Changes to this property will trigger replacement.
- confidentialNodes NodePoolNodeConfigConfidentialNodes - Configuration for the confidential nodes feature, which makes nodes run on confidential VMs. Warning: this configuration can't be changed (or added/removed) after pool creation without deleting and recreating the entire pool. Changes to this property will trigger replacement.
- containerdConfig NodePoolNodeConfigContainerdConfig - Parameters for containerd configuration.
- diskSizeGb Integer - Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.
- diskType String - Type of the disk attached to each node, such as pd-standard, pd-balanced, or pd-ssd.
- effectiveTaints List<NodePoolNodeConfigEffectiveTaint> - List of Kubernetes taints applied to each node.
- enableConfidentialStorage Boolean - If enabled, boot disks are configured with confidential mode. Changes to this property will trigger replacement.
- ephemeralStorageConfig NodePoolNodeConfigEphemeralStorageConfig - Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. Changes to this property will trigger replacement.
- ephemeralStorageLocalSsdConfig NodePoolNodeConfigEphemeralStorageLocalSsdConfig - Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. Changes to this property will trigger replacement.
- fastSocket NodePoolNodeConfigFastSocket - Enable or disable NCCL Fast Socket in the node pool.
- gcfsConfig NodePoolNodeConfigGcfsConfig - GCFS configuration for this node.
- guestAccelerators List<NodePoolNodeConfigGuestAccelerator> - List of the type and count of accelerator cards attached to the instance. Changes to this property will trigger replacement.
- gvnic NodePoolNodeConfigGvnic - Enable or disable gvnic in the node pool. Changes to this property will trigger replacement.
- hostMaintenancePolicy NodePoolNodeConfigHostMaintenancePolicy - The maintenance policy for the hosts on which the GKE VMs run. Changes to this property will trigger replacement.
- imageType String - The image type to use for this node. Note that for a given image type, the latest version of it will be used.
- kubeletConfig NodePoolNodeConfigKubeletConfig - Node kubelet configs.
- labels Map<String,String> - The map of Kubernetes labels (key/value pairs) to be applied to each node. These will be added in addition to any default label(s) that Kubernetes may apply to the node.
- linuxNodeConfig NodePoolNodeConfigLinuxNodeConfig - Parameters that can be configured on Linux nodes.
- localNvmeSsdBlockConfig NodePoolNodeConfigLocalNvmeSsdBlockConfig - Parameters for raw-block local NVMe SSDs. Changes to this property will trigger replacement.
- localSsdCount Integer - The number of local SSD disks to be attached to the node. Changes to this property will trigger replacement.
- localSsdEncryptionMode String - LocalSsdEncryptionMode specifies the method used for encrypting the local SSDs attached to the node. Changes to this property will trigger replacement.
- loggingVariant String - Type of logging agent that is used as the default value for node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT.
- machineType String - The name of a Google Compute Engine machine type.
- maxRunDuration String - The runtime of each node in the node pool in seconds, terminated by 's'. Example: "3600s". Changes to this property will trigger replacement.
- metadata Map<String,String> - The metadata key/value pairs assigned to instances in the cluster. Changes to this property will trigger replacement.
- minCpuPlatform String - Minimum CPU platform to be used by this instance. The instance may be scheduled on the specified or newer CPU platform. Changes to this property will trigger replacement.
- nodeGroup String - Setting this field will assign instances of this pool to run on the specified node group. This is useful for running workloads on sole tenant nodes. Changes to this property will trigger replacement.
- oauthScopes List<String> - The set of Google API scopes to be made available on all of the node VMs. Changes to this property will trigger replacement.
- preemptible Boolean - Whether the nodes are created as preemptible VM instances. Changes to this property will trigger replacement.
- reservationAffinity NodePoolNodeConfigReservationAffinity - The configuration of the desired reservation which instances could take capacity from. Structure is documented below. Changes to this property will trigger replacement.
- resourceLabels Map<String,String> - The GCE resource labels (a map of key/value pairs) to be applied to the node pool.
- resourceManagerTags Map<String,String> - A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty.
- sandboxConfig NodePoolNodeConfigSandboxConfig - Sandbox configuration for this node. Changes to this property will trigger replacement.
- secondaryBootDisks List<NodePoolNodeConfigSecondaryBootDisk> - Secondary boot disks for preloading data or container images. Changes to this property will trigger replacement.
- serviceAccount String - The Google Cloud Platform Service Account to be used by the node VMs. Changes to this property will trigger replacement.
- shieldedInstanceConfig NodePoolNodeConfigShieldedInstanceConfig - Shielded Instance options. Changes to this property will trigger replacement.
- soleTenantConfig NodePoolNodeConfigSoleTenantConfig - Node affinity options for sole tenant node pools. Changes to this property will trigger replacement.
- spot Boolean - Whether the nodes are created as spot VM instances. Changes to this property will trigger replacement.
- storagePools List<String> - The list of Storage Pools where boot disks are provisioned. Changes to this property will trigger replacement.
- tags List<String> - The list of instance tags applied to all nodes.
- taints List<NodePoolNodeConfigTaint> - List of Kubernetes taints to be applied to each node.
- workloadMetadataConfig NodePoolNodeConfigWorkloadMetadataConfig - The workload metadata configuration for this node.
- advancedMachineFeatures NodePoolNodeConfigAdvancedMachineFeatures - Specifies options for controlling advanced machine features. Changes to this property will trigger replacement.
- bootDiskKmsKey string - The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. Changes to this property will trigger replacement.
- confidentialNodes NodePoolNodeConfigConfidentialNodes - Configuration for the confidential nodes feature, which makes nodes run on confidential VMs. Warning: this configuration can't be changed (or added/removed) after pool creation without deleting and recreating the entire pool. Changes to this property will trigger replacement.
- containerdConfig NodePoolNodeConfigContainerdConfig - Parameters for containerd configuration.
- diskSizeGb number - Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.
- diskType string - Type of the disk attached to each node, such as pd-standard, pd-balanced, or pd-ssd.
- effectiveTaints NodePoolNodeConfigEffectiveTaint[] - List of Kubernetes taints applied to each node.
- enableConfidentialStorage boolean - If enabled, boot disks are configured with confidential mode. Changes to this property will trigger replacement.
- ephemeralStorageConfig NodePoolNodeConfigEphemeralStorageConfig - Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. Changes to this property will trigger replacement.
- ephemeralStorageLocalSsdConfig NodePoolNodeConfigEphemeralStorageLocalSsdConfig - Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. Changes to this property will trigger replacement.
- fastSocket NodePoolNodeConfigFastSocket - Enable or disable NCCL Fast Socket in the node pool.
- gcfsConfig NodePoolNodeConfigGcfsConfig - GCFS configuration for this node.
- guestAccelerators NodePoolNodeConfigGuestAccelerator[] - List of the type and count of accelerator cards attached to the instance. Changes to this property will trigger replacement.
- gvnic NodePoolNodeConfigGvnic - Enable or disable gvnic in the node pool. Changes to this property will trigger replacement.
- hostMaintenancePolicy NodePoolNodeConfigHostMaintenancePolicy - The maintenance policy for the hosts on which the GKE VMs run. Changes to this property will trigger replacement.
- imageType string - The image type to use for this node. Note that for a given image type, the latest version of it will be used.
- kubeletConfig NodePoolNodeConfigKubeletConfig - Node kubelet configs.
- labels {[key: string]: string} - The map of Kubernetes labels (key/value pairs) to be applied to each node. These will be added in addition to any default label(s) that Kubernetes may apply to the node.
- linuxNodeConfig NodePoolNodeConfigLinuxNodeConfig - Parameters that can be configured on Linux nodes.
- localNvmeSsdBlockConfig NodePoolNodeConfigLocalNvmeSsdBlockConfig - Parameters for raw-block local NVMe SSDs. Changes to this property will trigger replacement.
- localSsdCount number - The number of local SSD disks to be attached to the node. Changes to this property will trigger replacement.
- localSsdEncryptionMode string - LocalSsdEncryptionMode specifies the method used for encrypting the local SSDs attached to the node. Changes to this property will trigger replacement.
- loggingVariant string - Type of logging agent that is used as the default value for node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT.
- machineType string - The name of a Google Compute Engine machine type.
- maxRunDuration string - The runtime of each node in the node pool in seconds, terminated by 's'. Example: "3600s". Changes to this property will trigger replacement.
- metadata {[key: string]: string} - The metadata key/value pairs assigned to instances in the cluster. Changes to this property will trigger replacement.
- minCpuPlatform string - Minimum CPU platform to be used by this instance. The instance may be scheduled on the specified or newer CPU platform. Changes to this property will trigger replacement.
- nodeGroup string - Setting this field will assign instances of this pool to run on the specified node group. This is useful for running workloads on sole tenant nodes. Changes to this property will trigger replacement.
- oauthScopes string[] - The set of Google API scopes to be made available on all of the node VMs. Changes to this property will trigger replacement.
- preemptible boolean - Whether the nodes are created as preemptible VM instances. Changes to this property will trigger replacement.
- reservationAffinity NodePoolNodeConfigReservationAffinity - The configuration of the desired reservation which instances could take capacity from. Structure is documented below. Changes to this property will trigger replacement.
- resourceLabels {[key: string]: string} - The GCE resource labels (a map of key/value pairs) to be applied to the node pool.
- resourceManagerTags {[key: string]: string} - A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty.
- sandboxConfig NodePoolNodeConfigSandboxConfig - Sandbox configuration for this node. Changes to this property will trigger replacement.
- secondaryBootDisks NodePoolNodeConfigSecondaryBootDisk[] - Secondary boot disks for preloading data or container images. Changes to this property will trigger replacement.
- serviceAccount string - The Google Cloud Platform Service Account to be used by the node VMs. Changes to this property will trigger replacement.
- shieldedInstanceConfig NodePoolNodeConfigShieldedInstanceConfig - Shielded Instance options. Changes to this property will trigger replacement.
- soleTenantConfig NodePoolNodeConfigSoleTenantConfig - Node affinity options for sole tenant node pools. Changes to this property will trigger replacement.
- spot boolean - Whether the nodes are created as spot VM instances. Changes to this property will trigger replacement.
- storagePools string[] - The list of Storage Pools where boot disks are provisioned. Changes to this property will trigger replacement.
- tags string[] - The list of instance tags applied to all nodes.
- taints NodePoolNodeConfigTaint[] - List of Kubernetes taints to be applied to each node.
- workloadMetadataConfig NodePoolNodeConfigWorkloadMetadataConfig - The workload metadata configuration for this node.
- advanced_machine_features NodePoolNodeConfigAdvancedMachineFeatures - Specifies options for controlling advanced machine features. Changes to this property will trigger replacement.
- boot_disk_kms_key str - The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. Changes to this property will trigger replacement.
- confidential_nodes NodePoolNodeConfigConfidentialNodes - Configuration for the confidential nodes feature, which makes nodes run on confidential VMs. Warning: this configuration can't be changed (or added/removed) after pool creation without deleting and recreating the entire pool. Changes to this property will trigger replacement.
- containerd_config NodePoolNodeConfigContainerdConfig - Parameters for containerd configuration.
- disk_size_gb int - Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.
- disk_type str - Type of the disk attached to each node, such as pd-standard, pd-balanced, or pd-ssd.
- effective_taints Sequence[NodePoolNodeConfigEffectiveTaint] - List of Kubernetes taints applied to each node.
- enable_confidential_storage bool - If enabled, boot disks are configured with confidential mode. Changes to this property will trigger replacement.
- ephemeral_storage_config NodePoolNodeConfigEphemeralStorageConfig - Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. Changes to this property will trigger replacement.
- ephemeral_storage_local_ssd_config NodePoolNodeConfigEphemeralStorageLocalSsdConfig - Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. Changes to this property will trigger replacement.
- fast_socket NodePoolNodeConfigFastSocket - Enable or disable NCCL Fast Socket in the node pool.
- gcfs_config NodePoolNodeConfigGcfsConfig - GCFS configuration for this node.
- guest_accelerators Sequence[NodePoolNodeConfigGuestAccelerator] - List of the type and count of accelerator cards attached to the instance. Changes to this property will trigger replacement.
- gvnic NodePoolNodeConfigGvnic - Enable or disable gvnic in the node pool. Changes to this property will trigger replacement.
- host_maintenance_policy NodePoolNodeConfigHostMaintenancePolicy - The maintenance policy for the hosts on which the GKE VMs run. Changes to this property will trigger replacement.
- image_type str - The image type to use for this node. Note that for a given image type, the latest version of it will be used.
- kubelet_config NodePoolNodeConfigKubeletConfig - Node kubelet configs.
- labels Mapping[str, str] - The map of Kubernetes labels (key/value pairs) to be applied to each node. These will be added in addition to any default label(s) that Kubernetes may apply to the node.
- linux_node_config NodePoolNodeConfigLinuxNodeConfig - Parameters that can be configured on Linux nodes.
- local_nvme_ssd_block_config NodePoolNodeConfigLocalNvmeSsdBlockConfig - Parameters for raw-block local NVMe SSDs. Changes to this property will trigger replacement.
- local_ssd_count int - The number of local SSD disks to be attached to the node. Changes to this property will trigger replacement.
- local_ssd_encryption_mode str - LocalSsdEncryptionMode specifies the method used for encrypting the local SSDs attached to the node. Changes to this property will trigger replacement.
- logging_variant str - Type of logging agent that is used as the default value for node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT.
- machine_type str - The name of a Google Compute Engine machine type.
- max_run_duration str - The runtime of each node in the node pool in seconds, terminated by 's'. Example: "3600s". Changes to this property will trigger replacement.
- metadata Mapping[str, str] - The metadata key/value pairs assigned to instances in the cluster. Changes to this property will trigger replacement.
- min_cpu_platform str - Minimum CPU platform to be used by this instance. The instance may be scheduled on the specified or newer CPU platform. Changes to this property will trigger replacement.
- node_group str - Setting this field will assign instances of this pool to run on the specified node group. This is useful for running workloads on sole tenant nodes. Changes to this property will trigger replacement.
- oauth_scopes Sequence[str] - The set of Google API scopes to be made available on all of the node VMs. Changes to this property will trigger replacement.
- preemptible bool - Whether the nodes are created as preemptible VM instances. Changes to this property will trigger replacement.
- reservation_affinity NodePoolNodeConfigReservationAffinity - The configuration of the desired reservation which instances could take capacity from. Structure is documented below. Changes to this property will trigger replacement.
- resource_labels Mapping[str, str] - The GCE resource labels (a map of key/value pairs) to be applied to the node pool.
- resource_manager_tags Mapping[str, str] - A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty.
- sandbox_config NodePoolNodeConfigSandboxConfig - Sandbox configuration for this node. Changes to this property will trigger replacement.
- secondary_boot_disks Sequence[NodePoolNodeConfigSecondaryBootDisk] - Secondary boot disks for preloading data or container images. Changes to this property will trigger replacement.
- service_account str - The Google Cloud Platform Service Account to be used by the node VMs. Changes to this property will trigger replacement.
- shielded_instance_config NodePoolNodeConfigShieldedInstanceConfig - Shielded Instance options. Changes to this property will trigger replacement.
- sole_tenant_config NodePoolNodeConfigSoleTenantConfig - Node affinity options for sole tenant node pools. Changes to this property will trigger replacement.
- spot bool - Whether the nodes are created as spot VM instances. Changes to this property will trigger replacement.
- storage_pools Sequence[str] - The list of Storage Pools where boot disks are provisioned. Changes to this property will trigger replacement.
- tags Sequence[str] - The list of instance tags applied to all nodes.
- taints Sequence[NodePoolNodeConfigTaint] - List of Kubernetes taints to be applied to each node.
- workload_metadata_config NodePoolNodeConfigWorkloadMetadataConfig - The workload metadata configuration for this node.
- advancedMachineFeatures Property Map - Specifies options for controlling advanced machine features. Changes to this property will trigger replacement.
- bootDiskKmsKey String - The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. Changes to this property will trigger replacement.
- confidentialNodes Property Map - Configuration for the confidential nodes feature, which makes nodes run on confidential VMs. Warning: this configuration can't be changed (or added/removed) after pool creation without deleting and recreating the entire pool. Changes to this property will trigger replacement.
- containerdConfig Property Map - Parameters for containerd configuration.
- diskSizeGb Number - Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.
- diskType String - Type of the disk attached to each node, such as pd-standard, pd-balanced, or pd-ssd.
- effectiveTaints List<Property Map> - List of Kubernetes taints applied to each node.
- enableConfidentialStorage Boolean - If enabled, boot disks are configured with confidential mode. Changes to this property will trigger replacement.
- ephemeralStorageConfig Property Map - Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. Changes to this property will trigger replacement.
- ephemeralStorageLocalSsdConfig Property Map - Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. Changes to this property will trigger replacement.
- fastSocket Property Map - Enable or disable NCCL Fast Socket in the node pool.
- gcfsConfig Property Map - GCFS configuration for this node.
- guestAccelerators List<Property Map> - List of the type and count of accelerator cards attached to the instance. Changes to this property will trigger replacement.
- gvnic Property Map - Enable or disable gvnic in the node pool. Changes to this property will trigger replacement.
- hostMaintenancePolicy Property Map - The maintenance policy for the hosts on which the GKE VMs run. Changes to this property will trigger replacement.
- imageType String - The image type to use for this node. Note that for a given image type, the latest version of it will be used.
- kubeletConfig Property Map - Node kubelet configs.
- labels Map<String> - The map of Kubernetes labels (key/value pairs) to be applied to each node. These will be added in addition to any default label(s) that Kubernetes may apply to the node.
- linuxNodeConfig Property Map - Parameters that can be configured on Linux nodes.
- localNvmeSsdBlockConfig Property Map - Parameters for raw-block local NVMe SSDs. Changes to this property will trigger replacement.
- localSsdCount Number - The number of local SSD disks to be attached to the node. Changes to this property will trigger replacement.
- localSsdEncryptionMode String - LocalSsdEncryptionMode specifies the method used for encrypting the local SSDs attached to the node. Changes to this property will trigger replacement.
- loggingVariant String - Type of logging agent that is used as the default value for node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT.
- machineType String - The name of a Google Compute Engine machine type.
- maxRunDuration String - The runtime of each node in the node pool in seconds, terminated by 's'. Example: "3600s". Changes to this property will trigger replacement.
- metadata Map<String> - The metadata key/value pairs assigned to instances in the cluster. Changes to this property will trigger replacement.
- minCpuPlatform String - Minimum CPU platform to be used by this instance. The instance may be scheduled on the specified or newer CPU platform. Changes to this property will trigger replacement.
- nodeGroup String - Setting this field will assign instances of this pool to run on the specified node group. This is useful for running workloads on sole tenant nodes. Changes to this property will trigger replacement.
- oauthScopes List<String> - The set of Google API scopes to be made available on all of the node VMs. Changes to this property will trigger replacement.
- preemptible Boolean - Whether the nodes are created as preemptible VM instances. Changes to this property will trigger replacement.
- reservationAffinity Property Map - The configuration of the desired reservation which instances could take capacity from. Structure is documented below. Changes to this property will trigger replacement.
- resourceLabels Map<String> - The GCE resource labels (a map of key/value pairs) to be applied to the node pool.
- resourceManagerTags Map<String> - A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty.
- sandboxConfig Property Map - Sandbox configuration for this node. Changes to this property will trigger replacement.
- secondaryBootDisks List<Property Map> - Secondary boot disks for preloading data or container images. Changes to this property will trigger replacement.
- serviceAccount String - The Google Cloud Platform Service Account to be used by the node VMs. Changes to this property will trigger replacement.
- shieldedInstanceConfig Property Map - Shielded Instance options. Changes to this property will trigger replacement.
- soleTenantConfig Property Map - Node affinity options for sole tenant node pools. Changes to this property will trigger replacement.
- spot Boolean - Whether the nodes are created as spot VM instances. Changes to this property will trigger replacement.
- storagePools List<String> - The list of Storage Pools where boot disks are provisioned. Changes to this property will trigger replacement.
- tags List<String> - The list of instance tags applied to all nodes.
- taints List<Property Map> - List of Kubernetes taints to be applied to each node.
- workloadMetadataConfig Property Map - The workload metadata configuration for this node.
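A TypeScript sketch pulling together several of the nodeConfig fields above; all concrete values (machine type, labels, taint, scope) are illustrative, not defaults:

```typescript
import * as gcp from "@pulumi/gcp";

// A spot-VM worker pool with custom disk, labels, a taint, and
// GKE workload metadata enabled.
const workerPool = new gcp.container.NodePool("worker-pool", {
    cluster: "my-gke-cluster", // placeholder cluster name
    nodeCount: 1,
    nodeConfig: {
        machineType: "n2-standard-4",
        diskSizeGb: 100,
        diskType: "pd-balanced",
        imageType: "COS_CONTAINERD",
        labels: { team: "data" },
        spot: true,
        oauthScopes: ["https://www.googleapis.com/auth/cloud-platform"],
        taints: [{ key: "dedicated", value: "data", effect: "NO_SCHEDULE" }],
        workloadMetadataConfig: { mode: "GKE_METADATA" },
    },
});
```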
NodePoolNodeConfigAdvancedMachineFeatures, NodePoolNodeConfigAdvancedMachineFeaturesArgs
- ThreadsPerCore int - The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed. This property is required. Changes to this property will trigger replacement.
- EnableNestedVirtualization bool - Whether the node should have nested virtualization enabled. Changes to this property will trigger replacement.
- ThreadsPerCore int - The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed. This property is required. Changes to this property will trigger replacement.
- EnableNestedVirtualization bool - Whether the node should have nested virtualization enabled. Changes to this property will trigger replacement.
- threadsPerCore Integer - The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed. This property is required. Changes to this property will trigger replacement.
- enableNestedVirtualization Boolean - Whether the node should have nested virtualization enabled. Changes to this property will trigger replacement.
- threadsPerCore number - The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed. This property is required. Changes to this property will trigger replacement.
- enableNestedVirtualization boolean - Whether the node should have nested virtualization enabled. Changes to this property will trigger replacement.
- threads_per_core int - The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed. This property is required. Changes to this property will trigger replacement.
- enable_nested_virtualization bool - Whether the node should have nested virtualization enabled. Changes to this property will trigger replacement.
- threadsPerCore Number - The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed. This property is required. Changes to this property will trigger replacement.
- enableNestedVirtualization Boolean - Whether the node should have nested virtualization enabled. Changes to this property will trigger replacement.
NodePoolNodeConfigConfidentialNodes, NodePoolNodeConfigConfidentialNodesArgs
- enabled
This property is required. Changes to this property will trigger replacement.
- Whether the Confidential Nodes feature is enabled for all nodes in this pool.
NodePoolNodeConfigContainerdConfig, NodePoolNodeConfigContainerdConfigArgs
- privateRegistryAccessConfig NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfig
- Parameters for private container registries configuration.
NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfig, NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigArgs
- enabled
This property is required. bool - Whether or not private registries are configured.
- certificateAuthorityDomainConfigs List<NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfig>
- Parameters for configuring CA certificate and domains.
NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfig, NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigArgs
- fqdns
This property is required. List<string> - List of fully-qualified domain names. IPv4 addresses and port specifications are supported.
- gcpSecretManagerCertificateConfig
This property is required. NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigGcpSecretManagerCertificateConfig - Parameters for configuring a certificate hosted in GCP Secret Manager.
NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigGcpSecretManagerCertificateConfig, NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigGcpSecretManagerCertificateConfigArgs
- secretUri
This property is required. string - URI for the secret that hosts the certificate. Must be in the format 'projects/PROJECT_NUM/secrets/SECRET_NAME/versions/VERSION_OR_LATEST'.
NodePoolNodeConfigEffectiveTaint, NodePoolNodeConfigEffectiveTaintArgs
NodePoolNodeConfigEphemeralStorageConfig, NodePoolNodeConfigEphemeralStorageConfigArgs
- localSsdCount
This property is required. Changes to this property will trigger replacement.
- Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.
NodePoolNodeConfigEphemeralStorageLocalSsdConfig, NodePoolNodeConfigEphemeralStorageLocalSsdConfigArgs
- localSsdCount
This property is required. Changes to this property will trigger replacement.
- Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.
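As a hedged TypeScript sketch of the local-SSD-backed ephemeral storage option above (the cluster name is a placeholder, and the machine type must support the requested local SSD count):

import * as gcp from "@pulumi/gcp";

// Sketch: back ephemeral storage (emptyDir volumes, container layers) with one local NVMe SSD.
const ssdPool = new gcp.container.NodePool("ssd-pool", {
    cluster: "my-gke-cluster", // placeholder cluster name
    location: "us-central1",
    nodeCount: 1,
    nodeConfig: {
        machineType: "n2-standard-8", // must support attaching the requested local SSD count
        ephemeralStorageLocalSsdConfig: {
            localSsdCount: 1,
        },
    },
});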
NodePoolNodeConfigFastSocket, NodePoolNodeConfigFastSocketArgs
- enabled
This property is required. bool - Whether or not NCCL Fast Socket is enabled.
NodePoolNodeConfigGcfsConfig, NodePoolNodeConfigGcfsConfigArgs
- enabled
This property is required. bool - Whether or not GCFS is enabled.
NodePoolNodeConfigGuestAccelerator, NodePoolNodeConfigGuestAcceleratorArgs
- count
This property is required. Changes to this property will trigger replacement.
- The number of accelerator cards exposed to an instance.
- type
This property is required. Changes to this property will trigger replacement.
- The accelerator type resource name.
- gpuDriverInstallationConfig
Changes to this property will trigger replacement.
NodePoolNodeConfigGuestAcceleratorGpuDriverInstallationConfig - Configuration for automatic installation of the GPU driver.
- gpuPartitionSize
Changes to this property will trigger replacement.
- Size of the partitions to create on the GPU. Valid values are described in the NVIDIA MIG user guide (https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning).
- gpuSharingConfig
Changes to this property will trigger replacement.
NodePoolNodeConfigGuestAcceleratorGpuSharingConfig - Configuration for GPU sharing.
NodePoolNodeConfigGuestAcceleratorGpuDriverInstallationConfig, NodePoolNodeConfigGuestAcceleratorGpuDriverInstallationConfigArgs
- gpuDriverVersion
This property is required. Changes to this property will trigger replacement.
- Mode for how the GPU driver is installed.
NodePoolNodeConfigGuestAcceleratorGpuSharingConfig, NodePoolNodeConfigGuestAcceleratorGpuSharingConfigArgs
- gpuSharingStrategy
This property is required. Changes to this property will trigger replacement.
- The type of GPU sharing strategy to enable on the GPU node. Possible values are described in the API package (https://pkg.go.dev/google.golang.org/api/container/v1#GPUSharingConfig).
- maxSharedClientsPerGpu
This property is required. Changes to this property will trigger replacement.
- The maximum number of containers that can share a GPU.
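Putting the accelerator types together, here is a minimal TypeScript sketch of a GPU node pool with automatic driver installation and time-shared GPUs; the cluster name, accelerator type, and sharing limits are illustrative assumptions:

import * as gcp from "@pulumi/gcp";

// Sketch: GPU node pool with auto-installed drivers and GPU time-sharing.
const gpuPool = new gcp.container.NodePool("gpu-pool", {
    cluster: "my-gke-cluster", // placeholder
    location: "us-central1",
    nodeCount: 1,
    nodeConfig: {
        machineType: "n1-standard-4",
        guestAccelerators: [{
            type: "nvidia-tesla-t4",
            count: 1,
            gpuDriverInstallationConfig: {
                gpuDriverVersion: "LATEST", // let GKE install the newest supported driver
            },
            gpuSharingConfig: {
                gpuSharingStrategy: "TIME_SHARING",
                maxSharedClientsPerGpu: 2, // up to two containers share each GPU
            },
        }],
        oauthScopes: ["https://www.googleapis.com/auth/cloud-platform"],
    },
});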
NodePoolNodeConfigGvnic, NodePoolNodeConfigGvnicArgs
- enabled
This property is required. Changes to this property will trigger replacement.
- Whether or not gVNIC is enabled.
NodePoolNodeConfigHostMaintenancePolicy, NodePoolNodeConfigHostMaintenancePolicyArgs
- maintenanceInterval
This property is required. Changes to this property will trigger replacement.
- .
NodePoolNodeConfigKubeletConfig, NodePoolNodeConfigKubeletConfigArgs
- allowedUnsafeSysctls List<string>
- Defines a comma-separated allowlist of unsafe sysctls or sysctl patterns which can be set on the Pods.
- containerLogMaxFiles int
- Defines the maximum number of container log files that can be present for a container.
- containerLogMaxSize string
- Defines the maximum size of the container log file before it is rotated.
- cpuCfsQuota bool
- Enables CPU CFS quota enforcement for containers that specify CPU limits.
- cpuCfsQuotaPeriod string
- Sets the CPU CFS quota period value, 'cpu.cfs_period_us'.
- cpuManagerPolicy string
- Controls the CPU management policy on the node.
- imageGcHighThresholdPercent int
- Defines the percent of disk usage after which image garbage collection is always run.
- imageGcLowThresholdPercent int
- Defines the percent of disk usage before which image garbage collection is never run; the lowest disk usage to garbage collect to.
- imageMaximumGcAge string
- Defines the maximum age an image can be unused before it is garbage collected.
- imageMinimumGcAge string
- Defines the minimum age for an unused image before it is garbage collected.
- insecureKubeletReadonlyPortEnabled string
- Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to FALSE. Possible values: TRUE, FALSE.
- podPidsLimit int
- Controls the maximum number of processes allowed to run in a pod.
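A minimal TypeScript sketch of tuning the kubelet per node pool follows; the specific values (CPU manager policy, PID limit) are illustrative assumptions, not recommendations from this page:

import * as gcp from "@pulumi/gcp";

// Sketch: per-pool kubelet tuning; values are illustrative.
const tunedPool = new gcp.container.NodePool("tuned-pool", {
    cluster: "my-gke-cluster", // placeholder
    location: "us-central1",
    nodeCount: 1,
    nodeConfig: {
        machineType: "e2-standard-4",
        kubeletConfig: {
            cpuManagerPolicy: "static",                  // pin exclusive CPUs for Guaranteed pods
            cpuCfsQuota: true,                           // enforce CPU limits via CFS quota
            podPidsLimit: 4096,                          // cap processes per pod
            insecureKubeletReadonlyPortEnabled: "FALSE", // keep the read-only port off
        },
    },
});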
NodePoolNodeConfigLinuxNodeConfig, NodePoolNodeConfigLinuxNodeConfigArgs
- cgroupMode string
- cgroupMode specifies the cgroup mode to be used on the node.
- hugepagesConfig NodePoolNodeConfigLinuxNodeConfigHugepagesConfig
- Amounts for 2M and 1G hugepages.
- sysctls Map<string, string>
- The Linux kernel parameters to be applied to the nodes and all pods running on the nodes.
NodePoolNodeConfigLinuxNodeConfigHugepagesConfig, NodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgs
- hugepageSize1g int
- Amount of 1G hugepages.
- hugepageSize2m int
- Amount of 2M hugepages.
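As a sketch of the Linux node configuration above in TypeScript (the sysctl key, cgroup mode, and hugepage amount are illustrative; GKE only accepts sysctls on its supported allowlist):

import * as gcp from "@pulumi/gcp";

// Sketch: kernel-level tuning for the pool's nodes.
const linuxPool = new gcp.container.NodePool("linux-pool", {
    cluster: "my-gke-cluster", // placeholder
    location: "us-central1",
    nodeCount: 1,
    nodeConfig: {
        machineType: "n2-standard-8",
        linuxNodeConfig: {
            cgroupMode: "CGROUP_MODE_V2",
            sysctls: {
                "net.core.somaxconn": "4096",
            },
            hugepagesConfig: {
                hugepageSize2m: 512, // 512 x 2M hugepages per node
            },
        },
    },
});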
NodePoolNodeConfigLocalNvmeSsdBlockConfig, NodePoolNodeConfigLocalNvmeSsdBlockConfigArgs
- localSsdCount
This property is required. Changes to this property will trigger replacement.
- Number of raw-block local NVMe SSD disks to be attached to the node. Each local SSD is 375 GB in size.
NodePoolNodeConfigReservationAffinity, NodePoolNodeConfigReservationAffinityArgs
- consumeReservationType
This property is required. Changes to this property will trigger replacement.
- The type of reservation consumption. Accepted values are:
"UNSPECIFIED": Default value; this should not be used.
"NO_RESERVATION": Do not consume from any reserved capacity.
"ANY_RESERVATION": Consume any reservation available.
"SPECIFIC_RESERVATION": Must consume from a specific reservation; specify the key and values fields to identify the reservation.
- key
Changes to this property will trigger replacement.
- The label key of a reservation resource. To target a SPECIFIC_RESERVATION by name, specify "compute.googleapis.com/reservation-name" as the key and the name of your reservation as its value.
- values
Changes to this property will trigger replacement.
- The list of label values of reservation resources. For example: the name of the specific reservation when using a key of "compute.googleapis.com/reservation-name".
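A minimal TypeScript sketch of consuming a specific Compute Engine reservation; the reservation name and cluster are hypothetical placeholders:

import * as gcp from "@pulumi/gcp";

// Sketch: draw node capacity from a named reservation.
const reservedPool = new gcp.container.NodePool("reserved-pool", {
    cluster: "my-gke-cluster", // placeholder
    location: "us-central1",
    nodeCount: 1,
    nodeConfig: {
        machineType: "n2-standard-4",
        reservationAffinity: {
            consumeReservationType: "SPECIFIC_RESERVATION",
            key: "compute.googleapis.com/reservation-name",
            values: ["my-reservation"], // placeholder reservation name
        },
    },
});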
NodePoolNodeConfigSandboxConfig, NodePoolNodeConfigSandboxConfigArgs
- sandboxType
This property is required. string - Type of the sandbox to use for the node (e.g. 'gvisor').
NodePoolNodeConfigSecondaryBootDisk, NodePoolNodeConfigSecondaryBootDiskArgs
- diskImage
This property is required. Changes to this property will trigger replacement.
- Disk image to create the secondary boot disk from.
- mode
Changes to this property will trigger replacement.
- Mode for how the secondary boot disk is used.
NodePoolNodeConfigShieldedInstanceConfig, NodePoolNodeConfigShieldedInstanceConfigArgs
- enableIntegrityMonitoring
Changes to this property will trigger replacement.
- Defines whether the instance has integrity monitoring enabled.
- enableSecureBoot
Changes to this property will trigger replacement.
- Defines whether the instance has Secure Boot enabled.
NodePoolNodeConfigSoleTenantConfig, NodePoolNodeConfigSoleTenantConfigArgs
- nodeAffinities
This property is required. Changes to this property will trigger replacement.
List<NodePoolNodeConfigSoleTenantConfigNodeAffinity> - .
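The node affinity fields themselves are not reproduced on this page; assuming the provider's key/operator/values shape, a sketch of pinning a pool to a sole-tenant node group might look like the following (every name here is a hypothetical placeholder):

import * as gcp from "@pulumi/gcp";

// Sketch: run the pool on sole-tenant nodes. The affinity key, operator, and
// node group name are assumptions based on the provider schema, not this page.
const soleTenantPool = new gcp.container.NodePool("sole-tenant-pool", {
    cluster: "my-gke-cluster", // placeholder
    location: "us-central1",
    nodeCount: 1,
    nodeConfig: {
        machineType: "n1-standard-8",
        soleTenantConfig: {
            nodeAffinities: [{
                key: "compute.googleapis.com/node-group-name",
                operator: "IN",
                values: ["my-sole-tenant-group"], // placeholder node group
            }],
        },
    },
});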
NodePoolNodeConfigSoleTenantConfigNodeAffinity, NodePoolNodeConfigSoleTenantConfigNodeAffinityArgs
NodePoolNodeConfigTaint, NodePoolNodeConfigTaintArgs
NodePoolNodeConfigWorkloadMetadataConfig, NodePoolNodeConfigWorkloadMetadataConfigArgs
- mode
This property is required. string - Mode is the configuration for how to expose metadata to workloads running on the node.
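A one-field TypeScript sketch: exposing only the GKE metadata server to workloads, which is the mode Workload Identity relies on (cluster name is a placeholder):

import * as gcp from "@pulumi/gcp";

// Sketch: serve workloads the GKE metadata server instead of the GCE one.
const wiPool = new gcp.container.NodePool("wi-pool", {
    cluster: "my-gke-cluster", // placeholder
    location: "us-central1",
    nodeCount: 1,
    nodeConfig: {
        machineType: "e2-medium",
        workloadMetadataConfig: {
            mode: "GKE_METADATA",
        },
    },
});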
NodePoolPlacementPolicy, NodePoolPlacementPolicyArgs
- type
This property is required. string - The type of the policy. Supports a single value: COMPACT. Specifying the COMPACT placement policy type places the node pool's nodes in closer physical proximity in order to reduce network latency between nodes.
- policyName
Changes to this property will trigger replacement.
- If set, refers to the name of a custom resource policy supplied by the user. The resource policy must be in the same project and region as the node pool. If not found, an InvalidArgument error is returned.
- tpuTopology string
- The TPU placement topology for the pod slice node pool.
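A minimal TypeScript sketch of compact placement; the machine type is an illustrative assumption, since compact placement requires a supported machine family:

import * as gcp from "@pulumi/gcp";

// Sketch: COMPACT placement to reduce inter-node network latency.
const compactPool = new gcp.container.NodePool("compact-pool", {
    cluster: "my-gke-cluster", // placeholder
    location: "us-central1",
    nodeCount: 2,
    placementPolicy: {
        type: "COMPACT",
    },
    nodeConfig: {
        machineType: "c2-standard-4", // a machine family that supports compact placement
    },
});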
NodePoolQueuedProvisioning, NodePoolQueuedProvisioningArgs
- enabled
This property is required. Changes to this property will trigger replacement.
- Makes nodes obtainable exclusively through the ProvisioningRequest API.
NodePoolUpgradeSettings, NodePoolUpgradeSettingsArgs
- blueGreenSettings NodePoolUpgradeSettingsBlueGreenSettings
- The settings to adjust blue-green upgrades. Structure is documented below.
- maxSurge int
- The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater.
- maxUnavailable int
- The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater. max_surge and max_unavailable must not be negative, and at least one of them must be greater than zero.
- strategy string
- The upgrade strategy to be used for upgrading the nodes.
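A minimal TypeScript sketch of a surge upgrade that never reduces capacity (one extra node is added before an old one drains); the node counts are illustrative:

import * as gcp from "@pulumi/gcp";

// Sketch: classic surge upgrade with zero unavailable nodes.
const surgePool = new gcp.container.NodePool("surge-pool", {
    cluster: "my-gke-cluster", // placeholder
    location: "us-central1",
    nodeCount: 3,
    upgradeSettings: {
        strategy: "SURGE",
        maxSurge: 1,       // one extra node during the upgrade
        maxUnavailable: 0, // never take existing capacity offline
    },
});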
NodePoolUpgradeSettingsBlueGreenSettings, NodePoolUpgradeSettingsBlueGreenSettingsArgs
- standardRolloutPolicy
This property is required. NodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicy - Specifies the standard policy settings for blue-green upgrades.
- nodePoolSoakDuration string
- Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up.
NodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicy, NodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicyArgs
- batchNodeCount int
- Number of blue nodes to drain in a batch.
- batchPercentage double
- Percentage of the blue pool nodes to drain in a batch.
- batchSoakDuration string
- Soak time after each batch gets drained.
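Combining the blue-green types above, here is a hedged TypeScript sketch that drains a quarter of the blue pool per batch with soak periods between batches and before cleanup; the durations and percentages are illustrative:

import * as gcp from "@pulumi/gcp";

// Sketch: blue-green upgrade with a standard rollout policy.
const blueGreenPool = new gcp.container.NodePool("bg-pool", {
    cluster: "my-gke-cluster", // placeholder
    location: "us-central1",
    nodeCount: 4,
    upgradeSettings: {
        strategy: "BLUE_GREEN",
        blueGreenSettings: {
            standardRolloutPolicy: {
                batchPercentage: 0.25,     // drain a quarter of the blue nodes per batch
                batchSoakDuration: "120s", // wait after each batch
            },
            nodePoolSoakDuration: "600s",  // wait before cleaning up the blue pool
        },
    },
});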
Import
Node pools can be imported using the project, location, cluster, and name. If the project is omitted, the project value in the provider configuration will be used. Examples:
{{project_id}}/{{location}}/{{cluster_id}}/{{pool_id}}
{{location}}/{{cluster_id}}/{{pool_id}}
When using the pulumi import
command, node pools can be imported using one of the formats above. For example:
$ pulumi import gcp:container/nodePool:NodePool default {{project_id}}/{{location}}/{{cluster_id}}/{{pool_id}}
$ pulumi import gcp:container/nodePool:NodePool default {{location}}/{{cluster_id}}/{{pool_id}}
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- Google Cloud (GCP) Classic pulumi/pulumi-gcp
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the
google-beta
Terraform Provider.