gcp.managedkafka.Connector
Example Usage
Managedkafka Connector Basic
TypeScript:
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const mkcNetwork = new gcp.compute.Network("mkc_network", {
name: "my-network-0",
autoCreateSubnetworks: false,
});
const mkcSubnet = new gcp.compute.Subnetwork("mkc_subnet", {
name: "my-subnetwork-0",
ipCidrRange: "10.4.0.0/16",
region: "us-central1",
network: mkcNetwork.id,
});
const mkcAdditionalSubnet = new gcp.compute.Subnetwork("mkc_additional_subnet", {
name: "my-additional-subnetwork-0",
ipCidrRange: "10.5.0.0/16",
region: "us-central1",
network: mkcNetwork.id,
});
const cpsTopic = new gcp.pubsub.Topic("cps_topic", {
name: "my-cps-topic",
messageRetentionDuration: "86600s",
});
const project = gcp.organizations.getProject({});
const gmkCluster = new gcp.managedkafka.Cluster("gmk_cluster", {
clusterId: "my-cluster",
location: "us-central1",
capacityConfig: {
vcpuCount: "3",
memoryBytes: "3221225472",
},
gcpConfig: {
accessConfig: {
networkConfigs: [{
subnet: pulumi.all([project, mkcSubnet.id]).apply(([project, id]) => `projects/${project.projectId}/regions/us-central1/subnetworks/${id}`),
}],
},
},
});
const gmkTopic = new gcp.managedkafka.Topic("gmk_topic", {
topicId: "my-topic",
cluster: gmkCluster.clusterId,
location: "us-central1",
partitionCount: 2,
replicationFactor: 3,
});
const mkcCluster = new gcp.managedkafka.ConnectCluster("mkc_cluster", {
connectClusterId: "my-connect-cluster",
kafkaCluster: pulumi.all([project, gmkCluster.clusterId]).apply(([project, clusterId]) => `projects/${project.projectId}/locations/us-central1/clusters/${clusterId}`),
location: "us-central1",
capacityConfig: {
vcpuCount: "12",
memoryBytes: "21474836480",
},
gcpConfig: {
accessConfig: {
networkConfigs: [{
primarySubnet: pulumi.all([project, mkcSubnet.id]).apply(([project, id]) => `projects/${project.projectId}/regions/us-central1/subnetworks/${id}`),
additionalSubnets: [mkcAdditionalSubnet.id],
dnsDomainNames: [pulumi.all([gmkCluster.clusterId, project]).apply(([clusterId, project]) => `${clusterId}.us-central1.managedkafka-staging.${project.projectId}.cloud-staging.goog`)],
}],
},
},
labels: {
key: "value",
},
});
const example = new gcp.managedkafka.Connector("example", {
connectorId: "my-connector",
connectCluster: mkcCluster.connectClusterId,
location: "us-central1",
configs: {
"connector.class": "com.google.pubsub.kafka.sink.CloudPubSubSinkConnector",
name: "my-connector",
"tasks.max": "1",
topics: gmkTopic.topicId,
"cps.topic": cpsTopic.name,
"cps.project": project.then(project => project.projectId),
"value.converter": "org.apache.kafka.connect.storage.StringConverter",
"key.converter": "org.apache.kafka.connect.storage.StringConverter",
},
taskRestartPolicy: {
minimumBackoff: "60s",
maximumBackoff: "1800s",
},
});
Python:
import pulumi
import pulumi_gcp as gcp
mkc_network = gcp.compute.Network("mkc_network",
name="my-network-0",
auto_create_subnetworks=False)
mkc_subnet = gcp.compute.Subnetwork("mkc_subnet",
name="my-subnetwork-0",
ip_cidr_range="10.4.0.0/16",
region="us-central1",
network=mkc_network.id)
mkc_additional_subnet = gcp.compute.Subnetwork("mkc_additional_subnet",
name="my-additional-subnetwork-0",
ip_cidr_range="10.5.0.0/16",
region="us-central1",
network=mkc_network.id)
cps_topic = gcp.pubsub.Topic("cps_topic",
name="my-cps-topic",
message_retention_duration="86600s")
project = gcp.organizations.get_project()
gmk_cluster = gcp.managedkafka.Cluster("gmk_cluster",
cluster_id="my-cluster",
location="us-central1",
capacity_config={
"vcpu_count": "3",
"memory_bytes": "3221225472",
},
gcp_config={
"access_config": {
"network_configs": [{
"subnet": mkc_subnet.id.apply(lambda id: f"projects/{project.project_id}/regions/us-central1/subnetworks/{id}"),
}],
},
})
gmk_topic = gcp.managedkafka.Topic("gmk_topic",
topic_id="my-topic",
cluster=gmk_cluster.cluster_id,
location="us-central1",
partition_count=2,
replication_factor=3)
mkc_cluster = gcp.managedkafka.ConnectCluster("mkc_cluster",
connect_cluster_id="my-connect-cluster",
kafka_cluster=gmk_cluster.cluster_id.apply(lambda cluster_id: f"projects/{project.project_id}/locations/us-central1/clusters/{cluster_id}"),
location="us-central1",
capacity_config={
"vcpu_count": "12",
"memory_bytes": "21474836480",
},
gcp_config={
"access_config": {
"network_configs": [{
"primary_subnet": mkc_subnet.id.apply(lambda id: f"projects/{project.project_id}/regions/us-central1/subnetworks/{id}"),
"additional_subnets": [mkc_additional_subnet.id],
"dns_domain_names": [gmk_cluster.cluster_id.apply(lambda cluster_id: f"{cluster_id}.us-central1.managedkafka-staging.{project.project_id}.cloud-staging.goog")],
}],
},
},
labels={
"key": "value",
})
example = gcp.managedkafka.Connector("example",
connector_id="my-connector",
connect_cluster=mkc_cluster.connect_cluster_id,
location="us-central1",
configs={
"connector.class": "com.google.pubsub.kafka.sink.CloudPubSubSinkConnector",
"name": "my-connector",
"tasks.max": "1",
"topics": gmk_topic.topic_id,
"cps.topic": cps_topic.name,
"cps.project": project.project_id,
"value.converter": "org.apache.kafka.connect.storage.StringConverter",
"key.converter": "org.apache.kafka.connect.storage.StringConverter",
},
task_restart_policy={
"minimum_backoff": "60s",
"maximum_backoff": "1800s",
})
Go:
package main
import (
"fmt"
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute"
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/managedkafka"
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations"
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/pubsub"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
mkcNetwork, err := compute.NewNetwork(ctx, "mkc_network", &compute.NetworkArgs{
Name: pulumi.String("my-network-0"),
AutoCreateSubnetworks: pulumi.Bool(false),
})
if err != nil {
return err
}
mkcSubnet, err := compute.NewSubnetwork(ctx, "mkc_subnet", &compute.SubnetworkArgs{
Name: pulumi.String("my-subnetwork-0"),
IpCidrRange: pulumi.String("10.4.0.0/16"),
Region: pulumi.String("us-central1"),
Network: mkcNetwork.ID(),
})
if err != nil {
return err
}
mkcAdditionalSubnet, err := compute.NewSubnetwork(ctx, "mkc_additional_subnet", &compute.SubnetworkArgs{
Name: pulumi.String("my-additional-subnetwork-0"),
IpCidrRange: pulumi.String("10.5.0.0/16"),
Region: pulumi.String("us-central1"),
Network: mkcNetwork.ID(),
})
if err != nil {
return err
}
cpsTopic, err := pubsub.NewTopic(ctx, "cps_topic", &pubsub.TopicArgs{
Name: pulumi.String("my-cps-topic"),
MessageRetentionDuration: pulumi.String("86600s"),
})
if err != nil {
return err
}
project, err := organizations.LookupProject(ctx, &organizations.LookupProjectArgs{}, nil)
if err != nil {
return err
}
gmkCluster, err := managedkafka.NewCluster(ctx, "gmk_cluster", &managedkafka.ClusterArgs{
ClusterId: pulumi.String("my-cluster"),
Location: pulumi.String("us-central1"),
CapacityConfig: &managedkafka.ClusterCapacityConfigArgs{
VcpuCount: pulumi.String("3"),
MemoryBytes: pulumi.String("3221225472"),
},
GcpConfig: &managedkafka.ClusterGcpConfigArgs{
AccessConfig: &managedkafka.ClusterGcpConfigAccessConfigArgs{
NetworkConfigs: managedkafka.ClusterGcpConfigAccessConfigNetworkConfigArray{
&managedkafka.ClusterGcpConfigAccessConfigNetworkConfigArgs{
Subnet: mkcSubnet.ID().ApplyT(func(id string) (string, error) {
return fmt.Sprintf("projects/%v/regions/us-central1/subnetworks/%v", project.ProjectId, id), nil
}).(pulumi.StringOutput),
},
},
},
},
})
if err != nil {
return err
}
gmkTopic, err := managedkafka.NewTopic(ctx, "gmk_topic", &managedkafka.TopicArgs{
TopicId: pulumi.String("my-topic"),
Cluster: gmkCluster.ClusterId,
Location: pulumi.String("us-central1"),
PartitionCount: pulumi.Int(2),
ReplicationFactor: pulumi.Int(3),
})
if err != nil {
return err
}
mkcCluster, err := managedkafka.NewConnectCluster(ctx, "mkc_cluster", &managedkafka.ConnectClusterArgs{
ConnectClusterId: pulumi.String("my-connect-cluster"),
KafkaCluster: gmkCluster.ClusterId.ApplyT(func(clusterId string) (string, error) {
return fmt.Sprintf("projects/%v/locations/us-central1/clusters/%v", project.ProjectId, clusterId), nil
}).(pulumi.StringOutput),
Location: pulumi.String("us-central1"),
CapacityConfig: &managedkafka.ConnectClusterCapacityConfigArgs{
VcpuCount: pulumi.String("12"),
MemoryBytes: pulumi.String("21474836480"),
},
GcpConfig: &managedkafka.ConnectClusterGcpConfigArgs{
AccessConfig: &managedkafka.ConnectClusterGcpConfigAccessConfigArgs{
NetworkConfigs: managedkafka.ConnectClusterGcpConfigAccessConfigNetworkConfigArray{
&managedkafka.ConnectClusterGcpConfigAccessConfigNetworkConfigArgs{
PrimarySubnet: mkcSubnet.ID().ApplyT(func(id string) (string, error) {
return fmt.Sprintf("projects/%v/regions/us-central1/subnetworks/%v", project.ProjectId, id), nil
}).(pulumi.StringOutput),
AdditionalSubnets: pulumi.StringArray{
mkcAdditionalSubnet.ID(),
},
DnsDomainNames: pulumi.StringArray{
gmkCluster.ClusterId.ApplyT(func(clusterId string) (string, error) {
return fmt.Sprintf("%v.us-central1.managedkafka-staging.%v.cloud-staging.goog", clusterId, project.ProjectId), nil
}).(pulumi.StringOutput),
},
},
},
},
},
Labels: pulumi.StringMap{
"key": pulumi.String("value"),
},
})
if err != nil {
return err
}
_, err = managedkafka.NewConnector(ctx, "example", &managedkafka.ConnectorArgs{
ConnectorId: pulumi.String("my-connector"),
ConnectCluster: mkcCluster.ConnectClusterId,
Location: pulumi.String("us-central1"),
Configs: pulumi.StringMap{
"connector.class": pulumi.String("com.google.pubsub.kafka.sink.CloudPubSubSinkConnector"),
"name": pulumi.String("my-connector"),
"tasks.max": pulumi.String("1"),
"topics": gmkTopic.TopicId,
"cps.topic": cpsTopic.Name,
"cps.project": pulumi.String(project.ProjectId),
"value.converter": pulumi.String("org.apache.kafka.connect.storage.StringConverter"),
"key.converter": pulumi.String("org.apache.kafka.connect.storage.StringConverter"),
},
TaskRestartPolicy: &managedkafka.ConnectorTaskRestartPolicyArgs{
MinimumBackoff: pulumi.String("60s"),
MaximumBackoff: pulumi.String("1800s"),
},
})
if err != nil {
return err
}
return nil
})
}
C#:
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
var mkcNetwork = new Gcp.Compute.Network("mkc_network", new()
{
Name = "my-network-0",
AutoCreateSubnetworks = false,
});
var mkcSubnet = new Gcp.Compute.Subnetwork("mkc_subnet", new()
{
Name = "my-subnetwork-0",
IpCidrRange = "10.4.0.0/16",
Region = "us-central1",
Network = mkcNetwork.Id,
});
var mkcAdditionalSubnet = new Gcp.Compute.Subnetwork("mkc_additional_subnet", new()
{
Name = "my-additional-subnetwork-0",
IpCidrRange = "10.5.0.0/16",
Region = "us-central1",
Network = mkcNetwork.Id,
});
var cpsTopic = new Gcp.PubSub.Topic("cps_topic", new()
{
Name = "my-cps-topic",
MessageRetentionDuration = "86600s",
});
var project = Gcp.Organizations.GetProject.Invoke();
var gmkCluster = new Gcp.ManagedKafka.Cluster("gmk_cluster", new()
{
ClusterId = "my-cluster",
Location = "us-central1",
CapacityConfig = new Gcp.ManagedKafka.Inputs.ClusterCapacityConfigArgs
{
VcpuCount = "3",
MemoryBytes = "3221225472",
},
GcpConfig = new Gcp.ManagedKafka.Inputs.ClusterGcpConfigArgs
{
AccessConfig = new Gcp.ManagedKafka.Inputs.ClusterGcpConfigAccessConfigArgs
{
NetworkConfigs = new[]
{
new Gcp.ManagedKafka.Inputs.ClusterGcpConfigAccessConfigNetworkConfigArgs
{
Subnet = Output.Tuple(project, mkcSubnet.Id).Apply(values =>
{
var project = values.Item1;
var id = values.Item2;
return $"projects/{project.Apply(getProjectResult => getProjectResult.ProjectId)}/regions/us-central1/subnetworks/{id}";
}),
},
},
},
},
});
var gmkTopic = new Gcp.ManagedKafka.Topic("gmk_topic", new()
{
TopicId = "my-topic",
Cluster = gmkCluster.ClusterId,
Location = "us-central1",
PartitionCount = 2,
ReplicationFactor = 3,
});
var mkcCluster = new Gcp.ManagedKafka.ConnectCluster("mkc_cluster", new()
{
ConnectClusterId = "my-connect-cluster",
KafkaCluster = Output.Tuple(project, gmkCluster.ClusterId).Apply(values =>
{
var project = values.Item1;
var clusterId = values.Item2;
return $"projects/{project.Apply(getProjectResult => getProjectResult.ProjectId)}/locations/us-central1/clusters/{clusterId}";
}),
Location = "us-central1",
CapacityConfig = new Gcp.ManagedKafka.Inputs.ConnectClusterCapacityConfigArgs
{
VcpuCount = "12",
MemoryBytes = "21474836480",
},
GcpConfig = new Gcp.ManagedKafka.Inputs.ConnectClusterGcpConfigArgs
{
AccessConfig = new Gcp.ManagedKafka.Inputs.ConnectClusterGcpConfigAccessConfigArgs
{
NetworkConfigs = new[]
{
new Gcp.ManagedKafka.Inputs.ConnectClusterGcpConfigAccessConfigNetworkConfigArgs
{
PrimarySubnet = Output.Tuple(project, mkcSubnet.Id).Apply(values =>
{
var project = values.Item1;
var id = values.Item2;
return $"projects/{project.Apply(getProjectResult => getProjectResult.ProjectId)}/regions/us-central1/subnetworks/{id}";
}),
AdditionalSubnets = new[]
{
mkcAdditionalSubnet.Id,
},
DnsDomainNames = new[]
{
Output.Tuple(gmkCluster.ClusterId, project).Apply(values =>
{
var clusterId = values.Item1;
var project = values.Item2;
return $"{clusterId}.us-central1.managedkafka-staging.{project.Apply(getProjectResult => getProjectResult.ProjectId)}.cloud-staging.goog";
}),
},
},
},
},
},
Labels =
{
{ "key", "value" },
},
});
var example = new Gcp.ManagedKafka.Connector("example", new()
{
ConnectorId = "my-connector",
ConnectCluster = mkcCluster.ConnectClusterId,
Location = "us-central1",
Configs =
{
{ "connector.class", "com.google.pubsub.kafka.sink.CloudPubSubSinkConnector" },
{ "name", "my-connector" },
{ "tasks.max", "1" },
{ "topics", gmkTopic.TopicId },
{ "cps.topic", cpsTopic.Name },
{ "cps.project", project.Apply(getProjectResult => getProjectResult.ProjectId) },
{ "value.converter", "org.apache.kafka.connect.storage.StringConverter" },
{ "key.converter", "org.apache.kafka.connect.storage.StringConverter" },
},
TaskRestartPolicy = new Gcp.ManagedKafka.Inputs.ConnectorTaskRestartPolicyArgs
{
MinimumBackoff = "60s",
MaximumBackoff = "1800s",
},
});
});
Java:
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.compute.Network;
import com.pulumi.gcp.compute.NetworkArgs;
import com.pulumi.gcp.compute.Subnetwork;
import com.pulumi.gcp.compute.SubnetworkArgs;
import com.pulumi.gcp.pubsub.Topic;
import com.pulumi.gcp.pubsub.TopicArgs;
import com.pulumi.gcp.organizations.OrganizationsFunctions;
import com.pulumi.gcp.organizations.inputs.GetProjectArgs;
import com.pulumi.gcp.managedkafka.Cluster;
import com.pulumi.gcp.managedkafka.ClusterArgs;
import com.pulumi.gcp.managedkafka.inputs.ClusterCapacityConfigArgs;
import com.pulumi.gcp.managedkafka.inputs.ClusterGcpConfigArgs;
import com.pulumi.gcp.managedkafka.inputs.ClusterGcpConfigAccessConfigArgs;
import com.pulumi.gcp.managedkafka.inputs.ClusterGcpConfigAccessConfigNetworkConfigArgs;
import com.pulumi.gcp.managedkafka.Topic;
import com.pulumi.gcp.managedkafka.TopicArgs;
import com.pulumi.gcp.managedkafka.ConnectCluster;
import com.pulumi.gcp.managedkafka.ConnectClusterArgs;
import com.pulumi.gcp.managedkafka.inputs.ConnectClusterCapacityConfigArgs;
import com.pulumi.gcp.managedkafka.inputs.ConnectClusterGcpConfigArgs;
import com.pulumi.gcp.managedkafka.inputs.ConnectClusterGcpConfigAccessConfigArgs;
import com.pulumi.gcp.managedkafka.inputs.ConnectClusterGcpConfigAccessConfigNetworkConfigArgs;
import com.pulumi.gcp.managedkafka.Connector;
import com.pulumi.gcp.managedkafka.ConnectorArgs;
import com.pulumi.gcp.managedkafka.inputs.ConnectorTaskRestartPolicyArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var mkcNetwork = new Network("mkcNetwork", NetworkArgs.builder()
.name("my-network-0")
.autoCreateSubnetworks(false)
.build());
var mkcSubnet = new Subnetwork("mkcSubnet", SubnetworkArgs.builder()
.name("my-subnetwork-0")
.ipCidrRange("10.4.0.0/16")
.region("us-central1")
.network(mkcNetwork.id())
.build());
var mkcAdditionalSubnet = new Subnetwork("mkcAdditionalSubnet", SubnetworkArgs.builder()
.name("my-additional-subnetwork-0")
.ipCidrRange("10.5.0.0/16")
.region("us-central1")
.network(mkcNetwork.id())
.build());
var cpsTopic = new Topic("cpsTopic", TopicArgs.builder()
.name("my-cps-topic")
.messageRetentionDuration("86600s")
.build());
final var project = OrganizationsFunctions.getProject();
var gmkCluster = new Cluster("gmkCluster", ClusterArgs.builder()
.clusterId("my-cluster")
.location("us-central1")
.capacityConfig(ClusterCapacityConfigArgs.builder()
.vcpuCount("3")
.memoryBytes("3221225472")
.build())
.gcpConfig(ClusterGcpConfigArgs.builder()
.accessConfig(ClusterGcpConfigAccessConfigArgs.builder()
.networkConfigs(ClusterGcpConfigAccessConfigNetworkConfigArgs.builder()
.subnet(Output.tuple(project, mkcSubnet.id()).applyValue(values -> String.format("projects/%s/regions/us-central1/subnetworks/%s", values.t1.projectId(), values.t2)))
.build())
.build())
.build())
.build());
var gmkTopic = new Topic("gmkTopic", TopicArgs.builder()
.topicId("my-topic")
.cluster(gmkCluster.clusterId())
.location("us-central1")
.partitionCount(2)
.replicationFactor(3)
.build());
var mkcCluster = new ConnectCluster("mkcCluster", ConnectClusterArgs.builder()
.connectClusterId("my-connect-cluster")
.kafkaCluster(Output.tuple(project, gmkCluster.clusterId()).applyValue(values -> String.format("projects/%s/locations/us-central1/clusters/%s", values.t1.projectId(), values.t2)))
.location("us-central1")
.capacityConfig(ConnectClusterCapacityConfigArgs.builder()
.vcpuCount("12")
.memoryBytes("21474836480")
.build())
.gcpConfig(ConnectClusterGcpConfigArgs.builder()
.accessConfig(ConnectClusterGcpConfigAccessConfigArgs.builder()
.networkConfigs(ConnectClusterGcpConfigAccessConfigNetworkConfigArgs.builder()
.primarySubnet(Output.tuple(project, mkcSubnet.id()).applyValue(values -> String.format("projects/%s/regions/us-central1/subnetworks/%s", values.t1.projectId(), values.t2)))
.additionalSubnets(mkcAdditionalSubnet.id().applyValue(List::of))
.dnsDomainNames(Output.tuple(gmkCluster.clusterId(), project).applyValue(values -> List.of(String.format("%s.us-central1.managedkafka-staging.%s.cloud-staging.goog", values.t1, values.t2.projectId()))))
.build())
.build())
.build())
.labels(Map.of("key", "value"))
.build());
var example = new Connector("example", ConnectorArgs.builder()
.connectorId("my-connector")
.connectCluster(mkcCluster.connectClusterId())
.location("us-central1")
.configs(Output.tuple(gmkTopic.topicId(), cpsTopic.name(), project).applyValue(values -> Map.ofEntries(
Map.entry("connector.class", "com.google.pubsub.kafka.sink.CloudPubSubSinkConnector"),
Map.entry("name", "my-connector"),
Map.entry("tasks.max", "1"),
Map.entry("topics", values.t1),
Map.entry("cps.topic", values.t2),
Map.entry("cps.project", values.t3.projectId()),
Map.entry("value.converter", "org.apache.kafka.connect.storage.StringConverter"),
Map.entry("key.converter", "org.apache.kafka.connect.storage.StringConverter")
)))
.taskRestartPolicy(ConnectorTaskRestartPolicyArgs.builder()
.minimumBackoff("60s")
.maximumBackoff("1800s")
.build())
.build());
}
}
YAML:
resources:
mkcNetwork:
type: gcp:compute:Network
name: mkc_network
properties:
name: my-network-0
autoCreateSubnetworks: false
mkcSubnet:
type: gcp:compute:Subnetwork
name: mkc_subnet
properties:
name: my-subnetwork-0
ipCidrRange: 10.4.0.0/16
region: us-central1
network: ${mkcNetwork.id}
mkcAdditionalSubnet:
type: gcp:compute:Subnetwork
name: mkc_additional_subnet
properties:
name: my-additional-subnetwork-0
ipCidrRange: 10.5.0.0/16
region: us-central1
network: ${mkcNetwork.id}
cpsTopic:
type: gcp:pubsub:Topic
name: cps_topic
properties:
name: my-cps-topic
messageRetentionDuration: 86600s
gmkCluster:
type: gcp:managedkafka:Cluster
name: gmk_cluster
properties:
clusterId: my-cluster
location: us-central1
capacityConfig:
vcpuCount: '3'
memoryBytes: '3221225472'
gcpConfig:
accessConfig:
networkConfigs:
- subnet: projects/${project.projectId}/regions/us-central1/subnetworks/${mkcSubnet.id}
gmkTopic:
type: gcp:managedkafka:Topic
name: gmk_topic
properties:
topicId: my-topic
cluster: ${gmkCluster.clusterId}
location: us-central1
partitionCount: 2
replicationFactor: 3
mkcCluster:
type: gcp:managedkafka:ConnectCluster
name: mkc_cluster
properties:
connectClusterId: my-connect-cluster
kafkaCluster: projects/${project.projectId}/locations/us-central1/clusters/${gmkCluster.clusterId}
location: us-central1
capacityConfig:
vcpuCount: '12'
memoryBytes: '21474836480'
gcpConfig:
accessConfig:
networkConfigs:
- primarySubnet: projects/${project.projectId}/regions/us-central1/subnetworks/${mkcSubnet.id}
additionalSubnets:
- ${mkcAdditionalSubnet.id}
dnsDomainNames:
- ${gmkCluster.clusterId}.us-central1.managedkafka-staging.${project.projectId}.cloud-staging.goog
labels:
key: value
example:
type: gcp:managedkafka:Connector
properties:
connectorId: my-connector
connectCluster: ${mkcCluster.connectClusterId}
location: us-central1
configs:
connector.class: com.google.pubsub.kafka.sink.CloudPubSubSinkConnector
name: my-connector
tasks.max: '1'
topics: ${gmkTopic.topicId}
cps.topic: ${cpsTopic.name}
cps.project: ${project.projectId}
value.converter: org.apache.kafka.connect.storage.StringConverter
key.converter: org.apache.kafka.connect.storage.StringConverter
taskRestartPolicy:
minimumBackoff: 60s
maximumBackoff: 1800s
variables:
project:
fn::invoke:
function: gcp:organizations:getProject
arguments: {}
Create Connector Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new Connector(name: string, args: ConnectorArgs, opts?: CustomResourceOptions);
@overload
def Connector(resource_name: str,
args: ConnectorArgs,
opts: Optional[ResourceOptions] = None)
@overload
def Connector(resource_name: str,
opts: Optional[ResourceOptions] = None,
connect_cluster: Optional[str] = None,
connector_id: Optional[str] = None,
location: Optional[str] = None,
configs: Optional[Mapping[str, str]] = None,
project: Optional[str] = None,
task_restart_policy: Optional[ConnectorTaskRestartPolicyArgs] = None)
func NewConnector(ctx *Context, name string, args ConnectorArgs, opts ...ResourceOption) (*Connector, error)
public Connector(string name, ConnectorArgs args, CustomResourceOptions? opts = null)
public Connector(String name, ConnectorArgs args)
public Connector(String name, ConnectorArgs args, CustomResourceOptions options)
type: gcp:managedkafka:Connector
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args ConnectorArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args ConnectorArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args ConnectorArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args ConnectorArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args ConnectorArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var connectorResource = new Gcp.ManagedKafka.Connector("connectorResource", new()
{
ConnectCluster = "string",
ConnectorId = "string",
Location = "string",
Configs =
{
{ "string", "string" },
},
Project = "string",
TaskRestartPolicy = new Gcp.ManagedKafka.Inputs.ConnectorTaskRestartPolicyArgs
{
MaximumBackoff = "string",
MinimumBackoff = "string",
},
});
example, err := managedkafka.NewConnector(ctx, "connectorResource", &managedkafka.ConnectorArgs{
ConnectCluster: pulumi.String("string"),
ConnectorId: pulumi.String("string"),
Location: pulumi.String("string"),
Configs: pulumi.StringMap{
"string": pulumi.String("string"),
},
Project: pulumi.String("string"),
TaskRestartPolicy: &managedkafka.ConnectorTaskRestartPolicyArgs{
MaximumBackoff: pulumi.String("string"),
MinimumBackoff: pulumi.String("string"),
},
})
var connectorResource = new Connector("connectorResource", ConnectorArgs.builder()
.connectCluster("string")
.connectorId("string")
.location("string")
.configs(Map.of("string", "string"))
.project("string")
.taskRestartPolicy(ConnectorTaskRestartPolicyArgs.builder()
.maximumBackoff("string")
.minimumBackoff("string")
.build())
.build());
connector_resource = gcp.managedkafka.Connector("connectorResource",
connect_cluster="string",
connector_id="string",
location="string",
configs={
"string": "string",
},
project="string",
task_restart_policy={
"maximum_backoff": "string",
"minimum_backoff": "string",
})
const connectorResource = new gcp.managedkafka.Connector("connectorResource", {
connectCluster: "string",
connectorId: "string",
location: "string",
configs: {
string: "string",
},
project: "string",
taskRestartPolicy: {
maximumBackoff: "string",
minimumBackoff: "string",
},
});
type: gcp:managedkafka:Connector
properties:
configs:
string: string
connectCluster: string
connectorId: string
location: string
project: string
taskRestartPolicy:
maximumBackoff: string
minimumBackoff: string
Connector Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
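For illustration, here is a minimal Python sketch of the two equivalent forms for the task_restart_policy input; the connect cluster name below is an assumed placeholder, and configs is omitted for brevity:
import pulumi_gcp as gcp
# Form 1: argument class
connector_a = gcp.managedkafka.Connector("connector-a",
    connector_id="connector-a",
    connect_cluster="my-connect-cluster",  # placeholder: an existing Connect cluster
    location="us-central1",
    task_restart_policy=gcp.managedkafka.ConnectorTaskRestartPolicyArgs(
        minimum_backoff="60s",
        maximum_backoff="1800s"))
# Form 2: dictionary literal (equivalent)
connector_b = gcp.managedkafka.Connector("connector-b",
    connector_id="connector-b",
    connect_cluster="my-connect-cluster",
    location="us-central1",
    task_restart_policy={
        "minimum_backoff": "60s",
        "maximum_backoff": "1800s",
    })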
The Connector resource accepts the following input properties:
C#:
- ConnectCluster string
- The connect cluster name.
- ConnectorId string
- The ID to use for the connector, which will become the final component of the connector's name. This value is structured like: my-connector-id.
- Location string
- ID of the location of the Kafka Connect resource. See https://cloud.google.com/managed-kafka/docs/locations for a list of supported locations.
- Configs Dictionary<string, string>
- Connector config as keys/values. The keys of the map are connector property names, for example: connector.class, tasks.max, key.converter.
- Project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- TaskRestartPolicy ConnectorTaskRestartPolicy
- A policy that specifies how to restart the failed connectors/tasks in a Cluster resource. If not set, the failed connectors/tasks won't be restarted. Structure is documented below.
Go:
- ConnectCluster string
- The connect cluster name.
- ConnectorId string
- The ID to use for the connector, which will become the final component of the connector's name. This value is structured like: my-connector-id.
- Location string
- ID of the location of the Kafka Connect resource. See https://cloud.google.com/managed-kafka/docs/locations for a list of supported locations.
- Configs map[string]string
- Connector config as keys/values. The keys of the map are connector property names, for example: connector.class, tasks.max, key.converter.
- Project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- TaskRestartPolicy ConnectorTaskRestartPolicyArgs
- A policy that specifies how to restart the failed connectors/tasks in a Cluster resource. If not set, the failed connectors/tasks won't be restarted. Structure is documented below.
Java:
- connectCluster String
- The connect cluster name.
- connectorId String
- The ID to use for the connector, which will become the final component of the connector's name. This value is structured like: my-connector-id.
- location String
- ID of the location of the Kafka Connect resource. See https://cloud.google.com/managed-kafka/docs/locations for a list of supported locations.
- configs Map<String,String>
- Connector config as keys/values. The keys of the map are connector property names, for example: connector.class, tasks.max, key.converter.
- project String
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- taskRestartPolicy ConnectorTaskRestartPolicy
- A policy that specifies how to restart the failed connectors/tasks in a Cluster resource. If not set, the failed connectors/tasks won't be restarted. Structure is documented below.
TypeScript:
- connectCluster string
- The connect cluster name.
- connectorId string
- The ID to use for the connector, which will become the final component of the connector's name. This value is structured like: my-connector-id.
- location string
- ID of the location of the Kafka Connect resource. See https://cloud.google.com/managed-kafka/docs/locations for a list of supported locations.
- configs {[key: string]: string}
- Connector config as keys/values. The keys of the map are connector property names, for example: connector.class, tasks.max, key.converter.
- project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- taskRestartPolicy ConnectorTaskRestartPolicy
- A policy that specifies how to restart the failed connectors/tasks in a Cluster resource. If not set, the failed connectors/tasks won't be restarted. Structure is documented below.
Python:
- connect_cluster str
- The connect cluster name.
- connector_id str
- The ID to use for the connector, which will become the final component of the connector's name. This value is structured like: my-connector-id.
- location str
- ID of the location of the Kafka Connect resource. See https://cloud.google.com/managed-kafka/docs/locations for a list of supported locations.
- configs Mapping[str, str]
- Connector config as keys/values. The keys of the map are connector property names, for example: connector.class, tasks.max, key.converter.
- project str
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- task_restart_policy ConnectorTaskRestartPolicyArgs
- A policy that specifies how to restart the failed connectors/tasks in a Cluster resource. If not set, the failed connectors/tasks won't be restarted. Structure is documented below.
YAML:
- connectCluster String
- The connect cluster name.
- connectorId String
- The ID to use for the connector, which will become the final component of the connector's name. This value is structured like: my-connector-id.
- location String
- ID of the location of the Kafka Connect resource. See https://cloud.google.com/managed-kafka/docs/locations for a list of supported locations.
- configs Map<String>
- Connector config as keys/values. The keys of the map are connector property names, for example: connector.class, tasks.max, key.converter.
- project String
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- taskRestartPolicy Property Map
- A policy that specifies how to restart the failed connectors/tasks in a Cluster resource. If not set, the failed connectors/tasks won't be restarted. Structure is documented below.
Outputs
All input properties are implicitly available as output properties. Additionally, the Connector resource produces the following output properties:
C#:
- Id string
- The provider-assigned unique ID for this managed resource.
- Name string
- The name of the connector. The connector segment is used when connecting directly to the connect cluster. Structured like: projects/PROJECT_ID/locations/LOCATION/connectClusters/CONNECT_CLUSTER/connectors/CONNECTOR_ID.
- State string
- The current state of the connector. Possible values: STATE_UNSPECIFIED, UNASSIGNED, RUNNING, PAUSED, FAILED, RESTARTING, and STOPPED.
Go:
- Id string
- The provider-assigned unique ID for this managed resource.
- Name string
- The name of the connector. The connector segment is used when connecting directly to the connect cluster. Structured like: projects/PROJECT_ID/locations/LOCATION/connectClusters/CONNECT_CLUSTER/connectors/CONNECTOR_ID.
- State string
- The current state of the connector. Possible values: STATE_UNSPECIFIED, UNASSIGNED, RUNNING, PAUSED, FAILED, RESTARTING, and STOPPED.
Java:
- id String
- The provider-assigned unique ID for this managed resource.
- name String
- The name of the connector. The connector segment is used when connecting directly to the connect cluster. Structured like: projects/PROJECT_ID/locations/LOCATION/connectClusters/CONNECT_CLUSTER/connectors/CONNECTOR_ID.
- state String
- The current state of the connector. Possible values: STATE_UNSPECIFIED, UNASSIGNED, RUNNING, PAUSED, FAILED, RESTARTING, and STOPPED.
TypeScript:
- id string
- The provider-assigned unique ID for this managed resource.
- name string
- The name of the connector. The connector segment is used when connecting directly to the connect cluster. Structured like: projects/PROJECT_ID/locations/LOCATION/connectClusters/CONNECT_CLUSTER/connectors/CONNECTOR_ID.
- state string
- The current state of the connector. Possible values: STATE_UNSPECIFIED, UNASSIGNED, RUNNING, PAUSED, FAILED, RESTARTING, and STOPPED.
Python:
- id str
- The provider-assigned unique ID for this managed resource.
- name str
- The name of the connector. The connector segment is used when connecting directly to the connect cluster. Structured like: projects/PROJECT_ID/locations/LOCATION/connectClusters/CONNECT_CLUSTER/connectors/CONNECTOR_ID.
- state str
- The current state of the connector. Possible values: STATE_UNSPECIFIED, UNASSIGNED, RUNNING, PAUSED, FAILED, RESTARTING, and STOPPED.
YAML:
- id String
- The provider-assigned unique ID for this managed resource.
- name String
- The name of the connector. The connector segment is used when connecting directly to the connect cluster. Structured like: projects/PROJECT_ID/locations/LOCATION/connectClusters/CONNECT_CLUSTER/connectors/CONNECTOR_ID.
- state String
- The current state of the connector. Possible values: STATE_UNSPECIFIED, UNASSIGNED, RUNNING, PAUSED, FAILED, RESTARTING, and STOPPED.
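As a quick sketch, these outputs read like any other Pulumi outputs; assuming the example connector defined above, a Python program could export them as stack outputs:
import pulumi
# `example` is the gcp.managedkafka.Connector created in the example above.
pulumi.export("connector_name", example.name)
pulumi.export("connector_state", example.state)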
Look up Existing Connector Resource
Get an existing Connector resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: ConnectorState, opts?: CustomResourceOptions): Connector
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
configs: Optional[Mapping[str, str]] = None,
connect_cluster: Optional[str] = None,
connector_id: Optional[str] = None,
location: Optional[str] = None,
name: Optional[str] = None,
project: Optional[str] = None,
state: Optional[str] = None,
task_restart_policy: Optional[ConnectorTaskRestartPolicyArgs] = None) -> Connector
func GetConnector(ctx *Context, name string, id IDInput, state *ConnectorState, opts ...ResourceOption) (*Connector, error)
public static Connector Get(string name, Input<string> id, ConnectorState? state, CustomResourceOptions? opts = null)
public static Connector get(String name, Output<String> id, ConnectorState state, CustomResourceOptions options)
resources:
  _:
    type: gcp:managedkafka:Connector
    get:
      id: ${id}
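For example, a minimal Python sketch of looking up an existing connector; the full resource ID below is a placeholder:
import pulumi
import pulumi_gcp as gcp
# Look up an existing connector by its provider-assigned ID (placeholder values).
existing = gcp.managedkafka.Connector.get("existing-connector",
    "projects/my-project/locations/us-central1/connectClusters/my-connect-cluster/connectors/my-connector")
pulumi.export("existing_state", existing.state)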
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
C#:
- Configs Dictionary<string, string>
- Connector config as keys/values. The keys of the map are connector property names, for example: connector.class, tasks.max, key.converter.
- ConnectCluster string
- The connect cluster name.
- ConnectorId string
- The ID to use for the connector, which will become the final component of the connector's name. This value is structured like: my-connector-id.
- Location string
- ID of the location of the Kafka Connect resource. See https://cloud.google.com/managed-kafka/docs/locations for a list of supported locations.
- Name string
- The name of the connector. The connector segment is used when connecting directly to the connect cluster. Structured like: projects/PROJECT_ID/locations/LOCATION/connectClusters/CONNECT_CLUSTER/connectors/CONNECTOR_ID.
- Project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- State string
- The current state of the connector. Possible values: STATE_UNSPECIFIED, UNASSIGNED, RUNNING, PAUSED, FAILED, RESTARTING, and STOPPED.
- TaskRestartPolicy ConnectorTaskRestartPolicy
- A policy that specifies how to restart the failed connectors/tasks in a Cluster resource. If not set, the failed connectors/tasks won't be restarted. Structure is documented below.
Go:
- Configs map[string]string
- Connector config as keys/values. The keys of the map are connector property names, for example: connector.class, tasks.max, key.converter.
- ConnectCluster string
- The connect cluster name.
- ConnectorId string
- The ID to use for the connector, which will become the final component of the connector's name. This value is structured like: my-connector-id.
- Location string
- ID of the location of the Kafka Connect resource. See https://cloud.google.com/managed-kafka/docs/locations for a list of supported locations.
- Name string
- The name of the connector. The connector segment is used when connecting directly to the connect cluster. Structured like: projects/PROJECT_ID/locations/LOCATION/connectClusters/CONNECT_CLUSTER/connectors/CONNECTOR_ID.
- Project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- State string
- The current state of the connector. Possible values: STATE_UNSPECIFIED, UNASSIGNED, RUNNING, PAUSED, FAILED, RESTARTING, and STOPPED.
- TaskRestartPolicy ConnectorTaskRestartPolicyArgs
- A policy that specifies how to restart the failed connectors/tasks in a Cluster resource. If not set, the failed connectors/tasks won't be restarted. Structure is documented below.
Java:
- configs Map<String,String>
- Connector config as keys/values. The keys of the map are connector property names, for example: connector.class, tasks.max, key.converter.
- connectCluster String
- The connect cluster name.
- connectorId String
- The ID to use for the connector, which will become the final component of the connector's name. This value is structured like: my-connector-id.
- location String
- ID of the location of the Kafka Connect resource. See https://cloud.google.com/managed-kafka/docs/locations for a list of supported locations.
- name String
- The name of the connector. The connector segment is used when connecting directly to the connect cluster. Structured like: projects/PROJECT_ID/locations/LOCATION/connectClusters/CONNECT_CLUSTER/connectors/CONNECTOR_ID.
- project String
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- state String
- The current state of the connector. Possible values: STATE_UNSPECIFIED, UNASSIGNED, RUNNING, PAUSED, FAILED, RESTARTING, and STOPPED.
- taskRestartPolicy ConnectorTaskRestartPolicy
- A policy that specifies how to restart the failed connectors/tasks in a Cluster resource. If not set, the failed connectors/tasks won't be restarted. Structure is documented below.
TypeScript:
- configs {[key: string]: string}
- Connector config as keys/values. The keys of the map are connector property names, for example: connector.class, tasks.max, key.converter.
- connectCluster string
- The connect cluster name.
- connectorId string
- The ID to use for the connector, which will become the final component of the connector's name. This value is structured like: my-connector-id.
- location string
- ID of the location of the Kafka Connect resource. See https://cloud.google.com/managed-kafka/docs/locations for a list of supported locations.
- name string
- The name of the connector. The connector segment is used when connecting directly to the connect cluster. Structured like: projects/PROJECT_ID/locations/LOCATION/connectClusters/CONNECT_CLUSTER/connectors/CONNECTOR_ID.
- project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- state string
- The current state of the connector. Possible values: STATE_UNSPECIFIED, UNASSIGNED, RUNNING, PAUSED, FAILED, RESTARTING, and STOPPED.
- taskRestartPolicy ConnectorTaskRestartPolicy
- A policy that specifies how to restart the failed connectors/tasks in a Cluster resource. If not set, the failed connectors/tasks won't be restarted. Structure is documented below.
Python:
- configs Mapping[str, str]
- Connector config as keys/values. The keys of the map are connector property names, for example: connector.class, tasks.max, key.converter.
- connect_cluster str
- The connect cluster name.
- connector_id str
- The ID to use for the connector, which will become the final component of the connector's name. This value is structured like: my-connector-id.
- location str
- ID of the location of the Kafka Connect resource. See https://cloud.google.com/managed-kafka/docs/locations for a list of supported locations.
- name str
- The name of the connector. The connector segment is used when connecting directly to the connect cluster. Structured like: projects/PROJECT_ID/locations/LOCATION/connectClusters/CONNECT_CLUSTER/connectors/CONNECTOR_ID.
- project str
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- state str
- The current state of the connector. Possible values: STATE_UNSPECIFIED, UNASSIGNED, RUNNING, PAUSED, FAILED, RESTARTING, and STOPPED.
- task_restart_policy ConnectorTaskRestartPolicyArgs
- A policy that specifies how to restart the failed connectors/tasks in a Cluster resource. If not set, the failed connectors/tasks won't be restarted. Structure is documented below.
YAML:
- configs Map<String>
- Connector config as keys/values. The keys of the map are connector property names, for example: connector.class, tasks.max, key.converter.
- connectCluster String
- The connect cluster name.
- connectorId String
- The ID to use for the connector, which will become the final component of the connector's name. This value is structured like: my-connector-id.
- location String
- ID of the location of the Kafka Connect resource. See https://cloud.google.com/managed-kafka/docs/locations for a list of supported locations.
- name String
- The name of the connector. The connector segment is used when connecting directly to the connect cluster. Structured like: projects/PROJECT_ID/locations/LOCATION/connectClusters/CONNECT_CLUSTER/connectors/CONNECTOR_ID.
- project String
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- state String
- The current state of the connector. Possible values: STATE_UNSPECIFIED, UNASSIGNED, RUNNING, PAUSED, FAILED, RESTARTING, and STOPPED.
- taskRestartPolicy Property Map
- A policy that specifies how to restart the failed connectors/tasks in a Cluster resource. If not set, the failed connectors/tasks won't be restarted. Structure is documented below.
Supporting Types
ConnectorTaskRestartPolicy, ConnectorTaskRestartPolicyArgs
C#:
- MaximumBackoff string
- The maximum amount of time to wait before retrying a failed task. This sets an upper bound for the backoff delay. A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".
- MinimumBackoff string
- The minimum amount of time to wait before retrying a failed task. This sets a lower bound for the backoff delay. A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".
Go:
- MaximumBackoff string
- The maximum amount of time to wait before retrying a failed task. This sets an upper bound for the backoff delay. A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".
- MinimumBackoff string
- The minimum amount of time to wait before retrying a failed task. This sets a lower bound for the backoff delay. A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".
Java:
- maximumBackoff String
- The maximum amount of time to wait before retrying a failed task. This sets an upper bound for the backoff delay. A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".
- minimumBackoff String
- The minimum amount of time to wait before retrying a failed task. This sets a lower bound for the backoff delay. A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".
TypeScript:
- maximumBackoff string
- The maximum amount of time to wait before retrying a failed task. This sets an upper bound for the backoff delay. A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".
- minimumBackoff string
- The minimum amount of time to wait before retrying a failed task. This sets a lower bound for the backoff delay. A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".
Python:
- maximum_backoff str
- The maximum amount of time to wait before retrying a failed task. This sets an upper bound for the backoff delay. A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".
- minimum_backoff str
- The minimum amount of time to wait before retrying a failed task. This sets a lower bound for the backoff delay. A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".
YAML:
- maximumBackoff String
- The maximum amount of time to wait before retrying a failed task. This sets an upper bound for the backoff delay. A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".
- minimumBackoff String
- The minimum amount of time to wait before retrying a failed task. This sets a lower bound for the backoff delay. A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".
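To illustrate the duration format, a brief Python sketch of a restart policy using fractional seconds (the values below are arbitrary):
# Both bounds are duration strings ending in 's'; up to nine fractional digits are allowed.
task_restart_policy = {
    "minimum_backoff": "3.5s",
    "maximum_backoff": "300s",
}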
Import
Connector can be imported using any of these accepted formats:
projects/{{project}}/locations/{{location}}/connectClusters/{{connect_cluster}}/connectors/{{connector_id}}
{{project}}/{{location}}/{{connect_cluster}}/{{connector_id}}
{{location}}/{{connect_cluster}}/{{connector_id}}
When using the pulumi import command, Connector can be imported using one of the formats above. For example:
$ pulumi import gcp:managedkafka/connector:Connector default projects/{{project}}/locations/{{location}}/connectClusters/{{connect_cluster}}/connectors/{{connector_id}}
$ pulumi import gcp:managedkafka/connector:Connector default {{project}}/{{location}}/{{connect_cluster}}/{{connector_id}}
$ pulumi import gcp:managedkafka/connector:Connector default {{location}}/{{connect_cluster}}/{{connector_id}}
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- Google Cloud (GCP) Classic pulumi/pulumi-gcp
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the google-beta Terraform Provider.