diff --git a/go.mod b/go.mod
index e008067..9066058 100644
--- a/go.mod
+++ b/go.mod
@@ -4,7 +4,7 @@ go 1.18
 
 require (
 	github.com/hashicorp/terraform-plugin-docs v0.13.0
-	github.com/hashicorp/terraform-plugin-framework v0.15.0
+	github.com/hashicorp/terraform-plugin-framework v0.16.0
 	github.com/hashicorp/terraform-plugin-go v0.14.2
 	github.com/hashicorp/terraform-plugin-log v0.7.0
 	github.com/hashicorp/terraform-plugin-sdk/v2 v2.24.1
diff --git a/go.sum b/go.sum
index c46d9cc..a4f661e 100644
--- a/go.sum
+++ b/go.sum
@@ -161,8 +161,8 @@ github.com/hashicorp/terraform-json v0.14.0 h1:sh9iZ1Y8IFJLx+xQiKHGud6/TSUCM0N8e
 github.com/hashicorp/terraform-json v0.14.0/go.mod h1:5A9HIWPkk4e5aeeXIBbkcOvaZbIYnAIkEyqP2pNSckM=
 github.com/hashicorp/terraform-plugin-docs v0.13.0 h1:6e+VIWsVGb6jYJewfzq2ok2smPzZrt1Wlm9koLeKazY=
 github.com/hashicorp/terraform-plugin-docs v0.13.0/go.mod h1:W0oCmHAjIlTHBbvtppWHe8fLfZ2BznQbuv8+UD8OucQ=
-github.com/hashicorp/terraform-plugin-framework v0.15.0 h1:6f4UY2yfp5UsSX9JhUA6RSptjd+ojStBGWA4jrPhB6Q=
-github.com/hashicorp/terraform-plugin-framework v0.15.0/go.mod h1:wcZdk4+Uef6Ng+BiBJjGAcIPlIs5bhlEV/TA1k6Xkq8=
+github.com/hashicorp/terraform-plugin-framework v0.16.0 h1:kEHh0d6dp5Ig/ey6PYXkWDZPMLIW8Me41T/Oa7bpO4s=
+github.com/hashicorp/terraform-plugin-framework v0.16.0/go.mod h1:Vk5MuIJoE1qksHZawAZr6psx6YXsQBFIKDrWbROrwus=
 github.com/hashicorp/terraform-plugin-go v0.14.2 h1:rhsVEOGCnY04msNymSvbUsXfRLKh9znXZmHlf5e8mhE=
 github.com/hashicorp/terraform-plugin-go v0.14.2/go.mod h1:Q12UjumPNGiFsZffxOsA40Tlz1WVXt2Evh865Zj0+UA=
 github.com/hashicorp/terraform-plugin-log v0.7.0 h1:SDxJUyT8TwN4l5b5/VkiTIaQgY6R+Y2BQ0sRZftGKQs=
diff --git a/internal/provider/provider.go b/internal/provider/provider.go
index 819228b..38cc688 100644
--- a/internal/provider/provider.go
+++ b/internal/provider/provider.go
@@ -176,7 +176,7 @@ func (p *KafkaProvider) Configure(ctx context.Context, req provider.ConfigureReq
 	boostrapServers := strings.Split(bootstrapServersString, ",")
 	boostrapServer := boostrapServers[0] // Select the first server on the list
 	if len(config.BootstrapServers) > 0 {
-		boostrapServer = config.BootstrapServers[0].Value
+		boostrapServer = config.BootstrapServers[0].ValueString()
 	}
 	// We only require 1 server
 	brokerConfig.BrokerAddr = boostrapServer
@@ -185,7 +185,7 @@ func (p *KafkaProvider) Configure(ctx context.Context, req provider.ConfigureReq
 	// SASL configuration
 	saslConfigEnabled := p.getEnvBool("SASL_ENABLED", true)
 	if !config.SASL.Enabled.IsNull() {
-		saslConfigEnabled = config.SASL.Enabled.Value
+		saslConfigEnabled = config.SASL.Enabled.ValueBool()
 	}
 	if saslConfigEnabled {
 		saslConfig, err := p.generateSASLConfig(ctx, config.SASL, resp)
@@ -197,13 +197,13 @@ func (p *KafkaProvider) Configure(ctx context.Context, req provider.ConfigureReq
 	}
 
 	// Configure TLS settings
-	brokerConfig.TLS.Enabled = config.TLS.Enabled.Value
-	brokerConfig.TLS.SkipVerify = config.TLS.SkipVerify.Value
+	brokerConfig.TLS.Enabled = config.TLS.Enabled.ValueBool()
+	brokerConfig.TLS.SkipVerify = config.TLS.SkipVerify.ValueBool()
 
 	// Configure timeout
 	defaultTimeout := int64(p.getEnvInt("TIMEOUT", 300))
 	if !config.Timeout.IsNull() {
-		defaultTimeout = config.Timeout.Value
+		defaultTimeout = config.Timeout.ValueInt64()
 	}
 	kafkaClientTimeout := time.Second * time.Duration(defaultTimeout)
 
@@ -243,15 +243,15 @@ func (p *KafkaProvider) generateSASLConfig(ctx context.Context, sasl SASLConfigM
 
 	saslMechanism := p.getEnv("SASL_MECHANISM", "aws-msk-iam")
 	if !sasl.Mechanism.IsNull() {
-		saslMechanism = sasl.Mechanism.Value
+		saslMechanism = sasl.Mechanism.ValueString()
 	}
 	saslUsername := p.getEnv("SASL_USERNAME", "")
 	if !sasl.Mechanism.IsNull() {
-		saslUsername = sasl.Username.Value
+		saslUsername = sasl.Username.ValueString()
 	}
 	saslPassword := p.getEnv("SASL_PASSWORD", "")
 	if !sasl.Mechanism.IsNull() {
-		saslPassword = sasl.Password.Value
+		saslPassword = sasl.Password.ValueString()
 	}
 
 	switch admin.SASLMechanism(saslMechanism) {
@@ -273,7 +273,7 @@ func (p *KafkaProvider) generateSASLConfig(ctx context.Context, sasl SASLConfigM
 			Mechanism: admin.SASLMechanismAWSMSKIAM,
 		}, nil
 	}
-	return admin.SASLConfig{}, fmt.Errorf("unable to detect SASL mechanism: %s", sasl.Mechanism.Value)
+	return admin.SASLConfig{}, fmt.Errorf("unable to detect SASL mechanism: %s", sasl.Mechanism.ValueString())
 }
 
 func (p *KafkaProvider) Resources(ctx context.Context) []func() resource.Resource {
diff --git a/internal/provider/topic_data_source.go b/internal/provider/topic_data_source.go
index 1791485..74a74fa 100644
--- a/internal/provider/topic_data_source.go
+++ b/internal/provider/topic_data_source.go
@@ -107,7 +107,7 @@ func (d *TopicDataSource) Read(ctx context.Context, req datasource.ReadRequest,
 		return
 	}
 
-	topicInfo, err := d.client.GetTopic(ctx, data.Name.Value, true)
+	topicInfo, err := d.client.GetTopic(ctx, data.Name.ValueString(), true)
 	if err != nil {
 		resp.Diagnostics.AddError("Client Error", fmt.Sprintf("Unable to read topic, got error: %s", err))
 		return
@@ -119,20 +119,20 @@ func (d *TopicDataSource) Read(ctx context.Context, req datasource.ReadRequest,
 		return
 	}
 
-	data.ID = types.String{Value: topicInfo.Name}
-	data.Name = types.String{Value: topicInfo.Name}
-	data.Partitions = types.Int64{Value: int64(len(topicInfo.Partitions))}
-	data.ReplicationFactor = types.Int64{Value: int64(replicationFactor)}
-	data.Version = types.Int64{Value: int64(topicInfo.Version)}
+	data.ID = types.StringValue(topicInfo.Name)
+	data.Name = types.StringValue(topicInfo.Name)
+	data.Partitions = types.Int64Value(int64(len(topicInfo.Partitions)))
+	data.ReplicationFactor = types.Int64Value(int64(replicationFactor))
+	data.Version = types.Int64Value(int64(topicInfo.Version))
 
 	configElement := make(map[string]attr.Value)
 	for k, v := range topicInfo.Config {
-		configElement[k] = types.String{Value: v}
-	}
-	data.Config = types.Map{
-		ElemType: types.StringType,
-		Elems:    configElement,
+		configElement[k] = types.StringValue(v)
 	}
+	data.Config = types.MapValueMust(
+		types.StringType,
+		configElement,
+	)
 
 	// Write logs using the tflog package
 	// Documentation: https://terraform.io/plugin/log
diff --git a/internal/provider/topic_resource.go b/internal/provider/topic_resource.go
index fe3998c..eac6659 100644
--- a/internal/provider/topic_resource.go
+++ b/internal/provider/topic_resource.go
@@ -91,10 +91,10 @@ func (r *TopicResource) GetSchema(ctx context.Context) (tfsdk.Schema, diag.Diagn
 				Computed: true,
 				PlanModifiers: []tfsdk.AttributePlanModifier{
 					resource.UseStateForUnknown(),
-					modifier.DefaultAttribute(types.Map{
-						ElemType: types.StringType,
-						Elems:    map[string]attr.Value{},
-					}),
+					modifier.DefaultAttribute(types.MapValueMust(
+						types.StringType,
+						map[string]attr.Value{},
+					)),
 				},
 			},
 		},
@@ -133,7 +133,7 @@ func (r *TopicResource) Create(ctx context.Context, req resource.CreateRequest,
 
 	// Generate KafkaConfig
 	var configEntries []kafka.ConfigEntry
-	for k, v := range data.Config.Elems {
+	for k, v := range data.Config.Elements() {
 		configEntries = append(configEntries, kafka.ConfigEntry{
 			ConfigName: k,
 			// TODO: Why do we have to do this ugly remove quotes?
@@ -142,13 +142,13 @@ func (r *TopicResource) Create(ctx context.Context, req resource.CreateRequest,
 		})
 	}
 	topicConfig := kafka.TopicConfig{
-		Topic:             data.Name.Value,
-		NumPartitions:     int(data.Partitions.Value),
-		ReplicationFactor: int(data.ReplicationFactor.Value),
+		Topic:             data.Name.ValueString(),
+		NumPartitions:     int(data.Partitions.ValueInt64()),
+		ReplicationFactor: int(data.ReplicationFactor.ValueInt64()),
 		ConfigEntries:     configEntries,
 	}
 
-	tflog.Info(ctx, fmt.Sprintf("Creating topic %s", data.Name.Value))
+	tflog.Info(ctx, fmt.Sprintf("Creating topic %s", data.Name.ValueString()))
 	createRequest := kafka.CreateTopicsRequest{
 		Topics: []kafka.TopicConfig{topicConfig},
 	}
@@ -180,7 +180,7 @@ func (r *TopicResource) Read(ctx context.Context, req resource.ReadRequest, resp
 		return
 	}
 
-	topicInfo, err := r.client.GetTopic(ctx, data.ID.Value, true)
+	topicInfo, err := r.client.GetTopic(ctx, data.ID.ValueString(), true)
 	if err != nil {
 		resp.Diagnostics.AddError("Client Error", fmt.Sprintf("Unable to read topic, got error: %s", err))
 		return
@@ -192,17 +192,17 @@ func (r *TopicResource) Read(ctx context.Context, req resource.ReadRequest, resp
 		return
 	}
 
-	data.Name = types.String{Value: topicInfo.Name}
-	data.Partitions = types.Int64{Value: int64(len(topicInfo.Partitions))}
-	data.ReplicationFactor = types.Int64{Value: int64(replicationFactor)}
+	data.Name = types.StringValue(topicInfo.Name)
+	data.Partitions = types.Int64Value(int64(len(topicInfo.Partitions)))
+	data.ReplicationFactor = types.Int64Value(int64(replicationFactor))
 	configElement := map[string]attr.Value{}
 	for k, v := range topicInfo.Config {
-		configElement[k] = types.String{Value: v}
-	}
-	data.Config = types.Map{
-		ElemType: types.StringType,
-		Elems:    configElement,
+		configElement[k] = types.StringValue(v)
 	}
+	data.Config = types.MapValueMust(
+		types.StringType,
+		configElement,
+	)
 
 	// Save updated data into Terraform state
 	resp.Diagnostics.Append(resp.State.Set(ctx, &data)...)
@@ -271,7 +271,7 @@ func (r *TopicResource) Update(ctx context.Context, req resource.UpdateRequest,
 func (r *TopicResource) updateConfig(ctx context.Context, data *TopicResourceModel, req resource.UpdateRequest, resp *resource.UpdateResponse) error {
 	// Generate KafkaConfig
 	var configEntries []kafka.ConfigEntry
-	for k, v := range data.Config.Elems {
+	for k, v := range data.Config.Elements() {
 		configEntries = append(configEntries, kafka.ConfigEntry{
 			ConfigName: k,
 			// TODO: Why do we have to do this ugly remove quotes?
@@ -284,7 +284,7 @@ func (r *TopicResource) updateConfig(ctx context.Context, data *TopicResourceMod
 		Resources: []kafka.AlterConfigRequestResource{
 			{
 				ResourceType: kafka.ResourceTypeTopic,
-				ResourceName: data.Name.Value,
+				ResourceName: data.Name.ValueString(),
 				Configs:      configEntriesToAlterConfigs(configEntries),
 			},
 		},
@@ -323,11 +323,11 @@ func (r *TopicResource) updateReplicationFactor(ctx context.Context, state *Topi
 		return err
 	}
 
-	if data.ReplicationFactor.Value > int64(len(brokerIDs)) {
+	if data.ReplicationFactor.ValueInt64() > int64(len(brokerIDs)) {
 		return fmt.Errorf("replication factor cannot be higher than the number of brokers")
 	}
 
-	topicInfo, err := r.client.GetTopic(ctx, data.Name.Value, false)
+	topicInfo, err := r.client.GetTopic(ctx, data.Name.ValueString(), false)
 	if err != nil {
 		return err
 	}
@@ -345,7 +345,7 @@ func (r *TopicResource) updateReplicationFactor(ctx context.Context, state *Topi
 		return err
 	}
 	for _, topic := range topics {
-		if topic.Name != data.Name.Value {
+		if topic.Name != data.Name.ValueString() {
 			nonAppliedTopics = append(
 				nonAppliedTopics,
 				topic,
@@ -353,8 +353,8 @@ func (r *TopicResource) updateReplicationFactor(ctx context.Context, state *Topi
 		}
 	}
 
-	replicasWanted := data.ReplicationFactor.Value
-	replicasPresent := state.ReplicationFactor.Value
+	replicasWanted := data.ReplicationFactor.ValueInt64()
+	replicasPresent := state.ReplicationFactor.ValueInt64()
 
 	var newPartitionsInfo []admin.PartitionInfo
 	for _, partition := range topicInfo.Partitions {
@@ -376,7 +376,7 @@ func (r *TopicResource) updateReplicationFactor(ctx context.Context, state *Topi
 	picker := pickers.NewClusterUsePicker(brokersInfo, nonAppliedTopics)
 	assigner := assigners.NewCrossRackAssigner(brokersInfo, picker)
 
-	assignments, err := assigner.Assign(data.Name.Value, newAssignments)
+	assignments, err := assigner.Assign(data.Name.ValueString(), newAssignments)
 	if err != nil {
 		return err
 	}
@@ -390,7 +390,7 @@ func (r *TopicResource) updateReplicationFactor(ctx context.Context, state *Topi
 		apiAssignments = append(apiAssignments, apiAssignment)
 	}
 	alterPartitionReassignmentsRequest := kafka.AlterPartitionReassignmentsRequest{
-		Topic:       data.Name.Value,
+		Topic:       data.Name.ValueString(),
 		Assignments: apiAssignments,
 	}
 
@@ -456,7 +456,7 @@ func reduceReplicas(desired int, replicas []int, leader int) []int {
 }
 
 func (r *TopicResource) updatePartitions(ctx context.Context, state *TopicResourceModel, data *TopicResourceModel, req resource.UpdateRequest, resp *resource.UpdateResponse) error {
-	if data.Partitions.Value < state.Partitions.Value {
+	if data.Partitions.ValueInt64() < state.Partitions.ValueInt64() {
 		return fmt.Errorf("partition count can't be reduced")
 	}
 
@@ -468,7 +468,7 @@ func (r *TopicResource) updatePartitions(ctx context.Context, state *TopicResour
 	if err != nil {
 		return err
 	}
-	topicInfo, err := r.client.GetTopic(ctx, data.Name.Value, false)
+	topicInfo, err := r.client.GetTopic(ctx, data.Name.ValueString(), false)
 	if err != nil {
 		return err
 	}
@@ -477,7 +477,7 @@ func (r *TopicResource) updatePartitions(ctx context.Context, state *TopicResour
 	for _, b := range brokersInfo {
 		tflog.Debug(ctx, fmt.Sprintf("Broker ID: %v Rack: %s", b.ID, b.Rack))
 	}
-	extraPartitions := int(data.Partitions.Value) - int(state.Partitions.Value)
+	extraPartitions := int(data.Partitions.ValueInt64()) - int(state.Partitions.ValueInt64())
 
 	picker := pickers.NewRandomizedPicker()
 	extender := extenders.NewBalancedExtender(
@@ -486,7 +486,7 @@ func (r *TopicResource) updatePartitions(ctx context.Context, state *TopicResour
 		picker,
 	)
 	desiredAssignments, err := extender.Extend(
-		data.Name.Value,
+		data.Name.ValueString(),
 		currAssignments,
 		extraPartitions,
 	)
@@ -497,7 +497,7 @@ func (r *TopicResource) updatePartitions(ctx context.Context, state *TopicResour
 
 	tflog.Info(ctx, fmt.Sprintf("Assignments: %v", desiredAssignments))
 
-	err = r.client.AddPartitions(ctx, data.Name.Value, desiredAssignments)
+	err = r.client.AddPartitions(ctx, data.Name.ValueString(), desiredAssignments)
 	if err != nil {
 		return err
 	}
@@ -516,7 +516,7 @@ func (r *TopicResource) Delete(ctx context.Context, req resource.DeleteRequest,
 	}
 
 	clientResp, err := r.client.GetConnector().KafkaClient.DeleteTopics(ctx, &kafka.DeleteTopicsRequest{
-		Topics: []string{data.Name.Value},
+		Topics: []string{data.Name.ValueString()},
 	})
 	if err != nil {
 		resp.Diagnostics.AddError("Client Error", fmt.Sprintf("Unable to delete topic, got error: %s", err))
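
Note on the pattern applied throughout this diff: terraform-plugin-framework v0.16 moves from exported struct fields to constructor functions and typed accessors. Struct literals such as types.String{Value: v} become types.StringValue(v) / types.Int64Value(n) / types.MapValueMust(t, elems), direct field reads such as .Value become .ValueString() / .ValueBool() / .ValueInt64(), and the types.Map field .Elems becomes the .Elements() method. Below is a minimal sketch of both directions of the conversion; topicModel, fromAPI, and toAPI are hypothetical names for illustration, not this provider's actual TopicResourceModel or helpers.

package example

import (
	"github.com/hashicorp/terraform-plugin-framework/attr"
	"github.com/hashicorp/terraform-plugin-framework/types"
)

// topicModel is a hypothetical stand-in for a plan/state model struct.
type topicModel struct {
	Name       types.String
	Partitions types.Int64
	Config     types.Map
}

// fromAPI builds framework values with the v0.16 constructors
// (v0.15 used struct literals such as types.String{Value: name}).
func fromAPI(name string, partitions int, config map[string]string) topicModel {
	elems := make(map[string]attr.Value, len(config))
	for k, v := range config {
		elems[k] = types.StringValue(v) // was: types.String{Value: v}
	}
	return topicModel{
		Name:       types.StringValue(name),
		Partitions: types.Int64Value(int64(partitions)),
		// MapValueMust panics on an element type mismatch; safe here
		// because every element above is a types.String.
		Config: types.MapValueMust(types.StringType, elems),
	}
}

// toAPI unwraps framework values with the v0.16 accessors (v0.15 read the
// .Value field directly). Accessors return the Go zero value when the
// attribute is null or unknown, so IsNull() checks still matter wherever
// the zero value is ambiguous.
func toAPI(m topicModel) (string, int, map[string]string) {
	config := make(map[string]string)
	for k, v := range m.Config.Elements() { // was: m.Config.Elems
		if s, ok := v.(types.String); ok {
			config[k] = s.ValueString()
		}
	}
	return m.Name.ValueString(), int(m.Partitions.ValueInt64()), config
}

types.MapValueMust panics if an element does not match the declared element type, which is why the sketch only uses it where every element is known to be a types.String; the diagnostics-returning types.MapValue is the alternative when that invariant is not guaranteed.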