Bump github.com/hashicorp/terraform-plugin-framework from 0.15.0 to 0.16.0 (#8)

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Gonzalo Peci <pecigonzalo@users.noreply.github.com>
dependabot[bot] and pecigonzalo authored Nov 24, 2022
1 parent 7daa69c commit f931745
Showing 5 changed files with 56 additions and 56 deletions.
2 changes: 1 addition & 1 deletion go.mod
@@ -4,7 +4,7 @@ go 1.18

require (
github.com/hashicorp/terraform-plugin-docs v0.13.0
-github.com/hashicorp/terraform-plugin-framework v0.15.0
+github.com/hashicorp/terraform-plugin-framework v0.16.0
github.com/hashicorp/terraform-plugin-go v0.14.2
github.com/hashicorp/terraform-plugin-log v0.7.0
github.com/hashicorp/terraform-plugin-sdk/v2 v2.24.1
4 changes: 2 additions & 2 deletions go.sum
@@ -161,8 +161,8 @@ github.com/hashicorp/terraform-json v0.14.0 h1:sh9iZ1Y8IFJLx+xQiKHGud6/TSUCM0N8e
github.com/hashicorp/terraform-json v0.14.0/go.mod h1:5A9HIWPkk4e5aeeXIBbkcOvaZbIYnAIkEyqP2pNSckM=
github.com/hashicorp/terraform-plugin-docs v0.13.0 h1:6e+VIWsVGb6jYJewfzq2ok2smPzZrt1Wlm9koLeKazY=
github.com/hashicorp/terraform-plugin-docs v0.13.0/go.mod h1:W0oCmHAjIlTHBbvtppWHe8fLfZ2BznQbuv8+UD8OucQ=
-github.com/hashicorp/terraform-plugin-framework v0.15.0 h1:6f4UY2yfp5UsSX9JhUA6RSptjd+ojStBGWA4jrPhB6Q=
-github.com/hashicorp/terraform-plugin-framework v0.15.0/go.mod h1:wcZdk4+Uef6Ng+BiBJjGAcIPlIs5bhlEV/TA1k6Xkq8=
+github.com/hashicorp/terraform-plugin-framework v0.16.0 h1:kEHh0d6dp5Ig/ey6PYXkWDZPMLIW8Me41T/Oa7bpO4s=
+github.com/hashicorp/terraform-plugin-framework v0.16.0/go.mod h1:Vk5MuIJoE1qksHZawAZr6psx6YXsQBFIKDrWbROrwus=
github.com/hashicorp/terraform-plugin-go v0.14.2 h1:rhsVEOGCnY04msNymSvbUsXfRLKh9znXZmHlf5e8mhE=
github.com/hashicorp/terraform-plugin-go v0.14.2/go.mod h1:Q12UjumPNGiFsZffxOsA40Tlz1WVXt2Evh865Zj0+UA=
github.com/hashicorp/terraform-plugin-log v0.7.0 h1:SDxJUyT8TwN4l5b5/VkiTIaQgY6R+Y2BQ0sRZftGKQs=
18 changes: 9 additions & 9 deletions internal/provider/provider.go
@@ -176,7 +176,7 @@ func (p *KafkaProvider) Configure(ctx context.Context, req provider.ConfigureReq
boostrapServers := strings.Split(bootstrapServersString, ",")
boostrapServer := boostrapServers[0] // Select the first server on the list
if len(config.BootstrapServers) > 0 {
-boostrapServer = config.BootstrapServers[0].Value
+boostrapServer = config.BootstrapServers[0].ValueString()
}
// We only require 1 server
brokerConfig.BrokerAddr = boostrapServer
@@ -185,7 +185,7 @@ func (p *KafkaProvider) Configure(ctx context.Context, req provider.ConfigureReq
// SASL configuration
saslConfigEnabled := p.getEnvBool("SASL_ENABLED", true)
if !config.SASL.Enabled.IsNull() {
-saslConfigEnabled = config.SASL.Enabled.Value
+saslConfigEnabled = config.SASL.Enabled.ValueBool()
}
if saslConfigEnabled {
saslConfig, err := p.generateSASLConfig(ctx, config.SASL, resp)
@@ -197,13 +197,13 @@ func (p *KafkaProvider) Configure(ctx context.Context, req provider.ConfigureReq
}

// Configure TLS settings
-brokerConfig.TLS.Enabled = config.TLS.Enabled.Value
-brokerConfig.TLS.SkipVerify = config.TLS.SkipVerify.Value
+brokerConfig.TLS.Enabled = config.TLS.Enabled.ValueBool()
+brokerConfig.TLS.SkipVerify = config.TLS.SkipVerify.ValueBool()

// Configure timeout
defaultTimeout := int64(p.getEnvInt("TIMEOUT", 300))
if !config.Timeout.IsNull() {
-defaultTimeout = config.Timeout.Value
+defaultTimeout = config.Timeout.ValueInt64()
}
kafkaClientTimeout := time.Second * time.Duration(defaultTimeout)

@@ -243,15 +243,15 @@ func (p *KafkaProvider) generateSASLConfig(ctx context.Context, sasl SASLConfigM

saslMechanism := p.getEnv("SASL_MECHANISM", "aws-msk-iam")
if !sasl.Mechanism.IsNull() {
-saslMechanism = sasl.Mechanism.Value
+saslMechanism = sasl.Mechanism.ValueString()
}
saslUsername := p.getEnv("SASL_USERNAME", "")
if !sasl.Mechanism.IsNull() {
-saslUsername = sasl.Username.Value
+saslUsername = sasl.Username.ValueString()
}
saslPassword := p.getEnv("SASL_PASSWORD", "")
if !sasl.Mechanism.IsNull() {
-saslPassword = sasl.Password.Value
+saslPassword = sasl.Password.ValueString()
}

switch admin.SASLMechanism(saslMechanism) {
@@ -273,7 +273,7 @@ func (p *KafkaProvider) generateSASLConfig(ctx context.Context, sasl SASLConfigM
Mechanism: admin.SASLMechanismAWSMSKIAM,
}, nil
}
-return admin.SASLConfig{}, fmt.Errorf("unable to detect SASL mechanism: %s", sasl.Mechanism.Value)
+return admin.SASLConfig{}, fmt.Errorf("unable to detect SASL mechanism: %s", sasl.Mechanism.ValueString())
}

func (p *KafkaProvider) Resources(ctx context.Context) []func() resource.Resource {
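The provider.go changes above track the main breaking change in terraform-plugin-framework 0.16.0: the exported Value field on types.String, types.Bool, and types.Int64 gave way to constructor functions and typed accessor methods. A minimal standalone sketch of the new pattern (illustrative, not code from this commit):

package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-framework/types"
)

func main() {
	// 0.16.0 constructors replace struct literals such as types.String{Value: "..."}.
	server := types.StringValue("broker:9092")
	tls := types.BoolValue(true)
	timeout := types.Int64Value(300)

	// Typed accessors replace reads of the removed Value field.
	fmt.Println(server.ValueString()) // broker:9092
	fmt.Println(tls.ValueBool())      // true
	fmt.Println(timeout.ValueInt64()) // 300

	// Null and unknown checks are unchanged from 0.15.0.
	fmt.Println(server.IsNull(), server.IsUnknown()) // false false
}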
22 changes: 11 additions & 11 deletions internal/provider/topic_data_source.go
@@ -107,7 +107,7 @@ func (d *TopicDataSource) Read(ctx context.Context, req datasource.ReadRequest,
return
}

-topicInfo, err := d.client.GetTopic(ctx, data.Name.Value, true)
+topicInfo, err := d.client.GetTopic(ctx, data.Name.ValueString(), true)
if err != nil {
resp.Diagnostics.AddError("Client Error", fmt.Sprintf("Unable to read topic, got error: %s", err))
return
@@ -119,20 +119,20 @@ func (d *TopicDataSource) Read(ctx context.Context, req datasource.ReadRequest,
return
}

-data.ID = types.String{Value: topicInfo.Name}
-data.Name = types.String{Value: topicInfo.Name}
-data.Partitions = types.Int64{Value: int64(len(topicInfo.Partitions))}
-data.ReplicationFactor = types.Int64{Value: int64(replicationFactor)}
-data.Version = types.Int64{Value: int64(topicInfo.Version)}
+data.ID = types.StringValue(topicInfo.Name)
+data.Name = types.StringValue(topicInfo.Name)
+data.Partitions = types.Int64Value(int64(len(topicInfo.Partitions)))
+data.ReplicationFactor = types.Int64Value(int64(replicationFactor))
+data.Version = types.Int64Value(int64(topicInfo.Version))

configElement := make(map[string]attr.Value)
for k, v := range topicInfo.Config {
-configElement[k] = types.String{Value: v}
+configElement[k] = types.StringValue(v)
}
-data.Config = types.Map{
-ElemType: types.StringType,
-Elems: configElement,
-}
+data.Config = types.MapValueMust(
+types.StringType,
+configElement,
+)

// Write logs using the tflog package
// Documentation: https://terraform.io/plugin/log
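The same release turned map construction into constructor functions as well. types.MapValueMust panics if an element does not match the declared element type, which is reasonable in Read above where every element is built locally as a types.String; types.MapValue is the diagnostics-returning variant. A short sketch of both, assuming framework 0.16.0 (not code from this commit):

package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-framework/attr"
	"github.com/hashicorp/terraform-plugin-framework/types"
)

func main() {
	elements := map[string]attr.Value{
		"cleanup.policy": types.StringValue("compact"),
	}

	// Panics on an element/type mismatch; fine for locally built maps.
	cfg := types.MapValueMust(types.StringType, elements)
	fmt.Println(cfg)

	// Non-panicking variant; the diagnostics would normally be appended
	// to resp.Diagnostics in a resource or data source.
	cfg2, diags := types.MapValue(types.StringType, elements)
	if diags.HasError() {
		fmt.Println(diags)
		return
	}
	fmt.Println(cfg2)
}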
66 changes: 33 additions & 33 deletions internal/provider/topic_resource.go
@@ -91,10 +91,10 @@ func (r *TopicResource) GetSchema(ctx context.Context) (tfsdk.Schema, diag.Diagn
Computed: true,
PlanModifiers: []tfsdk.AttributePlanModifier{
resource.UseStateForUnknown(),
-modifier.DefaultAttribute(types.Map{
-ElemType: types.StringType,
-Elems: map[string]attr.Value{},
-}),
+modifier.DefaultAttribute(types.MapValueMust(
+types.StringType,
+map[string]attr.Value{},
+)),
},
},
},
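modifier.DefaultAttribute is this provider's own helper from internal/modifier; its implementation is not part of this diff. For orientation only, a hypothetical default-value plan modifier against the 0.16.0 tfsdk interfaces could look like the sketch below — an assumption about the helper's shape, not the repository's actual code:

package modifier

import (
	"context"

	"github.com/hashicorp/terraform-plugin-framework/attr"
	"github.com/hashicorp/terraform-plugin-framework/tfsdk"
)

// DefaultAttribute returns a plan modifier that substitutes def when the
// attribute is left null in the configuration.
func DefaultAttribute(def attr.Value) tfsdk.AttributePlanModifier {
	return defaultAttribute{def: def}
}

type defaultAttribute struct {
	def attr.Value
}

func (m defaultAttribute) Description(ctx context.Context) string {
	return "Sets a default value if the attribute is not configured."
}

func (m defaultAttribute) MarkdownDescription(ctx context.Context) string {
	return m.Description(ctx)
}

func (m defaultAttribute) Modify(ctx context.Context, req tfsdk.ModifyAttributePlanRequest, resp *tfsdk.ModifyAttributePlanResponse) {
	// Leave user-supplied values untouched; only fill in the default.
	if req.AttributeConfig == nil || !req.AttributeConfig.IsNull() {
		return
	}
	resp.AttributePlan = m.def
}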
@@ -133,7 +133,7 @@ func (r *TopicResource) Create(ctx context.Context, req resource.CreateRequest,

// Generate KafkaConfig
var configEntries []kafka.ConfigEntry
-for k, v := range data.Config.Elems {
+for k, v := range data.Config.Elements() {
configEntries = append(configEntries, kafka.ConfigEntry{
ConfigName: k,
// TODO: Why do we have to do this ugly remove quotes?
@@ -142,13 +142,13 @@ func (r *TopicResource) Create(ctx context.Context, req resource.CreateRequest,
})
}
topicConfig := kafka.TopicConfig{
-Topic: data.Name.Value,
-NumPartitions: int(data.Partitions.Value),
-ReplicationFactor: int(data.ReplicationFactor.Value),
+Topic: data.Name.ValueString(),
+NumPartitions: int(data.Partitions.ValueInt64()),
+ReplicationFactor: int(data.ReplicationFactor.ValueInt64()),
ConfigEntries: configEntries,
}

-tflog.Info(ctx, fmt.Sprintf("Creating topic %s", data.Name.Value))
+tflog.Info(ctx, fmt.Sprintf("Creating topic %s", data.Name.ValueString()))
createRequest := kafka.CreateTopicsRequest{
Topics: []kafka.TopicConfig{topicConfig},
}
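The // TODO about stripping quotes exists because rendering an attr.Value with its String() method (as the elided line presumably does) produces the Terraform syntax representation, which double-quotes strings. Asserting each element to types.String and calling ValueString() sidesteps that; a hedged sketch, assuming every element is a string as the schema declares and that kafka-go's ConfigEntry is the target type (the helper name is hypothetical):

package main

import (
	"github.com/hashicorp/terraform-plugin-framework/attr"
	"github.com/hashicorp/terraform-plugin-framework/types"
	"github.com/segmentio/kafka-go"
)

// configEntriesFromMap converts framework map elements to kafka-go config
// entries. ValueString returns the plain Go string, so no quote stripping
// is needed.
func configEntriesFromMap(elements map[string]attr.Value) []kafka.ConfigEntry {
	var entries []kafka.ConfigEntry
	for name, value := range elements {
		strVal, ok := value.(types.String)
		if !ok {
			continue // unexpected element type; skip or collect a diagnostic
		}
		entries = append(entries, kafka.ConfigEntry{
			ConfigName:  name,
			ConfigValue: strVal.ValueString(),
		})
	}
	return entries
}

Such a helper would be called as configEntriesFromMap(data.Config.Elements()) in both Create and updateConfig.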
@@ -180,7 +180,7 @@ func (r *TopicResource) Read(ctx context.Context, req resource.ReadRequest, resp
return
}

-topicInfo, err := r.client.GetTopic(ctx, data.ID.Value, true)
+topicInfo, err := r.client.GetTopic(ctx, data.ID.ValueString(), true)
if err != nil {
resp.Diagnostics.AddError("Client Error", fmt.Sprintf("Unable to read topic, got error: %s", err))
return
@@ -192,17 +192,17 @@ func (r *TopicResource) Read(ctx context.Context, req resource.ReadRequest, resp
return
}

-data.Name = types.String{Value: topicInfo.Name}
-data.Partitions = types.Int64{Value: int64(len(topicInfo.Partitions))}
-data.ReplicationFactor = types.Int64{Value: int64(replicationFactor)}
+data.Name = types.StringValue(topicInfo.Name)
+data.Partitions = types.Int64Value(int64(len(topicInfo.Partitions)))
+data.ReplicationFactor = types.Int64Value(int64(replicationFactor))
configElement := map[string]attr.Value{}
for k, v := range topicInfo.Config {
-configElement[k] = types.String{Value: v}
+configElement[k] = types.StringValue(v)
}
-data.Config = types.Map{
-ElemType: types.StringType,
-Elems: configElement,
-}
+data.Config = types.MapValueMust(
+types.StringType,
+configElement,
+)

// Save updated data into Terraform state
resp.Diagnostics.Append(resp.State.Set(ctx, &data)...)
@@ -271,7 +271,7 @@ func (r *TopicResource) Update(ctx context.Context, req resource.UpdateRequest,
func (r *TopicResource) updateConfig(ctx context.Context, data *TopicResourceModel, req resource.UpdateRequest, resp *resource.UpdateResponse) error {
// Generate KafkaConfig
var configEntries []kafka.ConfigEntry
-for k, v := range data.Config.Elems {
+for k, v := range data.Config.Elements() {
configEntries = append(configEntries, kafka.ConfigEntry{
ConfigName: k,
// TODO: Why do we have to do this ugly remove quotes?
@@ -284,7 +284,7 @@ func (r *TopicResource) updateConfig(ctx context.Context, data *TopicResourceMod
Resources: []kafka.AlterConfigRequestResource{
{
ResourceType: kafka.ResourceTypeTopic,
-ResourceName: data.Name.Value,
+ResourceName: data.Name.ValueString(),
Configs: configEntriesToAlterConfigs(configEntries),
},
},
@@ -323,11 +323,11 @@ func (r *TopicResource) updateReplicationFactor(ctx context.Context, state *Topi
return err
}

-if data.ReplicationFactor.Value > int64(len(brokerIDs)) {
+if data.ReplicationFactor.ValueInt64() > int64(len(brokerIDs)) {
return fmt.Errorf("replication factor cannot be higher than the number of brokers")
}

-topicInfo, err := r.client.GetTopic(ctx, data.Name.Value, false)
+topicInfo, err := r.client.GetTopic(ctx, data.Name.ValueString(), false)
if err != nil {
return err
}
@@ -345,16 +345,16 @@ func (r *TopicResource) updateReplicationFactor(ctx context.Context, state *Topi
return err
}
for _, topic := range topics {
-if topic.Name != data.Name.Value {
+if topic.Name != data.Name.ValueString() {
nonAppliedTopics = append(
nonAppliedTopics,
topic,
)
}
}

-replicasWanted := data.ReplicationFactor.Value
-replicasPresent := state.ReplicationFactor.Value
+replicasWanted := data.ReplicationFactor.ValueInt64()
+replicasPresent := state.ReplicationFactor.ValueInt64()

var newPartitionsInfo []admin.PartitionInfo
for _, partition := range topicInfo.Partitions {
@@ -376,7 +376,7 @@ func (r *TopicResource) updateReplicationFactor(ctx context.Context, state *Topi
picker := pickers.NewClusterUsePicker(brokersInfo, nonAppliedTopics)
assigner := assigners.NewCrossRackAssigner(brokersInfo, picker)

-assignments, err := assigner.Assign(data.Name.Value, newAssignments)
+assignments, err := assigner.Assign(data.Name.ValueString(), newAssignments)
if err != nil {
return err
}
@@ -390,7 +390,7 @@ func (r *TopicResource) updateReplicationFactor(ctx context.Context, state *Topi
apiAssignments = append(apiAssignments, apiAssignment)
}
alterPartitionReassignmentsRequest := kafka.AlterPartitionReassignmentsRequest{
-Topic: data.Name.Value,
+Topic: data.Name.ValueString(),
Assignments: apiAssignments,
}

@@ -456,7 +456,7 @@ func reduceReplicas(desired int, replicas []int, leader int) []int {
}

func (r *TopicResource) updatePartitions(ctx context.Context, state *TopicResourceModel, data *TopicResourceModel, req resource.UpdateRequest, resp *resource.UpdateResponse) error {
-if data.Partitions.Value < state.Partitions.Value {
+if data.Partitions.ValueInt64() < state.Partitions.ValueInt64() {
return fmt.Errorf("partition count can't be reduced")
}

@@ -468,7 +468,7 @@ func (r *TopicResource) updatePartitions(ctx context.Context, state *TopicResour
if err != nil {
return err
}
-topicInfo, err := r.client.GetTopic(ctx, data.Name.Value, false)
+topicInfo, err := r.client.GetTopic(ctx, data.Name.ValueString(), false)
if err != nil {
return err
}
@@ -477,7 +477,7 @@ func (r *TopicResource) updatePartitions(ctx context.Context, state *TopicResour
for _, b := range brokersInfo {
tflog.Debug(ctx, fmt.Sprintf("Broker ID: %v Rack: %s", b.ID, b.Rack))
}
-extraPartitions := int(data.Partitions.Value) - int(state.Partitions.Value)
+extraPartitions := int(data.Partitions.ValueInt64()) - int(state.Partitions.ValueInt64())

picker := pickers.NewRandomizedPicker()
extender := extenders.NewBalancedExtender(
Expand All @@ -486,7 +486,7 @@ func (r *TopicResource) updatePartitions(ctx context.Context, state *TopicResour
picker,
)
desiredAssignments, err := extender.Extend(
-data.Name.Value,
+data.Name.ValueString(),
currAssignments,
extraPartitions,
)
@@ -497,7 +497,7 @@ func (r *TopicResource) updatePartitions(ctx context.Context, state *TopicResour

tflog.Info(ctx, fmt.Sprintf("Assignments: %v", desiredAssignments))

-err = r.client.AddPartitions(ctx, data.Name.Value, desiredAssignments)
+err = r.client.AddPartitions(ctx, data.Name.ValueString(), desiredAssignments)
if err != nil {
return err
}
@@ -516,7 +516,7 @@ func (r *TopicResource) Delete(ctx context.Context, req resource.DeleteRequest,
}

clientResp, err := r.client.GetConnector().KafkaClient.DeleteTopics(ctx, &kafka.DeleteTopicsRequest{
-Topics: []string{data.Name.Value},
+Topics: []string{data.Name.ValueString()},
})
if err != nil {
resp.Diagnostics.AddError("Client Error", fmt.Sprintf("Unable to delete topic, got error: %s", err))
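One side note on Delete: with segmentio/kafka-go, which this provider appears to wrap via GetConnector().KafkaClient, DeleteTopics can return a nil transport error while individual topics still fail, so the per-topic errors in the response are worth checking too. A sketch under that assumption (deleteTopic is a hypothetical helper, not code from this commit):

package main

import (
	"context"
	"fmt"

	"github.com/segmentio/kafka-go"
)

// deleteTopic issues a DeleteTopics request and surfaces both transport-level
// and per-topic failures.
func deleteTopic(ctx context.Context, client *kafka.Client, name string) error {
	resp, err := client.DeleteTopics(ctx, &kafka.DeleteTopicsRequest{
		Topics: []string{name},
	})
	if err != nil {
		return err
	}
	for topic, topicErr := range resp.Errors {
		if topicErr != nil {
			return fmt.Errorf("deleting topic %s: %w", topic, topicErr)
		}
	}
	return nil
}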
