Skip to content

Commit 68e13be

Browse files
committed
[Fix] Fix drift in databricks_cluster when is_single_node is set to true
1 parent bd8edc3 commit 68e13be

File tree

3 files changed

+162
-6
lines changed

3 files changed

+162
-6
lines changed

NEXT_CHANGELOG.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@
66

77
### Bug Fixes
88

9+
* Fix `databricks_cluster` drift when `is_single_node` is `true` ([#4360](https://github.com/databricks/terraform-provider-databricks/issues/4360)).
910
* Correctly handle PAT and OBO tokens without expiration ([#4444](https://github.com/databricks/terraform-provider-databricks/pull/4444)).
1011

1112
### Documentation

clusters/resource_cluster.go

Lines changed: 55 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -31,11 +31,17 @@ const (
3131

3232
func ResourceCluster() common.Resource {
3333
return common.Resource{
34-
Create: resourceClusterCreate,
35-
Read: resourceClusterRead,
36-
Update: resourceClusterUpdate,
37-
Delete: resourceClusterDelete,
38-
Schema: clusterSchema,
34+
Create: resourceClusterCreate,
35+
Read: resourceClusterRead,
36+
Update: resourceClusterUpdate,
37+
Delete: resourceClusterDelete,
38+
Schema: clusterSchema,
39+
CustomizeDiff: func(ctx context.Context, d *schema.ResourceDiff) error {
40+
if isSingleNode, ok := d.GetOk("is_single_node"); ok && isSingleNode.(bool) {
41+
return singleNodeClusterChangesCustomizeDiff(d)
42+
}
43+
return nil
44+
},
3945
SchemaVersion: clusterSchemaVersion,
4046
Timeouts: resourceClusterTimeouts(),
4147
StateUpgraders: []schema.StateUpgrader{
@@ -48,6 +54,48 @@ func ResourceCluster() common.Resource {
4854
}
4955
}
5056

57+
// the API automatically sets the `ResourceClass` key in `custom_tags` and two other keys in the `spark_conf`.
58+
// If the user hasn't set these explicitly in their config, the plan marks these keys for removal.
59+
// This method copies the values for these keys from state to the plan.
60+
// This needs to be done in addition to setting these attributes as computed; otherwise, this customization
61+
// won't take effect for users who have set additional `spark_conf` or `custom_tags`.
62+
func singleNodeClusterChangesCustomizeDiff(d *schema.ResourceDiff) error {
63+
autoConfigAttributes := map[string][]string{
64+
"custom_tags": {"ResourceClass"},
65+
"spark_conf": {"spark.databricks.cluster.profile", "spark.master"},
66+
}
67+
68+
for key, attributes := range autoConfigAttributes {
69+
if !d.HasChange(key) {
70+
continue
71+
}
72+
73+
o, n := d.GetChange(key)
74+
old, okOld := o.(map[string]interface{})
75+
new, okNew := n.(map[string]interface{})
76+
77+
if !okNew || !okOld {
78+
return fmt.Errorf("internal type casting error n: %T, o: %T", n, o)
79+
}
80+
81+
log.Printf("[DEBUG] values for key %s, old: %v, new: %v", key, old, new)
82+
83+
for _, attribute := range attributes {
84+
if _, exists := new[attribute]; exists && new[attribute] != nil {
85+
continue
86+
}
87+
88+
new[attribute] = old[attribute]
89+
}
90+
91+
if err := d.SetNew(key, new); err != nil {
92+
return err
93+
}
94+
}
95+
96+
return nil
97+
}
98+
5199
func clusterSchemaV0() cty.Type {
52100
return (&schema.Resource{
53101
Schema: clusterSchema}).CoreConfigSchema().ImpliedType()
@@ -346,7 +394,8 @@ func (ClusterSpec) CustomizeSchema(s *common.CustomizableSchema) *common.Customi
346394
s.SchemaPath("docker_image", "url").SetRequired()
347395
s.SchemaPath("docker_image", "basic_auth", "password").SetRequired().SetSensitive()
348396
s.SchemaPath("docker_image", "basic_auth", "username").SetRequired()
349-
s.SchemaPath("spark_conf").SetCustomSuppressDiff(SparkConfDiffSuppressFunc)
397+
s.SchemaPath("spark_conf").SetCustomSuppressDiff(SparkConfDiffSuppressFunc).SetComputed().SetOptional()
398+
s.SchemaPath("custom_tags").SetComputed().SetOptional()
350399
s.SchemaPath("aws_attributes").SetSuppressDiff().SetConflictsWith([]string{"azure_attributes", "gcp_attributes"})
351400
s.SchemaPath("aws_attributes", "zone_id").SetCustomSuppressDiff(ZoneDiffSuppress)
352401
s.SchemaPath("azure_attributes").SetSuppressDiff().SetConflictsWith([]string{"aws_attributes", "gcp_attributes"})

clusters/resource_cluster_test.go

Lines changed: 106 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@ import (
88
"github.com/databricks/databricks-sdk-go/service/compute"
99
"github.com/databricks/terraform-provider-databricks/common"
1010
"github.com/databricks/terraform-provider-databricks/qa"
11+
"github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
1112
"github.com/stretchr/testify/assert"
1213
"github.com/stretchr/testify/require"
1314
)
@@ -1630,6 +1631,111 @@ func TestResourceClusterCreate_SingleNode(t *testing.T) {
16301631
assert.NoError(t, err)
16311632
assert.Equal(t, 0, d.Get("num_workers"))
16321633
}
1634+
1635+
// TestResourceClusterCreate_SingleNodeAutoPropertiesCustomizeDiff verifies the
// CustomizeDiff behavior for single-node clusters: the server-injected keys
// (custom_tags.ResourceClass, spark_conf.spark.master and
// spark_conf.spark.databricks.cluster.profile) present only in state must not
// produce a plan diff, while genuine user edits to other keys still do.
func TestResourceClusterCreate_SingleNodeAutoPropertiesCustomizeDiff(t *testing.T) {
	testCases := []struct {
		name          string
		hcl           string                                 // extra HCL appended to the shared base config
		instanceState map[string]string                      // prior state in flattened attribute form
		expectedDiff  map[string]*terraform.ResourceAttrDiff // nil: only the always-computed attrs may differ
	}{
		{
			"resource with no custom_tags or spark_conf",
			"",
			map[string]string{
				"custom_tags.ResourceClass":                   "SingleNode",
				"spark_conf.spark.master":                     "local[*]",
				"spark_conf.spark.databricks.cluster.profile": "singleNode",
			},
			nil,
		},
		{
			"resource with custom_tags and spark_conf",
			`custom_tags = {
				"ClusterName" = "SingleNode"
			}
			spark_conf = {
				"spark.databricks.delta.preview.enabled" = "true"
			}`,
			map[string]string{
				"custom_tags.ClusterName":                           "SingleNode",
				"custom_tags.ResourceClass":                         "SingleNode",
				"spark_conf.spark.master":                           "local[*]",
				"spark_conf.spark.databricks.cluster.profile":       "singleNode",
				"spark_conf.spark.databricks.delta.preview.enabled": "true",
			},
			nil,
		},
		{
			// User changes their own keys; only those should appear in the diff,
			// never the auto-injected single-node keys.
			"resource with custom_tags and spark_conf changes",
			`custom_tags = {
				"ClusterName" = "MonoNode"
			}
			spark_conf = {
				"spark.databricks.delta.preview.enabled" = "false"
			}`,
			map[string]string{
				"custom_tags.ClusterName":                           "SingleNode",
				"custom_tags.ResourceClass":                         "SingleNode",
				"spark_conf.spark.master":                           "local[*]",
				"spark_conf.spark.databricks.cluster.profile":       "singleNode",
				"spark_conf.spark.databricks.delta.preview.enabled": "true",
			},
			map[string]*terraform.ResourceAttrDiff{
				"custom_tags.ClusterName":                           {New: "MonoNode", Old: "SingleNode"},
				"spark_conf.spark.databricks.delta.preview.enabled": {New: "false", Old: "true"},
			},
		},
	}
	for _, testCase := range testCases {
		t.Run(testCase.name, func(t *testing.T) {
			expectedDiff := testCase.expectedDiff

			if expectedDiff == nil {
				expectedDiff = make(map[string]*terraform.ResourceAttrDiff)
			}

			// These computed attributes always show up as NewComputed in a plan
			// against this fixture, regardless of the test case.
			expectedDiff["default_tags.%"] = &terraform.ResourceAttrDiff{Old: "", New: "", NewComputed: true, NewRemoved: false, RequiresNew: false, Sensitive: false}
			expectedDiff["driver_instance_pool_id"] = &terraform.ResourceAttrDiff{Old: "", New: "", NewComputed: true, NewRemoved: false, RequiresNew: false, Sensitive: false}
			expectedDiff["driver_node_type_id"] = &terraform.ResourceAttrDiff{Old: "", New: "", NewComputed: true, NewRemoved: false, RequiresNew: false, Sensitive: false}
			expectedDiff["enable_elastic_disk"] = &terraform.ResourceAttrDiff{Old: "", New: "", NewComputed: true, NewRemoved: false, RequiresNew: false, Sensitive: false}
			expectedDiff["enable_local_disk_encryption"] = &terraform.ResourceAttrDiff{Old: "", New: "", NewComputed: true, NewRemoved: false, RequiresNew: false, Sensitive: false}
			expectedDiff["state"] = &terraform.ResourceAttrDiff{Old: "", New: "", NewComputed: true, NewRemoved: false, RequiresNew: false, Sensitive: false}
			expectedDiff["url"] = &terraform.ResourceAttrDiff{Old: "", New: "", NewComputed: true, NewRemoved: false, RequiresNew: false, Sensitive: false}

			// Shared baseline state matching the fixture HCL below, merged with
			// the per-case map (which the cases above populate with the
			// single-node auto keys).
			instanceState := testCase.instanceState
			instanceState["cluster_id"] = "abc"
			instanceState["autotermination_minutes"] = "60"
			instanceState["cluster_name"] = "Single Node Cluster"
			instanceState["data_security_mode"] = "SINGLE_USER"
			instanceState["spark_version"] = "15.4.x-scala2.12"
			instanceState["single_user_name"] = "testuser"
			instanceState["runtime_engine"] = "STANDARD"
			instanceState["num_workers"] = "0"
			instanceState["node_type_id"] = "Standard_F4s"
			instanceState["kind"] = "CLASSIC_PREVIEW"
			instanceState["is_single_node"] = "true"

			qa.ResourceFixture{
				HCL: `
				spark_version = "15.4.x-scala2.12"
				runtime_engine = "STANDARD"
				node_type_id = "Standard_F4s"
				kind = "CLASSIC_PREVIEW"
				cluster_name = "Single Node Cluster"
				data_security_mode = "SINGLE_USER"
				autotermination_minutes = 60
				is_single_node = true
				single_user_name = "testuser"
				` + testCase.hcl,
				InstanceState: instanceState,
				ExpectedDiff:  expectedDiff,
				Resource:      ResourceCluster(),
			}.ApplyNoError(t)
		})
	}
}
1738+
16331739
func TestResourceClusterCreate_NegativeNumWorkers(t *testing.T) {
16341740
_, err := qa.ResourceFixture{
16351741
Create: true,

0 commit comments

Comments
 (0)