diff --git a/.goreleaser.yml b/.goreleaser.yml
index a546251..591f01f 100644
--- a/.goreleaser.yml
+++ b/.goreleaser.yml
@@ -1,13 +1,14 @@
# Visit https://goreleaser.com for documentation on how to customize this
# behavior.
+version: 2
env:
- - PROVIDER_VERSION=3.4.1
+ - PROVIDER_VERSION=4.0.0
before:
hooks:
# this is just an example and not a requirement for provider building/publishing
- go mod tidy
snapshot:
- name_template: '{{ .Env.PROVIDER_VERSION }}'
+ version_template: '{{ .Env.PROVIDER_VERSION }}'
builds:
- env:
# goreleaser does not work with CGO, it could also complicate
@@ -41,7 +42,7 @@ builds:
- CP_TARGET={{ .Target }}
- CP_VERSION={{ .Env.PROVIDER_VERSION }}
archives:
-- format: zip
+- formats: [ 'zip' ]
name_template: '{{ .ProjectName }}_{{ .Env.PROVIDER_VERSION }}_{{ .Os }}_{{ .Arch }}'
checksum:
extra_files:
diff --git a/.whitesource b/.whitesource
index c1c0169..ee0fa40 100644
--- a/.whitesource
+++ b/.whitesource
@@ -1,48 +1,48 @@
{
- "scanSettings": {
- "configMode": "AUTO",
- "configExternalURL": "",
- "projectToken": "",
- "enableLicenseViolations": "true",
- "displayLicenseViolations": "true",
- "enableIaC": "true",
- "baseBranches": []
- },
- "scanSettingsSAST": {
- "enableScan": true,
- "scanPullRequests": true,
- "incrementalScan": true,
- "baseBranches": [],
- "snippetSize": 10
- },
- "checkRunSettings": {
- "vulnerableCheckRunConclusionLevel": "failure",
- "displayMode": "diff",
- "useMendCheckNames": true
- },
- "checkRunSettingsSAST": {
- "checkRunConclusionLevel": "failure",
- "severityThreshold": "high"
- },
- "issueSettings": {
- "minSeverityLevel": "LOW",
- "issueType": "DEPENDENCY"
- },
- "issueSettingsSAST": {
- "minSeverityLevel": "high",
- "issueType": "repo"
- },
- "remediateSettings": {
- "workflowRules": {
- "enabled": true
- }
- },
- "imageSettings":{
- "imageTracing":{
- "enableImageTracingPR": false,
- "addRepositoryCoordinate": false,
- "addDockerfilePath": false,
- "addMendIdentifier": false
- }
+ "scanSettings": {
+ "configMode": "AUTO",
+ "configExternalURL": "",
+ "projectToken": "",
+ "enableLicenseViolations": "true",
+ "displayLicenseViolations": "true",
+ "enableIaC": "true",
+ "baseBranches": []
+ },
+ "scanSettingsSAST": {
+ "enableScan": true,
+ "scanPullRequests": true,
+ "incrementalScan": true,
+ "baseBranches": [],
+ "snippetSize": 10
+ },
+ "checkRunSettings": {
+ "vulnerableCheckRunConclusionLevel": "failure",
+ "displayMode": "diff",
+ "useMendCheckNames": true
+ },
+ "checkRunSettingsSAST": {
+ "checkRunConclusionLevel": "failure",
+ "severityThreshold": "high"
+ },
+ "issueSettings": {
+ "minSeverityLevel": "LOW",
+ "issueType": "DEPENDENCY"
+ },
+ "issueSettingsSAST": {
+ "minSeverityLevel": "high",
+ "issueType": "repo"
+ },
+ "remediateSettings": {
+ "workflowRules": {
+ "enabled": true
}
+ },
+ "imageSettings":{
+ "imageTracing":{
+ "enableImageTracingPR": false,
+ "addRepositoryCoordinate": false,
+ "addDockerfilePath": false,
+ "addMendIdentifier": false
+ }
+ }
}
\ No newline at end of file
diff --git a/GNUmakefile b/GNUmakefile
index 20fcb15..367ce52 100644
--- a/GNUmakefile
+++ b/GNUmakefile
@@ -3,7 +3,7 @@ HOSTNAME=delphix.com
NAMESPACE=dct
NAME=delphix
BINARY=terraform-provider-${NAME}
-VERSION=3.4.1
+VERSION=4.0.0
OS_ARCH=darwin_arm64
default: install
diff --git a/docs/index.md b/docs/index.md
index 49dae6c..034f961 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -14,7 +14,7 @@ If you are a DCT customer, you can also request support from the [Delphix Suppor
| Product | Version |
|--------------------------------|----------|
-| Data Control Tower (DCT) | v2025.1.2+ |
+| Data Control Tower (DCT) | v2025.2.0+ |
| Delphix Continuous Data Engine | v29.0.0.0+ |
Note: The DCT and Continuous Data versions above guarantees full provider support. However, each resource might support older versions. Refer to the specific resource documentation page for more information.
@@ -40,7 +40,7 @@ terraform {
required_providers {
delphix = {
source = "delphix-integrations/delphix"
- version = "3.4.0"
+ version = "4.0.0"
}
}
}
@@ -79,4 +79,6 @@ Consult the Resources section for details on individual resources, such as VDB,
| delphix_oracle_dsource | v 3.1.0 | v 10.0.0 |
| delphix_database_postgresql | v 3.2.0 | v 14.0.0 |
| delphix_vdb update
delphix_database_postgresql import | v 3.3.0 | v 22.0.0 |
-| delphix_oracle_dsource update
-delphix_oracle_dsource import | v 3.4.0 | v 2025.1.2 |
+| delphix_oracle_dsource update
+delphix_oracle_dsource import | v 3.4.1 | v 2025.1.2 |
+| delphix_appdata_dsource update
+delphix_appdata_dsource import | v 4.0.0 | v 2025.2.0 |
+| delphix_environment update
+delphix_environment import | v 4.0.0 | v 2025.2.0 |
\ No newline at end of file
diff --git a/docs/resources/appdata_dsource.md b/docs/resources/appdata_dsource.md
index 6269b53..b72da9b 100644
--- a/docs/resources/appdata_dsource.md
+++ b/docs/resources/appdata_dsource.md
@@ -1,194 +1,197 @@
# Resource: delphix_appdata_dsource
-In Delphix terminology, a dSource is a database that the Delphix Continuous Data Engine uses to create and update virtual copies of your database.
-A dSource is created and managed by the Delphix Continuous Data Engine.
-
-The Appdata dSource resource allows Terraform to create and delete AppData dSources. This specifically enables the apply and destroy Terraform commands. Modification of existing dSource resources via the apply command is not supported. All supported parameters are listed below.
-
-## System Requirements
-
-* Data Control Tower v10.0.1+ is required for dSource management. Lower versions are not supported.
-* This Appdata dSource Resource only supports Appdata based datasource's , such as POSTGRES,SAP HANA, IBM Db2, etc.The below examples are shown from the PostgreSQL context. See the Oracle dSource Resource for the support of Oracle. The Delphix Provider does not support Oracle, SQL Server, or SAP ASE.
-
-## Upgrade Guide
-* Any new dSource created post Version>=3.2.1 can set `wait_time` to wait for snapshot creation , dSources created prior to this version will not support this capability
-
-## Note
-* `status` and `enabled` are subject to change in the tfstate file based on the dSource state.
+In Delphix terminology, a dSource is an internal, read-only database copy that the Delphix Continuous Data Engine uses to create and update virtual copies of your database.
+A dSource is created and managed by the Delphix Continuous Data Engine and syncs with your chosen source database. The AppData dSource resource in Terraform allows you to create, update, delete and import AppData dSources. Updating existing dSource resource parameters via the apply command is supported for the parameters listed below.
+For Oracle, refer to the Oracle dSource resource. The Delphix Provider does not currently support SQL Server or SAP ASE.
+
+## Note
+* `status` and `enabled` are computed values and are subject to change in the tfstate file based on the dSource state.
+* The `credentials_env_vars` parameters within the `ops_pre_sync` and `ops_post_sync` object blocks are not updatable. Any changes made to them are recorded in the Terraform state file but are not applied to the actual infrastructure.
+* Sensitive values in `credentials_env_vars` are stored as plain text in the state file. We recommend following Terraform’s sensitive input variables documentation; see the sketch after this list.
+* `source_value` and `group_id` parameters cannot be updated after the initial resource creation. However, any differences detected in these parameters are suppressed from the Terraform plan to prevent unnecessary drift detection.
+* Only valid for DCT versions 2025.1 and earlier:
+  * `make_current_account_owner`, `wait_time` and `skip_wait_for_snapshot_creation` are applicable only during the creation of the dSource. These parameters are single-use and not applicable to updates.
+  * Any dSource created with provider version 3.2.1 or later can set `wait_time` to wait for snapshot creation; dSources created with earlier versions do not support this capability.
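+
+A minimal sketch of the sensitive-variable pattern, assuming the hooks accept block syntax; every name and value below is hypothetical:
+
+```hcl
+# Declared as sensitive so Terraform redacts the value from plan/apply output.
+# Note: the value is still written to the state file in plain text.
+variable "source_db_password" {
+  type      = string
+  sensitive = true
+}
+
+resource "delphix_appdata_dsource" "example" {
+  # ...general linking arguments omitted...
+  ops_pre_sync {
+    name    = "quiesce" # hypothetical hook
+    command = "/scripts/quiesce.sh"
+    shell   = "bash"
+    credentials_env_vars {
+      base_var_name = "SOURCE_DB" # the hook sees SOURCE_DB_USER / SOURCE_DB_PASSWORD
+      password      = var.source_db_password
+    }
+  }
+}
+```
+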
## Example Usage
-The linking of a dSource can be configured through various ingestion approaches. Each configuration is customized to the connector and its supported options. The three PostgreSQL parameter sets below show working examples.
+The linking of a dSource can be configured through various ingestion approaches. Each configuration is customized to the connector and its supported options. The three PostgreSQL parameter sets below show different ingestion configuration examples.
+### Link dSource using external backup
+
```hcl
-# Link dSource using external backup.
-
-resource "delphix_appdata_dsource" "dsource_name" {
- source_value = SOURCE_VALUE
- group_id = GROUP_ID
- log_sync_enabled = false
- make_current_account_owner = true
- link_type = LINK_TYPE
- name = DSOURCE_NAME
- staging_mount_base = MOUNT_PATH
- environment_user = ENV_USER
- staging_environment = STAGING_ENV
- parameters = jsonencode({
- externalBackup: [
- {
- keepStagingInSync: false,
- backupPath: BKP_PATH,
- walLogPath: LOG_PATH
- }
- ],
- postgresPort : PORT,
- mountLocation : MOUNT_PATH
- })
- sync_parameters = jsonencode({
- resync = true
- })
-}
-
-# Link dSource using Delphix Initiated Backup.
-
-resource "delphix_appdata_dsource" "dsource_name" {
- source_value = SOURCE_VALUE
- group_id = GROUP_ID
- log_sync_enabled = false
- make_current_account_owner = true
- link_type = LINK_TYPE
- name = DSOURCE_NAME
- staging_mount_base = MOUNT_PATH
- environment_user = ENV_USER
- staging_environment = STAGING_ENV
- parameters = jsonencode({
- delphixInitiatedBackupFlag : true,
- delphixInitiatedBackup : [
- {
- userName : USERNAME,
- postgresSourcePort : SOURCE_PORT,
- userPass : PASSWORD,
- sourceHostAddress : SOURCE_ADDRESS
- }
- ],
- postgresPort : PORT,
- mountLocation : MOUNT_PATH
- })
- sync_parameters = jsonencode({
- resync = true
- })
-}
-
-# Link dSource using Single Database Ingestion.
-
-resource "delphix_appdata_dsource" "dsource_name" {
- source_value = SOURCE_VALUE
- group_id = GROUP_ID
- log_sync_enabled = false
- make_current_account_owner = true
- link_type = LINK_TYPE
- name = DSOURCE_NAME
- staging_mount_base = MOUNT_PATH
- environment_user = ENV_USER
- staging_environment = STAGING_ENV
- parameters = jsonencode({
- singleDatabaseIngestionFlag : true,
- singleDatabaseIngestion : [
- {
- databaseUserName: DBUSER_NAME,
- sourcePort: SOURCE_PORT,
- dumpJobs: 2,
- restoreJobs: 2,
- databaseName: DB_NAME,
- databaseUserPassword: DB_PASS,
- dumpDir: DIR,
- sourceHost: SOURCE_HOST
- postgresqlFile: FILE
- }
- ],
- postgresPort : PORT,
- mountLocation : MOUNT_PATH
- })
- sync_parameters = jsonencode({
- resync = true
- })
-}
+resource "delphix_appdata_dsource" "pg_using_external_backup" {
+ name = DSOURCE_NAME
+ source_value = SOURCE_VALUE
+ group_id = DATASET_GROUP_ID
+ log_sync_enabled = false
+ link_type = "AppDataStaged"
+ staging_mount_base = MOUNT_PATH
+ environment_user = ENV_USER
+ staging_environment = STAGING_ENV
+ parameters = jsonencode({
+ externalBackup: [
+ {
+ keepStagingInSync: false,
+ backupPath: BKP_PATH,
+ walLogPath: LOG_PATH
+ }
+ ],
+ postgresPort : PORT,
+ mountLocation : MOUNT_PATH
+ })
+ sync_parameters = jsonencode({
+ resync = true
+ })
+ make_current_account_owner = true
+}
```
-## Argument Reference
-
-* `source_value` - (Required) Id or Name of the source to link.
-
-* `group_id` - (Required) Id of the dataset group where this dSource should belong to.
-
-* `log_sync_enabled` - (Required) True if LogSync should run for this database.
-
-* `make_current_account_owner` - (Required) Whether the account creating this reporting schedule must be configured as owner of the reporting schedule.
-
-* `rollback_on_failure` - Dsource linking operation when fails during snapsync creates a tainted dsource on the engine. Setting this flag to true will remove the tainted dsource from state as well as engine. By default, it is set to false, where the tainted dsource is maintained on the terraform state.
-
-* `description` - The notes/description for the dSource.
-
-* `link_type` - (Required) The type of link to create. Default is AppDataDirect.
- * `AppDataDirect` - Represents the AppData specific parameters of a link request for a source directly replicated into the Delphix Engine.
- * `AppDataStaged` - Represents the AppData specific parameters of a link request for a source with a staging source.
-
-* `name` - The unique name of the dSource. If unset, a name is randomly generated.
-
-* `staging_mount_base` - The base mount point for the NFS mount on the staging environment [AppDataStaged only].
-
-* `environment_user` - (Required) The OS user to use for linking.
-
-* `staging_environment` - (Required) The environment used as an intermediate stage to pull data into Delphix [AppDataStaged only].
-
-* `staging_environment_user` - The environment user used to access the staging environment [AppDataStaged only].
-
-* `tags` - The tags to be created for dSource. This is a map of 2 parameters:
- * `key` - (Required) Key of the tag
- * `value` - (Required) Value of the tag
-
-* `ops_pre_sync` - Operations to perform before syncing the created dSource. These operations can quiesce any data prior to syncing
- * `name` - Name of the hook
- * `command` - Command to be executed
- * `shell` - Type of shell. Valid values are `[bash, shell, expect, ps, psd]`
- * `credentials_env_vars` - List of environment variables that will contain credentials for this operation
- * `base_var_name` - Base name of the environment variables. Variables are named by appending '_USER', '_PASSWORD', '_PUBKEY' and '_PRIVKEY' to this base name, respectively. Variables whose values are not entered or are not present in the type of credential or vault selected, will not be set.
- * `password` - Password to assign to the environment variables.
- * `vault` - The name or reference of the vault to assign to the environment variables.
- * `hashicorp_vault_engine` - Vault engine name where the credential is stored.
- * `hashicorp_vault_secret_path` - Path in the vault engine where the credential is stored.
- * `hashicorp_vault_username_key` - Hashicorp vault key for the username in the key-value store.
- * `hashicorp_vault_secret_key` - Hashicorp vault key for the password in the key-value store.
- * `azure_vault_name` - Azure key vault name.
- * `azure_vault_username_key` - Azure vault key in the key-value store.
- * `azure_vault_secret_key` - Azure vault key in the key-value store.
- * `cyberark_vault_query_string` - Query to find a credential in the CyberArk vault.
-
-* `ops_post_sync` - Operations to perform after syncing a created dSource.
- * `name` - Name of the hook
- * `command` - Command to be executed
- * `shell` - Type of shell. Valid values are `[bash, shell, expect, ps, psd]`
- * `credentials_env_vars` - List of environment variables that will contain credentials for this operation
- * `base_var_name` - Base name of the environment variables. Variables are named by appending '_USER', '_PASSWORD', '_PUBKEY' and '_PRIVKEY' to this base name, respectively. Variables whose values are not entered or are not present in the type of credential or vault selected, will not be set.
- * `password` - Password to assign to the environment variables.
- * `vault` - The name or reference of the vault to assign to the environment variables.
- * `hashicorp_vault_engine` - Vault engine name where the credential is stored.
- * `hashicorp_vault_secret_path` - Path in the vault engine where the credential is stored.
- * `hashicorp_vault_username_key` - Hashicorp vault key for the username in the key-value store.
- * `hashicorp_vault_secret_key` - Hashicorp vault key for the password in the key-value store.
- * `azure_vault_name` - Azure key vault name.
- * `azure_vault_username_key` - Azure vault key in the key-value store.
- * `azure_vault_secret_key` - Azure vault key in the key-value store.
- * `cyberark_vault_query_string` - Query to find a credential in the CyberArk vault.
-
-* `excludes` - List of subdirectories in the source to exclude when syncing data.These paths are relative to the root of the source directory. [AppDataDirect only]
-
-* `follow_symlinks` - List of symlinks in the source to follow when syncing data.These paths are relative to the root of the source directory. All other symlinks are preserved. [AppDataDirect only]
+### Link dSource using Delphix Initiated Backup
+
+```hcl
+resource "delphix_appdata_dsource" "pg_using_delphix_initiated_backup" {
+ name = DSOURCE_NAME
+ source_value = SOURCE_VALUE
+ group_id = DATASET_GROUP_ID
+ log_sync_enabled = false
+ link_type = "AppDataStaged"
+ staging_mount_base = MOUNT_PATH
+ environment_user = ENV_USER
+ staging_environment = STAGING_ENV
+ parameters = jsonencode({
+ delphixInitiatedBackupFlag : true,
+ delphixInitiatedBackup : [
+ {
+ userName : USERNAME,
+ postgresSourcePort : SOURCE_PORT,
+ userPass : PASSWORD,
+ sourceHostAddress : SOURCE_ADDRESS
+ }
+ ],
+ postgresPort : PORT,
+ mountLocation : MOUNT_PATH
+ })
+ sync_parameters = jsonencode({
+ resync = true
+ })
+ make_current_account_owner = true
+}
+```
-* `parameters` - The JSON payload is based on the type of dSource being created. Different data sources require different parameters.
+### Link dSource using Single Database Ingestion
+
+```hcl
+resource "delphix_appdata_dsource" "pg_using_single_db_ingestion" {
+ name = DSOURCE_NAME
+ source_value = SOURCE_VALUE
+ group_id = DATASET_GROUP_ID
+ log_sync_enabled = false
+ link_type = "AppDataStaged"
+ staging_mount_base = MOUNT_PATH
+ environment_user = ENV_USER
+ staging_environment = STAGING_ENV
+ parameters = jsonencode({
+ singleDatabaseIngestionFlag : true,
+ singleDatabaseIngestion : [
+ {
+ databaseUserName: DBUSER_NAME,
+ sourcePort: SOURCE_PORT,
+ dumpJobs: 2,
+ restoreJobs: 2,
+ databaseName: DB_NAME,
+ databaseUserPassword: DB_PASS,
+ dumpDir: DIR,
+        sourceHost: SOURCE_HOST,
+ postgresqlFile: FILE
+ }
+ ],
+ postgresPort : PORT,
+ mountLocation : MOUNT_PATH
+ })
+ sync_parameters = jsonencode({
+ resync = true
+ })
+ make_current_account_owner = true
+}
+```
-* `sync_parameters` - The JSON payload conforming to the snapshot parameters definition in a LUA toolkit or platform plugin.
+## Argument Reference
-* `skip_wait_for_snapshot_creation` - In DCT v2025.1, waiting for Ingestion and Snapshotting (aka SnapSync) to complete is default functionality. Therefore, these the arguments skip_wait_for_snapshot_creation and wait_time are ignored. In future versions of the provider, we will look at re-implementing the skip SnapSync behavior
+### General Linking Requirements
+* `name` - The unique name of the dSource. If unset, a name is randomly generated. [Updatable]
+* `source_value` - (Required) ID or Name of the Source to link.
+* `description` - The notes/description for the dSource. [Updatable]
+* `group_id` - (Required) ID of the dataset group to which this dSource should belong.
+* `rollback_on_failure` - When a dSource linking operation fails during SnapSync, it results in a tainted dSource on the engine. Setting this flag to true removes the tainted dSource from both the Terraform state and the engine. By default, the flag is set to false, meaning the tainted dSource is retained in the Terraform state.
+
+### Full Backup and Transactional Log Requirements
+* `log_sync_enabled` - (Required) True if LogSync should run for this database.
+* `link_type` - (Required) The type of link to create. Default is AppDataDirect.
+ * `AppDataDirect` - Represents the AppData specific parameters of a link request for a source directly replicated into the Delphix Continuous Data Engine.
+ * `AppDataStaged` - Represents the AppData specific parameters of a link request for a source with a staging source.
+
+#### AppDataDirect properties
+* `excludes` - List of subdirectories in the source to exclude when syncing data. These paths are relative to the root of the source directory.
+* `follow_symlinks` - List of symlinks in the source to follow when syncing data. These paths are relative to the root of the source directory. All other symlinks are preserved.
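+
+A hypothetical sketch of the AppDataDirect-only arguments (the full PostgreSQL examples above all use `AppDataStaged`):
+
+```hcl
+resource "delphix_appdata_dsource" "direct_example" {
+  # ...general linking arguments omitted...
+  link_type       = "AppDataDirect"
+  excludes        = ["logs", "tmp"]  # subdirectories skipped when syncing
+  follow_symlinks = ["data/current"] # symlinks followed; all others are preserved
+}
+```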
+
+#### AppDataStaged properties
+* `staging_mount_base` - The base mount point for the NFS mount on the staging environment.
+* `environment_user` - (Required) The OS user to use for linking. [Updatable]
+* `staging_environment` - (Required) The environment used as an intermediate stage to pull data into Delphix. [Updatable]
+* `staging_environment_user` - Specifies the environment user that accesses the staging environment. [Updatable]
+* `parameters` - The JSON payload is based on the type of dSource being created. Different data sources require different parameters. Available parameters can be found within the data connector’s schema.json. [Updatable]
+* `sync_parameters` - The JSON payload conforming to the snapshot parameters definition in a Continuous Data plugin.
+* `sync_policy_id` - The ID of the SnapSync policy for the dSource. [Updatable]
+* `retention_policy_id` - The ID of the Retention policy for the dSource. [Updatable]
+
+### Hooks
+Any combination of the following hooks can be provided on the AppData dSource resource. The available arguments are identical for each hook and are consolidated in a single list to save space.
+
+#### Names
+* `ops_pre_sync`: Operations to perform before syncing the created dSource. These operations can quiesce any data prior to syncing. See argument list below.
+* `ops_post_sync`: Operations to perform after syncing a created dSource. See argument list below.
+
+#### Arguments
+* `name` - Name of the hook [Updatable]
+* `command` - Command to be executed [Updatable]
+* `shell` - Type of shell. Valid values are `[bash, shell, expect, ps, psd]` [Updatable]
+* `credentials_env_vars` - List of environment variables that contain credentials for this operation
+  * `base_var_name` - Base name of the environment variables. Variables are named by appending '_USER', '_PASSWORD', '_PUBKEY' and '_PRIVKEY' to this base name, respectively. Variables whose values are not entered or are not present in the type of credential or vault selected will not be set.
+ * `password` - Password to assign to the environment variables.
+ * `vault` - The name or reference of the vault to assign to the environment variables.
+ * `hashicorp_vault_engine` - Vault engine name where the credential is stored.
+ * `hashicorp_vault_secret_path` - Path in the vault engine where the credential is stored.
+ * `hashicorp_vault_username_key` - Hashicorp vault key for the username in the key-value store.
+ * `hashicorp_vault_secret_key` - Hashicorp vault key for the password in the key-value store.
+ * `azure_vault_name` - Azure key vault name.
+ * `azure_vault_username_key` - Azure vault key in the key-value store.
+ * `azure_vault_secret_key` - Azure vault key in the key-value store.
+ * `cyberark_vault_query_string` - Query to find a credential in the CyberArk vault.
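+
+As an illustration, a hook whose credentials are read from a HashiCorp Vault; every name and path below is hypothetical:
+
+```hcl
+resource "delphix_appdata_dsource" "vault_example" {
+  # ...general linking arguments omitted...
+  ops_post_sync {
+    name    = "notify"
+    command = "/scripts/notify.sh"
+    shell   = "bash"
+    credentials_env_vars {
+      base_var_name                = "SRC" # the hook sees SRC_USER / SRC_PASSWORD
+      vault                        = "company-vault"
+      hashicorp_vault_engine       = "kv"
+      hashicorp_vault_secret_path  = "delphix/source"
+      hashicorp_vault_username_key = "username"
+      hashicorp_vault_secret_key   = "password"
+    }
+  }
+}
+```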
+
+### Initial Ingestion and Snapshot [Deprecated]
+The following arguments enable the user to control how the first ingestion and snapshot (SnapSync) should be taken.
+* `skip_wait_for_snapshot_creation` - In DCT v2025.1, waiting for Ingestion and Snapshotting (aka SnapSync) to complete is default functionality. Therefore, the arguments `skip_wait_for_snapshot_creation` and `wait_time` are ignored. In future versions of the provider, we will look at re-implementing the skip SnapSync behavior.
+* `wait_time` - In DCT v2025.1, waiting for Ingestion and Snapshotting (aka SnapSync) to complete is default functionality. Therefore, the arguments `skip_wait_for_snapshot_creation` and `wait_time` are ignored. In future versions of the provider, we will look at re-implementing the skip SnapSync behavior.
+
+### Advanced
+The following arguments apply to all dSources but they are not often necessary for simple sources.
+* `make_current_account_owner` - (Required) True/False. Indicates whether the account creating this dSource must be configured as its owner.
+* `tags` - The tags to be created for dSource. This is a map of two parameters:
+ * `key` - (Required) Key of the tag
+ * `value` - (Required) Value of the tag
+* `ignore_tag_changes` - Controls whether Terraform detects changes to the resource's tags. By default, this is set to true, meaning changes to the resource's tags are ignored.
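+
+A minimal sketch of the tag arguments, assuming block syntax and hypothetical key/value pairs:
+
+```hcl
+resource "delphix_appdata_dsource" "tagged_example" {
+  # ...general linking arguments omitted...
+  tags {
+    key   = "team"
+    value = "analytics"
+  }
+  ignore_tag_changes = true # tag edits made outside Terraform will not show up as drift
+}
+```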
+
+## Import
+Use the [`import` block](https://developer.hashicorp.com/terraform/language/import) to add AppData dSources created directly in DCT into a Terraform state file.
+
+For example:
+```hcl
+import {
+ to = delphix_appdata_dsource.dsrc_import_demo
+ id = "dsource_id"
+}
+```
-* `wait_time` - In DCT v2025.1, waiting for Ingestion and Snapshotting (aka SnapSync) to complete is default functionality. Therefore, these the arguments skip_wait_for_snapshot_creation and wait_time are ignored. In future versions of the provider, we will look at re-implementing the skip SnapSync behavior.
\ No newline at end of file
+## Limitations
+Not all properties can be updated using the update command. Attempts to update an unsupported property will return a runtime error message.
\ No newline at end of file
diff --git a/docs/resources/database_postgresql.md b/docs/resources/database_postgresql.md
index 1141198..6d435c6 100644
--- a/docs/resources/database_postgresql.md
+++ b/docs/resources/database_postgresql.md
@@ -24,45 +24,25 @@ resource "delphix_database_postgresql" "source" {
## Argument Reference
* `name` - (Required) The name of the new source.
-
* `repository_value` - (Required) The Id or Name of the Repository onto which the source will be created..
-
* `environment_value` - The Id or Name of the environment to create the source on.
-
* `engine_value` - The Id or Name of the engine to create the source on.
-
* `id` - The Source object entity ID.
-
* `database_type` - The type of this source database.
-
* `namespace_id` - The namespace id of this source database.
-
* `namespace_name` - The namespace name of this source database.
-
* `is_replica` - Is this a replicated object.
-
* `database_version` - The version of this source database.
-
* `data_uuid` - A universal ID that uniquely identifies this source database.
-
* `ip_address` - The IP address of the source's host.
-
* `fqdn` - The FQDN of the source's host.
-
* `size` - The total size of this source database, in bytes.
-
* `jdbc_connection_string` - The JDBC connection URL for this source database.
-
* `plugin_version` - The version of the plugin associated with this source database.
-
* `toolkit_id` - The ID of the toolkit associated with this source database(AppData only).
-
* `is_dsource` - Is this associated with dSource.
-
* `repository` - The repository id for this source.
-
* `appdata_source_type` - The type of this appdata source database (Appdata Only).
-
* `tags` - The tags to be created for database. This is a map of 2 parameters:
* `key` - Key of the tag
* `value` - Value of the tag
diff --git a/docs/resources/environment.md b/docs/resources/environment.md
index 0d8103c..2fa6206 100644
--- a/docs/resources/environment.md
+++ b/docs/resources/environment.md
@@ -1,213 +1,261 @@
# Resource: delphix_environment
-In Delphix, an environment is either a single instance host or cluster of hosts that run database software.
+In Delphix, an environment is either a single instance host or a cluster of hosts that runs your databases. Environments can either be:
+- Source: where data originates.
+- Staging: where data is prepared for ingestion.
+- Target: where data is delivered and used by developers and testers.
+ - Note: Sometimes “Staging” is considered an intermediary environment which temporarily hosts a database for masking, subsetting, or synthetic data purposes. In virtualization, this is considered a Target environment.
+
+Each environment has unique properties and information depending on the operating system, installation, purpose, etc.
+The Delphix Environment resource (delphix_environment) in Terraform allows you to create, update, delete, and import Environments by enabling the apply, import, and destroy Terraform commands. Updating existing Delphix Environment resource parameters via the apply command is supported for the parameters specified below.
-Environments can either be a source (where data comes from), staging (where data are prepared/masked) or target (where data are delivered and used by developers and testers).
-
-Each environment has its own properties and information depending on the type of environment it is
+Note: In DCT, environments are frequently referred to as Infrastructure Connections.
## Example Usage
### Create UNIX standalone environment
-
```hcl
-resource "delphix_environment" "unix_env_name" {
- engine_id = 2
- os_name = "UNIX"
- username = "xxx"
- password = "xxx"
- hostname = "db.host.com"
- toolkit_path = "/home/delphix"
- name = "my-env"
- is_cluster = false
- cluster_home = "/home/ghrid"
- staging_environment = "stage"
- connector_port = 5312
- ssh_port = 22
- ase_db_password = "test"
- ase_db_username = "user-123"
- java_home = "/java/home"
- dsp_keystore_alias = "alias"
- dsp_keystore_password = "pass"
- dsp_keystore_path = "path"
- dsp_truststore_password = "pass"
- dsp_truststore_path = "/work"
- description = "desc"
- is_target = false
- }
+resource "delphix_environment" "unix_env_name" {
+ engine_id = 2
+ os_type = "UNIX"
+ name = "my-env"
+ username = "xxx"
+ password = "xxx"
+ hosts {
+ hostname = "db.host.com"
+ toolkit_path = "/home/delphix"
+ ssh_port = 22
+ java_home = "/java/home"
+ }
+ is_cluster = false
+ cluster_home = "/home/ghrid"
+ staging_environment = "stage"
+ connector_port = 5312
+ ase_db_password = "test"
+ ase_db_username = "user-123"
+ dsp_keystore_alias = "alias"
+ dsp_keystore_password = "pass"
+ dsp_keystore_path = "path"
+ dsp_truststore_password = "pass"
+ dsp_truststore_path = "/work"
+ description = "desc"
+ is_target = false
+ }
```
-### Create UNIX cluster
+
+### Creating a WINDOWS standalone target environment
```hcl
-resource "delphix_environment" "unixcluster" {
- engine_id = 2
- os_name = "UNIX"
- username = "xxx"
- password = "xxx"
- hostname = "db.host.com"
- toolkit_path = "/home/delphix"
- name = "unixcluster"
- description = "This is a unix target."
- is_cluster = true
- cluster_home = "/u01/app/19.0.0.0/grid"
- }
+resource "delphix_environment" "win_tgt" {
+ engine_id = 2
+ os_type = "WINDOWS"
+ name = "wintgt"
+ description = "This is a windows target."
+ username = "xxx"
+ password = "xxx"
+ hosts {
+ hostname = "xxx"
+ ssh_port = 22
+ }
+ connector_port = 9100
+ }
```
-### Creating UNIX standalone target environment using HashiCorp Vault
+### Creating a WINDOWS standalone source environment
```hcl
-resource "delphix_environment" "wintgt" {
- engine_id = 2
- os_name = "UNIX"
- hostname = "xxx"
- toolkit_path = "/home/delphix"
- name = "unixtgt"
-
- vault = "vault-name"
- hashicorp_vault_engine = "xxx"
- hashicorp_vault_secret_path = "xxx"
- hashicorp_vault_username_key = "xxx"
- hashicorp_vault_secret_key = "xxx"
-
- description = "This is unix target."
- }
+resource "delphix_environment" "win_standalone" {
+ engine_id = 2
+ os_type = "WINDOWS"
+ name = "WindowsSrc"
+ username = "xxx"
+ password = "xxx"
+ hosts {
+ hostname = "db.host.com"
+ }
+  staging_environment = delphix_environment.win_tgt.id
+ }
```
-### Creating UNIX standalone target environment using CyberArk Vault
+
+## Argument Reference
+
+### General Requirements
+* `name` - The name of the environment. [Updatable]
+* `description` - The environment description. [Updatable]
+* `os_type` - (Required) Operating system type of the environment. Valid values are `[UNIX, WINDOWS]`
+* `engine_id` - (Required) The ID of the Engine on which to create the environment. The ID can be obtained by querying the DCT Engines API. A Delphix Engine must be registered with DCT first for it to create an Engine ID.
+* `is_cluster` - Whether the environment to be created is a cluster.
+* `cluster_home` - Absolute path to cluster home directory. This parameter is (Required) for UNIX cluster environments. [Updatable]
+* `staging_environment` - ID of the environment where Delphix (Windows) Connector is installed. This is a required parameter when creating Windows source environments.
+* `connector_port` - The port on which Delphix connector will run. This is a (Required) parameter when creating Windows target environments. [Updatable]
+* `is_target` - Indicates whether the environment to be created is a target cluster environment. This property is used only when creating Windows cluster environments.
+
+### Host Arguments
+* `hostname` - (Required) Host Name or IP Address of the host that is being added to Delphix. [Updatable]
+* `ssh_port` - ssh port of the environment. [Updatable]
+* `toolkit_path` - The path where the Delphix Toolkit can be placed. [Updatable]
+* `oracle_tde_keystores_root_path` - The path to the root of the Oracle TDE keystores artifact directories. [Updatable]
+* `java_home` - The path to the user managed Java Development Kit (JDK). If not specified, then the OpenJDK will be used. [Updatable]
+* `nfs_addresses` - Array of IP addresses or hostnames. Valid values are a list of addresses. For example: `["192.168.10.2"]` [Updatable]
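+
+These arguments are set inside the `hosts` block of a `delphix_environment` resource, as the examples above show; a minimal sketch with hypothetical values:
+
+```hcl
+hosts {
+  hostname      = "db.host.com"
+  ssh_port      = 22
+  toolkit_path  = "/home/delphix_os/toolkit"
+  java_home     = "/usr/lib/jvm/jdk-11" # optional; OpenJDK is used if unset
+  nfs_addresses = ["192.168.10.2"]
+}
+```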
+
+### General Authentication Arguments
+* `dsp_keystore_path` - DSP keystore path.
+* `dsp_keystore_password` - DSP keystore password.
+* `dsp_keystore_alias` - DSP keystore alias.
+* `dsp_truststore_path` - DSP truststore path.
+* `dsp_truststore_password` - DSP truststore password.
+* `use_engine_public_key` - Indicates whether to use public key authentication.
+
+### Host Authentication Arguments
+* `username` - OS username to enable a connection from the engine. [Updatable]
+* `password` - OS user's password. [Updatable]
+* `vault` - The name or reference of the vault from which to read the host credentials.
+* `hashicorp_vault_engine` - The Hashicorp Vault engine name where the credential is stored.
+* `hashicorp_vault_secret_path` - Path in the Hashicorp Vault engine where the credential is stored.
+* `hashicorp_vault_username_key` - Key for the username in the key-value store.
+* `hashicorp_vault_secret_key` - Key for the password in the key-value store.
+* `cyberark_vault_query_string` - Query to find a credential in the CyberArk vault.
+* `use_kerberos_authentication` - Indicates whether to use Kerberos authentication.
+
+### SAP ASE (Sybase) Authentication Arguments
+* `ase_db_username` - Username for the SAP ASE database.
+* `ase_db_password` - Password for the SAP ASE database.
+* `ase_db_vault` - The name or reference of the vault from which to read the SAP ASE database credentials.
+* `ase_db_hashicorp_vault_engine` - The Hashicorp Vault engine name where the credential is stored.
+* `ase_db_hashicorp_vault_secret_path` - Path in the Hashicorp Vault engine where the credential is stored.
+* `ase_db_hashicorp_vault_username_key` - Key for the username in the key-value store.
+* `ase_db_hashicorp_vault_secret_key` - Key for the password in the key-value store.
+* `ase_db_cyberark_vault_query_string` - Query to find a credential in the CyberArk vault.
+* `ase_db_use_kerberos_authentication` - Whether to use Kerberos authentication for SAP ASE DB discovery.
+
+### Advanced Arguments
+* `tags` - The tags to be created for this environment. This is a map of two parameters: [Updatable]
+ * `key` - (Required) Key of the tag
+ * `value` - (Required) Value of the tag
+* `ignore_tag_changes` - Controls whether Terraform detects changes to the resource's tags. By default, this is set to true, meaning changes to the resource's tags are ignored.
+
+## Import
+Use the [`import` block](https://developer.hashicorp.com/terraform/language/import) to add Environments created directly in DCT into a Terraform state file.
+
+For example:
```hcl
-resource "delphix_environment" "wintgt" {
- engine_id = 2
- os_name = "UNIX"
- hostname = "xxx"
- toolkit_path = "/home/delphix"
- name = "unixtgt"
-
- vault = "vault-name"
- cyberark_query_string = "xxx"
-
- description = "This is unix target."
- }
-```
-### Creating a WINDOWS standalone target environment
+import {
+ to = delphix_environment.env_import_demo
+ id = "env_id"
+}
+```
+
+## Limitations
+Not all properties can be updated using the update command. Attempts to update an unsupported property will return a runtime error message.
+
+## Appendix
+Here are some additional examples:
+
+### Create UNIX cluster
```hcl
-resource "delphix_environment" "wintgt" {
- engine_id = 2
- os_name = "WINDOWS"
- username = "xxx"
- password = "xxx"
- hostname = "xxx"
- name = "wintgt"
- connector_port = 9100
- ssh_port = 22
- description = "This is a windows target."
- }
+resource "delphix_environment" "unix_cluster" {
+ engine_id = 2
+ os_type = "UNIX"
+ name = "unixcluster"
+ description = "This is a unix target."
+ username = "xxx"
+ password = "xxx"
+ hosts {
+ hostname = "db.host.com"
+ toolkit_path = "/home/delphix"
+ }
+ is_cluster = true
+ cluster_home = "/u01/app/19.0.0.0/grid"
+ }
```
-### Creating a WINDOWS standalone source environment
+
+### Creating UNIX standalone target environment using HashiCorp Vault
```hcl
-resource "delphix_environment" "WindowsSrc" {
- engine_id = 2
- os_name = "WINDOWS"
- username = "xxx"
- password = "xxx"
- hostname = "db.host.com"
- name = "WindowsSrc"
- staging_environment = delphix_environment.wintgt.id
- }
-```
-### Creating a WINDOWS cluster source environment
+resource "delphix_environment" "unix_with_hashi_vault" {
+ engine_id = 2
+ os_type = "UNIX"
+ name = "unixtgt"
+ hosts {
+ hostname = "xxx"
+ toolkit_path = "/home/delphix"
+ }
+ vault = "vault-name"
+ hashicorp_vault_engine = "xxx"
+ hashicorp_vault_secret_path = "xxx"
+ hashicorp_vault_username_key = "xxx"
+ hashicorp_vault_secret_key = "xxx"
+
+ description = "This is unix target."
+ }
+```
+
+### Creating UNIX standalone target environment using CyberArk Vault
```hcl
-resource "delphix_environment" "winsrc-cluster" {
- engine_id = 2
- is_target = false
- os_name = "WINDOWS"
- username = "xxx"
- password = "xxx"
- hostname = "xxx"
- name = "winsrc-cluster"
- staging_environment = delphix_environment.wintgt.id
- is_cluster = true
- }
-```
-### Creating a WINDOWS failover cluster that can be used as target
+resource "delphix_environment" "unix_with_ca_vault" {
+ engine_id = 2
+ os_type = "UNIX"
+ name = "unixtgt"
+ description = "This is unix target."
+ hosts {
+ hostname = "xxx"
+ toolkit_path = "/home/delphix"
+ }
+ vault = "vault-name"
+  cyberark_vault_query_string = "xxx"
+ }
+```
+
+### Creating a WINDOWS cluster source environment
```hcl
-resource "delphix_environment" "fc-cluster-0" {
- engine_id = 2
- os_name = "WINDOWS"
- username = "xxx"
- password = "xxx"
- hostname = "xxx"
- name = "fc-cluster-0"
- connector_port = 9100
- description = "This is an FC cluster"
- }
- resource "delphix_environment" "fc-cluster-1" {
- engine_id = 2
- os_name = "WINDOWS"
- username = "xxx"
- password = "xxx"
- hostname = "xxx"
- name = "fc-cluster-1"
- connector_port = 9100
- description = "This is an FC cluster."
- }
-resource "delphix_environment" "fc-tgt-cluster" {
- engine_id = 2
- is_target = true
- os_name = "WINDOWS"
- username = "xxx"
- password = "xxx"
- hostname = "db.host.com"
- name = "fc-tgt-cluster"
- staging_environment = delphix_environment.fc-cluster-1.id
- is_cluster = true
- }
+resource "delphix_environment" "winsrc_cluster" {
+ engine_id = 2
+ is_target = false
+ os_type = "WINDOWS"
+ name = "winsrc-cluster"
+ username = "xxx"
+ password = "xxx"
+ hosts {
+ hostname = "xxx"
+ }
+  staging_environment = delphix_environment.win_tgt.id
+ is_cluster = true
+ }
+```
+### Creating a WINDOWS failover cluster that can be used as target
+```hcl
+resource "delphix_environment" "win_fc_cluster_0" {
+ engine_id = 2
+ os_type = "WINDOWS"
+ name = "fc-cluster-0"
+ description = "This is an FC cluster"
+ username = "xxx"
+ password = "xxx"
+ hosts {
+ hostname = "xxx"
+ }
+ connector_port = 9100
+ }
+ resource "delphix_environment" "win_fc_cluster_1" {
+ engine_id = 2
+ os_type = "WINDOWS"
+ name = "fc-cluster-1"
+ description = "This is an FC cluster."
+ username = "xxx"
+ password = "xxx"
+ hosts {
+ hostname = "xxx"
+ }
+ connector_port = 9100
+ }
+resource "delphix_environment" "win_fc_tgt_cluster" {
+ engine_id = 2
+ is_target = true
+ os_type = "WINDOWS"
+ name = "fc-tgt-cluster"
+ username = "xxx"
+ password = "xxx"
+ hosts {
+ hostname = "db.host.com"
+ }
+  staging_environment = delphix_environment.win_fc_cluster_1.id
+ is_cluster = true
+ }
```
-
-## Argument Reference
-
-* `engine_id` - (Required) The DCT ID of the Engine on which to create the environment. This ID can be obtained by querying the DCT engines API. A Delphix Engine must be registered with DCT first for it to create an Engine ID.
-* `os_name` - (Required) Operating system type of the environment. Valid values are `[UNIX, WINDOWS]`
-* `hostname` - (Required) Host Name or IP Address of the host that being added to Delphix.
-* `name` - The name of the environment.
-* `is_cluster` - Whether the environment to be created is a cluster.
-* `cluster_home` - Absolute path to cluster home drectory. This parameter is (Required) for UNIX cluster environments.
-* `staging_environment` - Id of the environment where Delphix Connector is installed. This is a (Required) parameter when creating Windows source environments.
-* `connector_port` - Specify port on which Delphix connector will run. This is a (Required) parameter when creating Windows target environments.
-* `is_target` - Whether the environment to be created is a target cluster environment. This property is used only when creating Windows cluster environments.
-* `ssh_port` - ssh port of the environment.
-* `toolkit_path` - The path where Delphix toolkit can be pushed.
-* `username` - OS username for Delphix.
-* `password` - OS user's password.
-* `vault` - The name or reference of the vault from which to read the host credentials.
-* `hashicorp_vault_engine` - Vault engine name where the credential is stored.
-* `hashicorp_vault_secret_path` - Path in the vault engine where the credential is stored.
-* `hashicorp_vault_username_key` - Key for the username in the key-value store.
-* `hashicorp_vault_secret_key` - Key for the password in the key-value store.
-* `cyberark_vault_query_string` - Query to find a credential in the CyberArk vault.
-* `use_kerberos_authentication` - Whether to use kerberos authentication.
-* `use_engine_public_key` - Whether to use public key authentication.
-* `nfs_addresses` - Array of ip address or hostnames. Valid values are a list of addresses. For eg: `["192.168.10.2"]`
-* `ase_db_username` - Username for the SAP ASE database.
-* `ase_db_password` - Password for the SAP ASE database.
-* `ase_db_vault` - The name or reference of the vault from which to read the ASE database credentials.
-* `ase_db_hashicorp_vault_engine` - Vault engine name where the credential is stored.
-* `ase_db_hashicorp_vault_secret_path` - Path in the vault engine where the credential is stored.
-* `ase_db_hashicorp_vault_username_key` - Key for the username in the key-value store.
-* `ase_db_hashicorp_vault_secret_key` - Key for the password in the key-value store.
-* `ase_db_cyberark_vault_query_string` - Query to find a credential in the CyberArk vault.
-* `ase_db_use_kerberos_authentication` - Whether to use kerberos authentication for ASE DB discovery.
-* `java_home` - The path to the user managed Java Development Kit (JDK). If not specified, then the OpenJDK will be used.
-* `dsp_keystore_path` - DSP keystore path.
-* `dsp_keystore_password` - DSP keystore password.
-* `dsp_keystore_alias` - DSP keystore alias.
-* `dsp_truststore_path` - DSP truststore path.
-* `dsp_truststore_password` - DSP truststore password.
-* `description` - The environment description.
-* `tags` - The tags to be created for this environment. This is a map of 2 parameters:
- * `key` - (Required) Key of the tag
- * `value` - (Required) Value of the tag
-
-## Attribute Reference
-
-* `namespace` - The namespace of this environment for replicated and restored objects.
-* `engine_id` - A reference to the Engine that this Environment connection is associated with.
-* `enabled` - True if this environment is enabled.
-* `hosts` - The hosts that are part of this environment.
-* `repositories` - The repositories that are part of this environment.
diff --git a/docs/resources/oracle_dsource.md b/docs/resources/oracle_dsource.md
index 403f803..a9b6d7b 100644
--- a/docs/resources/oracle_dsource.md
+++ b/docs/resources/oracle_dsource.md
@@ -41,7 +41,7 @@ resource "delphix_oracle_dsource" "test_oracle_dsource" {
## Argument References
- ### General Linking Requirements
+### General Linking Requirements
* `name` - The unique name of the dSource. If empty, a name is randomly generated. [Updatable]
* `source_value` - (Required) ID or name of the source to link.
@@ -116,6 +116,7 @@ The following arguments apply to all dSources but they are not often necessary f
* `tags` - The tags to be created for dSource. This is a map of 2 parameters: [Updatable]
* `key` - (Required) Key of the tag
* `value` - (Required) Value of the tag
+* `ignore_tag_changes` - Controls whether Terraform detects changes to the resource's tags. By default, this is set to true, meaning changes to the resource's tags are ignored.
### Hooks
Any combination of the following hooks can be provided on the Oracle dSource resource. The available arguments are identical for each hook and are consolidated in a single list to save space.
@@ -142,7 +143,7 @@ Any combination of the following hooks can be provided on the Oracle dSource res
* `azure_vault_secret_key` - Azure vault key in the key-value store.
* `cyberark_vault_query_string` - Query to find a credential in the CyberArk vault.
-## Import (Beta)
+## Import
Use the [`import` block](https://developer.hashicorp.com/terraform/language/import) to add Oracle Dsources created directly in DCT into a Terraform state file.
For example:
@@ -152,7 +153,6 @@ import {
id = "dsource_id"
}
```
-*This is a beta feature. Delphix offers no guarantees of support or compatibility.*
## Limitations
diff --git a/docs/resources/vdb.md b/docs/resources/vdb.md
index 69d10e1..e5905d5 100644
--- a/docs/resources/vdb.md
+++ b/docs/resources/vdb.md
@@ -224,8 +224,9 @@ This is a map of two required parameters:
* `key` - Key of the tag.
* `value` - Value of the tag.
* `make_current_account_owner` - Default True. Boolean to determine if the account provisioning this VDB will be the "Owner" of the VDB.
+* `ignore_tag_changes` - Controls whether Terraform detects changes to the resource's tags. By default, this is set to true, meaning changes to the resource's tags are ignored.
-## Import (Beta)
+## Import
Use the [`import` block](https://developer.hashicorp.com/terraform/language/import) to add VDBs created directly in DCT into a Terraform state file.
For example:
@@ -235,7 +236,6 @@ import {
id = "vdb_id"
}
```
-*This is a beta feature. Delphix offers no guarantees of support or compatibility.*
## Limitations
diff --git a/examples/environment/main.tf b/examples/environment/main.tf
index 668a864..d94f480 100644
--- a/examples/environment/main.tf
+++ b/examples/environment/main.tf
@@ -15,28 +15,30 @@ provider "delphix" {
/* Unix Standalone */
resource "delphix_environment" "unixtgt" {
- engine_id = 1
- os_name = "UNIX"
- username = "xxx"
- password = "xxx"
- hostname = "xxx"
- toolkit_path = "/home/delphix_os/toolkit"
- name = "unixtgt"
- description = "This is a unix target."
+ engine_id = 1
+ os_type = "UNIX"
+ username = "xxx"
+ password = "xxx"
+ hosts {
+ hostname = "xxx"
+ toolkit_path = "/home/delphix_os/toolkit"
+ }
+ name = "unixtgt"
+ description = "This is a unix target."
}
/* Unix Standalone using Hashicorp vault
resource "delphix_environment" "unixtgt" {
engine_id = 1
- os_name = "UNIX"
- hostname = "xxx"
-
+ os_type = "UNIX"
+ hosts {
+ hostname = "xxx"
+ }
vault = "xxx"
hashicorp_vault_engine = "xxx"
hashicorp_vault_secret_path = "xxx"
hashicorp_vault_username_key = "xxx"
hashicorp_vault_secret_key = "xxx"
-
toolkit_path = "/home/delphix_os/toolkit"
name = "unixtgt"
description = "This is a unix target."
@@ -45,12 +47,12 @@ resource "delphix_environment" "unixtgt" {
/* Unix Standalone using CyberArk vault
resource "delphix_environment" "unixtgt" {
engine_id = 1
- os_name = "UNIX"
- hostname = "xxx"
-
+ os_type = "UNIX"
+ hosts {
+ hostname = "xxx"
+ }
vault = "xxx"
- cyberark_query_string = "xxx"
-
+ cyberark_vault_query_string = "xxx"
toolkit_path = "/home/delphix_os/toolkit"
name = "unixtgt"
description = "This is a unix target."
@@ -58,114 +60,130 @@ resource "delphix_environment" "unixtgt" {
/* Win Standalone - Target*/
/* resource "delphix_environment" "wintgt" {
- engine_id = 2
- os_name = "WINDOWS"
- username = "xxx"
- password = "xxx"
- hostname = "xxx"
- name = "wintgt"
- connector_port = 9100
- ssh_port = 22
- description = "This is a windows target."
+ engine_id = 2
+ os_type = "WINDOWS"
+ username = "xxx"
+ password = "xxx"
+ hosts {
+ hostname = "xxx"
+ ssh_port = 22
+ }
+ name = "wintgt"
+ connector_port = 9100
+ description = "This is a windows target."
} */
/* Win Standalone - Source*/
/* resource "delphix_environment" "WindowsSrc" {
- engine_id = 2
- os_name = "WINDOWS"
- username = "xxx"
- password = "xxx"
- hostname = "xxx"
- name = "WindowsSrc"
- staging_environment = delphix_environment.wintgt.id
+ engine_id = 2
+ os_type = "WINDOWS"
+ username = "xxx"
+ password = "xxx"
+ hosts {
+ hostname = "xxx"
+ }
+ name = "WindowsSrc"
+ staging_environment = delphix_environment.wintgt.id
} */
/* Unix Standalone - All Params */
/* resource "delphix_environment" "env_name" {
- engine_id = 2
- os_name = "UNIX"
- username = "xxx"
- password = "xxx"
- hostname = "xxx"
- toolkit_path = "/home/delphix"
- name = "Test"
- is_cluster = false
- cluster_home = "/home/ghrid"
- staging_environment = "stage"
- connector_port = 5312
- ssh_port = 22
- ase_db_password = "pass"
- ase_db_username = "user"
- java_home = "/j/h"
- dsp_keystore_alias = "alias"
- dsp_keystore_password = "pass"
- dsp_keystore_path = "path"
- dsp_truststore_password = "pass"
- dsp_truststore_path = "path"
- description = "desc"
- is_target = false
+ engine_id = 2
+ os_type = "UNIX"
+ username = "xxx"
+ password = "xxx"
+ hosts {
+ hostname = "xxx"
+ ssh_port = 22
+ java_home = "/j/h"
+ }
+ toolkit_path = "/home/delphix"
+ name = "Test"
+ is_cluster = false
+ cluster_home = "/home/ghrid"
+ staging_environment = "stage"
+ connector_port = 5312
+ ase_db_password = "pass"
+ ase_db_username = "user"
+ dsp_keystore_alias = "alias"
+ dsp_keystore_password = "pass"
+ dsp_keystore_path = "path"
+ dsp_truststore_password = "pass"
+ dsp_truststore_path = "path"
+ description = "desc"
+ is_target = false
} */
/* Win Cluster - Source*/
/* resource "delphix_environment" "winsrc-cluster" {
- engine_id = 2
- is_target = false
- os_name = "WINDOWS"
- username = "xxx"
- password = "xxx"
- hostname = "xxx"
- name = "winsrc-cluster"
- staging_environment = delphix_environment.wintgt.id
- is_cluster = true
+ engine_id = 2
+ is_target = false
+ os_type = "WINDOWS"
+ username = "xxx"
+ password = "xxx"
+ hosts {
+ hostname = "xxx"
+ }
+ name = "winsrc-cluster"
+ staging_environment = delphix_environment.wintgt.id
+ is_cluster = true
} */
/* Unix Cluster */
/* resource "delphix_environment" "unixcluster" {
- engine_id = 2
- os_name = "UNIX"
- username = "xxx"
- password = "xxx"
- hostname = "xxx"
- toolkit_path = "/work"
- name = "unixcluster"
- description = "This is a unix target."
- is_cluster = true
- cluster_home = "/u01/app/19.0.0.0/grid"
+ engine_id = 2
+ os_type = "UNIX"
+ username = "xxx"
+ password = "xxx"
+ hosts {
+ hostname = "xxx"
+ }
+ toolkit_path = "/work"
+ name = "unixcluster"
+ description = "This is a unix target."
+ is_cluster = true
+ cluster_home = "/u01/app/19.0.0.0/grid"
} */
/* Windows Failover Cluster - Used as target */
/* resource "delphix_environment" "fc-cluster-0" {
- engine_id = 2
- os_name = "WINDOWS"
- username = "xxx"
- password = "xxx"
- hostname = "xxx"
- name = "fc-cluster-0"
- connector_port = 9100
- description = "This is an FC cluster"
+ engine_id = 2
+ os_type = "WINDOWS"
+ username = "xxx"
+ password = "xxx"
+ hosts {
+ hostname = "xxx"
+ }
+ name = "fc-cluster-0"
+ connector_port = 9100
+ description = "This is an FC cluster"
}
resource "delphix_environment" "fc-cluster-1" {
- engine_id = 2
- os_name = "WINDOWS"
- username = "xxx"
- password = "xxx"
- hostname = "xxx"
- name = "fc-cluster-1"
- connector_port = 9100
- description = "This is an FC cluster."
+ engine_id = 2
+ os_type = "WINDOWS"
+ username = "xxx"
+ password = "xxx"
+ hosts {
+ hostname = "xxx"
+ }
+ name = "fc-cluster-1"
+ connector_port = 9100
+ description = "This is an FC cluster."
}
resource "delphix_environment" "fc-tgt-cluster" {
- engine_id = 2
- is_target = true
- os_name = "WINDOWS"
- username = "xxx"
- password = "xxx"
- hostname = "xxx"
- name = "fc-tgt-cluster"
- staging_environment = "2-WINDOWS_HOST_ENVIRONMENT-35"
- is_cluster = true
+ engine_id = 2
+ is_target = true
+ os_type = "WINDOWS"
+ username = "xxx"
+ password = "xxx"
+ hosts {
+ hostname = "xxx"
+ }
+ name = "fc-tgt-cluster"
+ staging_environment = "2-WINDOWS_HOST_ENVIRONMENT-35"
+ is_cluster = true
} */
\ No newline at end of file
diff --git a/go.mod b/go.mod
index b1f941a..5506ae3 100644
--- a/go.mod
+++ b/go.mod
@@ -1,11 +1,11 @@
module terraform-provider-delphix
-go 1.22.0
+go 1.23.0
-toolchain go1.22.6
+toolchain go1.23.7
require (
- github.com/delphix/dct-sdk-go/v25 v25.1.2
+ github.com/delphix/dct-sdk-go/v25 v25.2.0
github.com/hashicorp/terraform-plugin-sdk/v2 v2.33.0
)
@@ -47,10 +47,10 @@ require (
github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect
github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
github.com/zclconf/go-cty v1.14.2 // indirect
- golang.org/x/crypto v0.31.0 // indirect
+ golang.org/x/crypto v0.36.0 // indirect
golang.org/x/mod v0.17.0 // indirect
- golang.org/x/sys v0.28.0 // indirect
- golang.org/x/text v0.21.0 // indirect
+ golang.org/x/sys v0.31.0 // indirect
+ golang.org/x/text v0.23.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 // indirect
google.golang.org/grpc v1.61.1 // indirect
)
@@ -59,7 +59,7 @@ require (
github.com/golang/protobuf v1.5.3 // indirect
github.com/kr/pretty v0.2.1 // indirect
github.com/kr/text v0.2.0 // indirect
- golang.org/x/net v0.33.0 // indirect
+ golang.org/x/net v0.38.0 // indirect
google.golang.org/appengine v1.6.8 // indirect
google.golang.org/protobuf v1.33.0 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
diff --git a/go.sum b/go.sum
index 58923b1..7cc42f0 100644
--- a/go.sum
+++ b/go.sum
@@ -19,8 +19,8 @@ github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxG
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/delphix/dct-sdk-go/v25 v25.1.2 h1:wiJui4cZB4xK9Znu9JdWb5N3rKqbnz1oXWaqLXjGHnE=
-github.com/delphix/dct-sdk-go/v25 v25.1.2/go.mod h1:Y//bIbAZP6SZhLLZAQMxEfeRXvsvKQwu/kSR8a5hfqc=
+github.com/delphix/dct-sdk-go/v25 v25.2.0 h1:djFGvJwDHE99vBFa5ZlixcV49niz7nRsuwfue8l/AQA=
+github.com/delphix/dct-sdk-go/v25 v25.2.0/go.mod h1:fCw+bOFPHiNcqUGvRpOEq4PINgcmw6KptstJy4v66Uo=
github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
@@ -148,8 +148,8 @@ github.com/zclconf/go-cty v1.14.2 h1:kTG7lqmBou0Zkx35r6HJHUQTvaRPr5bIAf3AoHS0izI
github.com/zclconf/go-cty v1.14.2/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
-golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
+golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
+golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
@@ -157,13 +157,13 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
-golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
+golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
+golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
-golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
+golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -176,19 +176,19 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
-golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
+golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
-golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
+golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
+golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
-golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
-golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
+golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
+golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
diff --git a/internal/provider/commons.go b/internal/provider/commons.go
index d05e212..84fb3d7 100644
--- a/internal/provider/commons.go
+++ b/internal/provider/commons.go
@@ -52,6 +52,7 @@ var updatableVdbKeys = map[string]bool{
"mount_point": true,
"tags": true,
"database_name": true,
+ "ignore_tag_changes": true,
}
var isDestructiveVdbUpdate = map[string]bool{
@@ -89,6 +90,7 @@ var isDestructiveVdbUpdate = map[string]bool{
"new_dbid": false,
"mount_point": true,
"tags": false,
+ "ignore_tag_changes": false,
}
var updatableOracleDsourceKeys = map[string]bool{
@@ -109,4 +111,46 @@ var updatableOracleDsourceKeys = map[string]bool{
"ops_pre_sync": true,
"ops_pre_log_sync": true,
"ops_post_sync": true,
+ "ignore_tag_changes": true,
+ "rollback_on_failure": true,
+}
+
+var updatableEnvKeys = map[string]bool{
+ "name": true,
+ "cluster_home": true,
+ "connector_port": true,
+ "username": true,
+ "password": true,
+ "description": true,
+ "tags": true,
+ "hosts": true,
+ "ignore_tag_changes": true,
+}
+
+var isDestructiveEnvUpdate = map[string]bool{
+ "name": false,
+ "cluster_home": true,
+ "connector_port": true,
+ "username": true,
+ "password": true,
+ "description": false,
+ "tags": false,
+ "hosts": true,
+ "ignore_tag_changes": false,
+}
+
+var updatableAppdataDsourceKeys = map[string]bool{
+ "name": true,
+ "description": true,
+ "staging_environment": true,
+ "staging_environment_user": true,
+ "environment_user": true,
+ "parameters": true,
+ "sync_policy_id": true,
+ "retention_policy_id": true,
+ "ops_pre_sync": true,
+ "ops_post_sync": true,
+ "tags": true,
+ "ignore_tag_changes": true,
+ "rollback_on_failure": true,
}
diff --git a/internal/provider/resource_appdata_dsource.go b/internal/provider/resource_appdata_dsource.go
index f3b465f..f50b606 100644
--- a/internal/provider/resource_appdata_dsource.go
+++ b/internal/provider/resource_appdata_dsource.go
@@ -4,6 +4,8 @@ import (
"context"
"encoding/json"
"net/http"
+ "reflect"
+ "strings"
dctapi "github.com/delphix/dct-sdk-go/v25"
"github.com/hashicorp/terraform-plugin-log/tflog"
@@ -33,11 +35,33 @@ func resourceAppdataDsource() *schema.Resource {
},
"source_value": {
Type: schema.TypeString,
- Required: true,
+ Optional: true,
+ DiffSuppressFunc: func(k, oldValue, newValue string, d *schema.ResourceData) bool {
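+ // d.Id() is only set after create, so the diff is suppressed for existing resources while the value is still honored on the initial apply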
+ if oldValue != newValue {
+ tflog.Info(context.Background(), "updating source_value is not allowed. plan changes are suppressed")
+ }
+ return d.Id() != ""
+ },
},
"group_id": {
Type: schema.TypeString,
- Required: true,
+ Optional: true,
+ DiffSuppressFunc: func(k, oldValue, newValue string, d *schema.ResourceData) bool {
+ if oldValue != newValue {
+ tflog.Info(context.Background(), "updating group_id is not allowed. plan changes are suppressed")
+ }
+ return d.Id() != ""
+ },
+ },
+ "sync_policy_id": {
+ Type: schema.TypeString,
+ Optional: true,
+ Computed: true,
+ },
+ "retention_policy_id": {
+ Type: schema.TypeString,
+ Optional: true,
+ Computed: true,
},
"description": {
Type: schema.TypeString,
@@ -53,15 +77,22 @@ func resourceAppdataDsource() *schema.Resource {
},
"link_type": {
Type: schema.TypeString,
- Required: true,
+ Optional: true,
+ DiffSuppressFunc: func(k, oldValue, newValue string, d *schema.ResourceData) bool {
+ tflog.Info(context.Background(), "In DiffSuppressFunc of link_type")
+ if oldValue != newValue {
+ tflog.Info(context.Background(), "updating link_type is not allowed. plan changes are suppressed")
+ }
+ return d.Id() != ""
+ },
},
"staging_mount_base": {
Type: schema.TypeString,
- Required: true,
+ Optional: true,
},
"staging_environment": {
Type: schema.TypeString,
- Required: true,
+ Optional: true,
},
"staging_environment_user": {
Type: schema.TypeString,
@@ -69,11 +100,17 @@ func resourceAppdataDsource() *schema.Resource {
},
"environment_user": {
Type: schema.TypeString,
- Required: true,
+ Optional: true,
+ },
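+ // when true (the default), tag drift is suppressed at plan time and tag updates are skipped, leaving tags managed outside Terraform untouched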
+ "ignore_tag_changes": {
+ Type: schema.TypeBool,
+ Default: true,
+ Optional: true,
},
"tags": {
Type: schema.TypeList,
Optional: true,
+ Computed: true,
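+ // Computed so tags returned by DCT during read can be stored in state without producing a spurious diff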
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"key": {
@@ -86,6 +123,12 @@ func resourceAppdataDsource() *schema.Resource {
},
},
},
+ DiffSuppressFunc: func(_, old, new string, d *schema.ResourceData) bool {
+ if ignore, ok := d.GetOk("ignore_tag_changes"); ok && ignore.(bool) {
+ return true
+ }
+ return false
+ },
},
"ops_pre_sync": {
Type: schema.TypeList,
@@ -103,7 +146,16 @@ func resourceAppdataDsource() *schema.Resource {
"shell": {
Type: schema.TypeString,
Optional: true,
- }, "credentials_env_vars": {
+ },
+ "element_id": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ "has_credentials": {
+ Type: schema.TypeBool,
+ Computed: true,
+ },
+ "credentials_env_vars": {
Type: schema.TypeList,
Optional: true,
Elem: &schema.Resource{
@@ -174,7 +226,16 @@ func resourceAppdataDsource() *schema.Resource {
"shell": {
Type: schema.TypeString,
Optional: true,
- }, "credentials_env_vars": {
+ },
+ "element_id": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ "has_credentials": {
+ Type: schema.TypeBool,
+ Computed: true,
+ },
+ "credentials_env_vars": {
Type: schema.TypeList,
Optional: true,
Elem: &schema.Resource{
@@ -245,11 +306,17 @@ func resourceAppdataDsource() *schema.Resource {
},
"parameters": {
Type: schema.TypeString,
- Required: true,
+ Optional: true,
},
"sync_parameters": {
Type: schema.TypeString,
- Required: true,
+ Optional: true,
+ DiffSuppressFunc: func(k, oldValue, newValue string, d *schema.ResourceData) bool {
+ if oldValue != newValue {
+ tflog.Info(context.Background(), "updating sync_parameters is not allowed. plan changes are suppressed")
+ }
+ return d.Id() != ""
+ },
},
// Output
"id": {
@@ -332,71 +399,31 @@ func resourceAppdataDsource() *schema.Resource {
Type: schema.TypeInt,
Default: 0,
Optional: true,
+ DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool {
+ if old != new {
+ tflog.Info(context.Background(), "updating wait_time is not allowed. plan changes are suppressed")
+ }
+ return d.Id() != ""
+ },
},
"skip_wait_for_snapshot_creation": {
Type: schema.TypeBool,
Default: false,
Optional: true,
+ DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool {
+ if old != new {
+ tflog.Info(context.Background(), "updating skip_wait_for_snapshot_creation is not allowed. plan changes are suppressed")
+ }
+ return d.Id() != ""
+ },
},
},
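+ // enable `terraform import` for existing dSources; state is populated by the subsequent read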
+ Importer: &schema.ResourceImporter{
+ StateContext: schema.ImportStatePassthroughContext,
+ },
}
}
-func toSourceOperationArray(array interface{}) []dctapi.SourceOperation {
- items := []dctapi.SourceOperation{}
- for _, item := range array.([]interface{}) {
- item_map := item.(map[string]interface{})
- sourceOperation := dctapi.NewSourceOperation(item_map["name"].(string), item_map["command"].(string))
- if item_map["shell"].(string) != "" {
- sourceOperation.SetShell(item_map["shell"].(string))
- }
- sourceOperation.SetCredentialsEnvVars(toCredentialsEnvVariableArray(item_map["credentials_env_vars"]))
- items = append(items, *sourceOperation)
- }
- return items
-}
-
-func toCredentialsEnvVariableArray(array interface{}) []dctapi.CredentialsEnvVariable {
- items := []dctapi.CredentialsEnvVariable{}
- for _, item := range array.([]interface{}) {
- item_map := item.(map[string]interface{})
-
- credentialsEnvVariable_item := dctapi.NewCredentialsEnvVariable(item_map["base_var_name"].(string))
- if item_map["password"].(string) != "" {
- credentialsEnvVariable_item.SetPassword(item_map["password"].(string))
- }
- if item_map["vault"].(string) != "" {
- credentialsEnvVariable_item.SetVault(item_map["vault"].(string))
- }
- if item_map["hashicorp_vault_engine"].(string) != "" {
- credentialsEnvVariable_item.SetHashicorpVaultEngine(item_map["hashicorp_vault_engine"].(string))
- }
- if item_map["hashicorp_vault_secret_path"].(string) != "" {
- credentialsEnvVariable_item.SetHashicorpVaultSecretPath(item_map["hashicorp_vault_secret_path"].(string))
- }
- if item_map["hashicorp_vault_username_key"].(string) != "" {
- credentialsEnvVariable_item.SetHashicorpVaultUsernameKey(item_map["hashicorp_vault_username_key"].(string))
- }
- if item_map["hashicorp_vault_secret_key"].(string) != "" {
- credentialsEnvVariable_item.SetHashicorpVaultSecretKey(item_map["hashicorp_vault_secret_key"].(string))
- }
- if item_map["azure_vault_name"].(string) != "" {
- credentialsEnvVariable_item.SetAzureVaultName(item_map["azure_vault_name"].(string))
- }
- if item_map["azure_vault_username_key"].(string) != "" {
- credentialsEnvVariable_item.SetAzureVaultUsernameKey(item_map["azure_vault_username_key"].(string))
- }
- if item_map["azure_vault_secret_key"].(string) != "" {
- credentialsEnvVariable_item.SetAzureVaultSecretKey(item_map["azure_vault_secret_key"].(string))
- }
- if item_map["cyberark_vault_query_string"].(string) != "" {
- credentialsEnvVariable_item.SetCyberarkVaultQueryString(item_map["cyberark_vault_query_string"].(string))
- }
- items = append(items, *credentialsEnvVariable_item)
- }
- return items
-}
-
func resourceAppdataDsourceCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
var diags diag.Diagnostics
client := meta.(*apiClient).client
@@ -409,16 +436,22 @@ func resourceAppdataDsourceCreate(ctx context.Context, d *schema.ResourceData, m
if v, has_v := d.GetOk("source_value"); has_v {
appDataDSourceLinkSourceParameters.SetSourceId(v.(string))
}
+ if v, has_v := d.GetOk("sync_policy_id"); has_v {
+ appDataDSourceLinkSourceParameters.SetSyncPolicyId(v.(string))
+ }
+ if v, has_v := d.GetOk("retention_policy_id"); has_v {
+ appDataDSourceLinkSourceParameters.SetRetentionPolicyId(v.(string))
+ }
if v, has_v := d.GetOk("group_id"); has_v {
appDataDSourceLinkSourceParameters.SetGroupId(v.(string))
}
if v, has_v := d.GetOk("description"); has_v {
appDataDSourceLinkSourceParameters.SetDescription(v.(string))
}
- if v, has_v := d.GetOkExists("log_sync_enabled"); has_v {
+ if v, has_v := d.GetOk("log_sync_enabled"); has_v {
appDataDSourceLinkSourceParameters.SetLogSyncEnabled(v.(bool))
}
- if v, has_v := d.GetOkExists("make_current_account_owner"); has_v {
+ if v, has_v := d.GetOk("make_current_account_owner"); has_v {
appDataDSourceLinkSourceParameters.SetMakeCurrentAccountOwner(v.(bool))
}
if v, has_v := d.GetOk("link_type"); has_v {
@@ -445,10 +478,10 @@ func resourceAppdataDsourceCreate(ctx context.Context, d *schema.ResourceData, m
if v, has_v := d.GetOk("ops_post_sync"); has_v {
appDataDSourceLinkSourceParameters.SetOpsPostSync(toSourceOperationArray(v))
}
- if v, has_v := d.GetOkExists("excludes"); has_v {
+ if v, has_v := d.GetOk("excludes"); has_v {
appDataDSourceLinkSourceParameters.SetExcludes(toStringArray(v))
}
- if v, has_v := d.GetOkExists("follow_symlinks"); has_v {
+ if v, has_v := d.GetOk("follow_symlinks"); has_v {
appDataDSourceLinkSourceParameters.SetFollowSymlinks(toStringArray(v))
}
if v, has_v := d.GetOk("parameters"); has_v {
@@ -550,11 +583,17 @@ func resourceDsourceRead(ctx context.Context, d *schema.ResourceData, meta inter
return diag.Errorf("Error occured in type casting.")
}
- _, rollback_on_failure_exists := d.GetOk("rollback_on_failure")
- if !rollback_on_failure_exists {
- // its an import or upgrade, set to default value
- d.Set("rollback_on_failure", false)
- }
+ ops_pre_sync_Raw, _ := d.Get("ops_pre_sync").([]interface{})
+ oldOpsPreSync := toSourceOperationArray(ops_pre_sync_Raw)
+
+ ops_post_sync_Raw, _ := d.Get("ops_post_sync").([]interface{})
+ oldOpsPostSync := toSourceOperationArray(ops_post_sync_Raw)
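+ // the prior hooks are passed to flattenDSourceHooks so values the API response may omit (e.g. credential passwords) can be carried over from state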
+
+ // _, rollback_on_failure_exists := d.GetOk("rollback_on_failure")
+ // if !rollback_on_failure_exists {
+ // // its an import or upgrade, set to default value
+ // d.Set("rollback_on_failure", false)
+ // }
d.Set("id", result.GetId())
d.Set("database_type", result.GetDatabaseType())
@@ -570,25 +609,165 @@ func resourceDsourceRead(ctx context.Context, d *schema.ResourceData, meta inter
d.Set("engine_name", result.GetEngineName())
d.Set("current_timeflow_id", result.GetCurrentTimeflowId())
d.Set("is_appdata", result.GetIsAppdata())
-
+ d.Set("sync_policy_id", result.GetSyncPolicyId())
+ d.Set("retention_policy_id", result.GetRetentionPolicyId())
+ d.Set("ops_pre_sync", flattenDSourceHooks(result.GetHooks().OpsPreSync, oldOpsPreSync))
+ d.Set("ops_post_sync", flattenDSourceHooks(result.GetHooks().OpsPostSync, oldOpsPostSync))
+
+ // get the tags and set it
+ resTagsDsrc, httpRes, err := client.DSourcesAPI.GetTagsDsource(ctx, dsource_id).Execute()
+ if err != nil {
+ tflog.Error(ctx, DLPX+ERROR+"Failed to fetch tags for dSource: "+dsource_id+". Error: "+err.Error())
+ } else if httpRes != nil && httpRes.StatusCode >= 400 {
+ tflog.Error(ctx, DLPX+ERROR+"Failed to fetch tags for dSource: "+dsource_id+". HTTP Status: "+httpRes.Status)
+ } else {
+ // check if tags are returned and set them to the state
+ if len(resTagsDsrc.GetTags()) != 0 {
+ tflog.Debug(ctx, DLPX+"Tags are present")
+ d.Set("tags", flattenTags(resTagsDsrc.GetTags()))
+ }
+ }
return diags
}
func resourceDsourceUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
+
+ var diags diag.Diagnostics
+ var updateFailure bool = false
+ var nonUpdatableField []string
+ client := meta.(*apiClient).client
+ updateAppdataDsource := dctapi.NewUpdateAppDataDSourceParameters()
+
+ dsourceId := d.Get("id").(string)
+
// get the changed keys
changedKeys := make([]string, 0, len(d.State().Attributes))
for k := range d.State().Attributes {
+ if strings.Contains(k, "tags") { // this is because the changed keys are of the form tag.0.keydi
+ k = "tags"
+ }
+ if strings.Contains(k, "ops_pre_sync") {
+ k = "ops_pre_sync"
+ }
+ if strings.Contains(k, "ops_post_sync") {
+ k = "ops_post_sync"
+ }
if d.HasChange(k) {
+ tflog.Debug(ctx, "changed keys"+k)
changedKeys = append(changedKeys, k)
}
}
- // revert and set the old value to the changed keys
+
+ // check if the changed keys are updatable
for _, key := range changedKeys {
- old, _ := d.GetChange(key)
- d.Set(key, old)
+ if !updatableAppdataDsourceKeys[key] {
+ updateFailure = true
+ tflog.Debug(ctx, "non updatable field: "+key)
+ nonUpdatableField = append(nonUpdatableField, key)
+ }
+ }
+
+ // if non-updatable keys are provided, error out
+ if updateFailure {
+ revertChanges(d, changedKeys)
+ return diag.Errorf("cannot update options %v. Please refer to provider documentation for updatable params.", nonUpdatableField)
+ }
+
+ // set changed params in the updateAppdataDsource
+ if d.HasChange("name") {
+ updateAppdataDsource.SetName(d.Get("name").(string))
+ }
+ if d.HasChange("description") {
+ updateAppdataDsource.SetDescription(d.Get("description").(string))
+ }
+ if d.HasChange("staging_environment") {
+ updateAppdataDsource.SetStagingEnvironment(d.Get("staging_environment").(string))
+ }
+ if d.HasChange("staging_environment_user") {
+ updateAppdataDsource.SetStagingEnvironmentUser(d.Get("staging_environment_user").(string))
+ }
+ if d.HasChange("environment_user") {
+ updateAppdataDsource.SetEnvironmentUser(d.Get("environment_user").(string))
+ }
+ if d.HasChange("parameters") {
+ if v, has_v := d.GetOk("parameters"); has_v {
+ params := make(map[string]interface{})
+ json.Unmarshal([]byte(v.(string)), &params)
+ updateAppdataDsource.SetParameters(params)
+ }
+ }
+ if d.HasChange("sync_policy_id") {
+ updateAppdataDsource.SetSyncPolicyId(d.Get("sync_policy_id").(string))
+ }
+ if d.HasChange("retention_policy_id") {
+ updateAppdataDsource.SetRetentionPolicyId(d.Get("retention_policy_id").(string))
+ }
+ if d.HasChange("ops_pre_sync") {
+ if v, has_v := d.GetOk("ops_pre_sync"); has_v {
+ updateAppdataDsource.SetOpsPreSync(toSourceOperationArray(v))
+ } else {
+ updateAppdataDsource.SetOpsPreSync([]dctapi.SourceOperation{})
+ }
}
+ if d.HasChange("ops_pre_sync") {
+ if v, has_v := d.GetOk("ops_post_sync"); has_v {
+ updateAppdataDsource.SetOpsPostSync(toSourceOperationArray(v))
+ } else {
+ updateAppdataDsource.SetOpsPostSync([]dctapi.SourceOperation{})
+ }
+ }
+ // check if the updateAppdataDsource is not empty
+ if !isStructEmpty(updateAppdataDsource) {
+ tflog.Debug(ctx, "updating appdata dsource")
+ res, httpRes, err := client.DSourcesAPI.UpdateAppdataDsourceById(ctx, dsourceId).UpdateAppDataDSourceParameters(*updateAppdataDsource).Execute()
+
+ if diags := apiErrorResponseHelper(ctx, nil, httpRes, err); diags != nil {
+ // revert and set the old value to the changed keys
+ revertChanges(d, changedKeys)
+ return diags
+ }
- return diag.Errorf("Action update not implemented for resource : dSource")
+ if res != nil {
+ job_status, job_err := PollJobStatus(res.Job.GetId(), ctx, client)
+ if job_err != "" {
+ tflog.Warn(ctx, DLPX+WARN+"Appdata Dsource Update Job Polling failed but continuing with update. Error: "+job_err)
+ }
+ tflog.Info(ctx, DLPX+INFO+"Job result is "+job_status)
+ if isJobTerminalFailure(job_status) {
+ return diag.Errorf("[NOT OK] Appdata Dsource Update %s. JobId: %s / Error: %s", job_status, res.Job.GetId(), job_err)
+ }
+ }
+ }
+
+ // update tags
+ if !d.Get("ignore_tag_changes").(bool) {
+ oldTags, newTags := d.GetChange("tags")
+ if !reflect.DeepEqual(oldTags, newTags) {
+ tflog.Debug(ctx, "updating tags")
+ // delete old tag
+ tflog.Debug(ctx, "deleting old tags")
+ if len(toTagArray(oldTags)) != 0 {
+ tflog.Debug(ctx, "tag to be deleted: "+toTagArray(oldTags)[0].GetKey()+" "+toTagArray(oldTags)[0].GetValue())
+ deleteTag := *dctapi.NewDeleteTag()
+ tagDelResp, tagDelErr := client.DSourcesAPI.DeleteTagsDsource(ctx, dsourceId).DeleteTag(deleteTag).Execute()
+ if diags := apiErrorResponseHelper(ctx, nil, tagDelResp, tagDelErr); diags != nil {
+ revertChanges(d, changedKeys)
+ updateFailure = true
+ }
+ }
+ // create tag
+ if len(toTagArray(newTags)) != 0 {
+ tflog.Info(ctx, "creating new tags")
+ _, httpResp, tagCrtErr := client.DSourcesAPI.CreateTagsDsource(ctx, dsourceId).TagsRequest(*dctapi.NewTagsRequest(toTagArray(newTags))).Execute()
+ if diags := apiErrorResponseHelper(ctx, nil, httpResp, tagCrtErr); diags != nil {
+ revertChanges(d, changedKeys)
+ return diags
+ }
+ }
+ }
+ }
+
+ return diags
}
func resourceDsourceDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
@@ -605,15 +784,16 @@ func resourceDsourceDelete(ctx context.Context, d *schema.ResourceData, meta int
return diags
}
- job_status, job_err := PollJobStatus(res.GetId(), ctx, client)
- if job_err != "" {
- tflog.Warn(ctx, DLPX+WARN+"Job Polling failed but continuing with deletion. Error :"+job_err)
- }
- tflog.Info(ctx, DLPX+INFO+"Job result is "+job_status)
- if isJobTerminalFailure(job_status) {
- return diag.Errorf("[NOT OK] dSource-Delete %s. JobId: %s / Error: %s", job_status, res.GetId(), job_err)
+ if res != nil {
+ job_status, job_err := PollJobStatus(res.GetId(), ctx, client)
+ if job_err != "" {
+ tflog.Warn(ctx, DLPX+WARN+"Job Polling failed but continuing with deletion. Error :"+job_err)
+ }
+ tflog.Info(ctx, DLPX+INFO+"Job result is "+job_status)
+ if isJobTerminalFailure(job_status) {
+ return diag.Errorf("[NOT OK] dSource-Delete %s. JobId: %s / Error: %s", job_status, res.GetId(), job_err)
+ }
}
-
_, diags := PollForObjectDeletion(ctx, func() (interface{}, *http.Response, error) {
return client.DSourcesAPI.GetDsourceById(ctx, dsourceId).Execute()
})
diff --git a/internal/provider/resource_appdata_dsource_test.go b/internal/provider/resource_appdata_dsource_test.go
index 05214a1..7ed04da 100644
--- a/internal/provider/resource_appdata_dsource_test.go
+++ b/internal/provider/resource_appdata_dsource_test.go
@@ -11,7 +11,7 @@ import (
"github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
)
-func TestDsource_create_positive(t *testing.T) {
+func Test_Acc_Appdata_Dsource(t *testing.T) {
sourceId := os.Getenv("DSOURCE_SOURCE_ID")
groupId := os.Getenv("DSOURCE_GROUP_ID")
name := os.Getenv("DSOURCE_NAME")
@@ -27,26 +27,68 @@ func TestDsource_create_positive(t *testing.T) {
CheckDestroy: testDsourceDestroy,
Steps: []resource.TestStep{
{
- Config: testDsourceBasic(sourceId, groupId, name, environmentUser, stagingEnvironment, ""),
+ Config: testDsourceBasic(sourceId, groupId, false, false, name, environmentUser, stagingEnvironment, "", "dlpx", "acc-test"),
ExpectError: regexp.MustCompile(`.*`),
},
{
- Config: testDsourceBasic(sourceId, groupId, name, environmentUser, stagingEnvironment, parameters),
+ Config: testDsourceBasic(sourceId, groupId, false, false, name, environmentUser, stagingEnvironment, parameters, "dlpx", "acc-test"),
Check: resource.ComposeTestCheckFunc(
testDsourceExists("delphix_appdata_dsource.new_data_dsource", sourceId),
resource.TestCheckResourceAttr("delphix_appdata_dsource.new_data_dsource", "source_id", sourceId)),
},
{
- Config: testDsourceUpdate(sourceId, groupId, "update_same_dsource", environmentUser, stagingEnvironment, parameters),
- Check: resource.ComposeAggregateTestCheckFunc(
- // irrelevant
+ // positive update test case
+ Config: testDsourceBasic(sourceId, groupId, false, false, "update_name", environmentUser, stagingEnvironment, parameters, "dlpx", "acc-test"),
+ Check: resource.ComposeTestCheckFunc(
+ testDsourceExists("delphix_appdata_dsource.new_data_dsource", sourceId),
+ resource.TestCheckResourceAttr("delphix_appdata_dsource.new_data_dsource", "name", "update_name"),
+ resource.TestCheckResourceAttr("delphix_appdata_dsource.new_data_dsource", "source_id", sourceId)),
+ },
+ {
+ // updating the tag with ignore_tag_changes=false and expecting the new tag to be applied
+ Config: testDsourceBasic(sourceId, groupId, false, false, "update_name", environmentUser, stagingEnvironment, parameters, "key1", "value1"),
+ Check: resource.ComposeTestCheckFunc(
+ testDsourceExists("delphix_appdata_dsource.new_data_dsource", sourceId),
+ resource.TestCheckResourceAttr("delphix_appdata_dsource.new_data_dsource", "source_id", sourceId),
+ resource.TestCheckResourceAttr("delphix_appdata_dsource.new_data_dsource", "tags.0.key", "key1"),
+ resource.TestCheckResourceAttr("delphix_appdata_dsource.new_data_dsource", "tags.0.value", "value1"),
),
+ },
+ {
+ // updating the tag with ignore_tag_changes=true and expecting the change to be suppressed
+ Config: testDsourceBasic(sourceId, groupId, false, true, "update_name", environmentUser, stagingEnvironment, parameters, "key-upd", "value-upd"),
+ Check: resource.ComposeTestCheckFunc(
+ testDsourceExists("delphix_appdata_dsource.new_data_dsource", sourceId),
+ resource.TestCheckResourceAttr("delphix_appdata_dsource.new_data_dsource", "source_id", sourceId),
+ resource.TestCheckResourceAttr("delphix_appdata_dsource.new_data_dsource", "tags.0.key", "key1"), // since ignore_tag_changes is true, the key should not change
+ resource.TestCheckResourceAttr("delphix_appdata_dsource.new_data_dsource", "tags.0.value", "value1"), // since ignore_tag_changes is true, the value should not change
+ ),
+ // PreConfig: func() {
+ // fmt.Printf("[DEBUG] Applying configuration: %s", testDsourceBasic(sourceId, groupId, true, "update_name", environmentUser, stagingEnvironment, parameters, "key-upd", "value-upd"))
+ // },
+ },
+ {
+ // negative update test case, we are updating make_current_account_owner to true
+ Config: testDsourceBasic(sourceId, groupId, true, false, name, environmentUser, stagingEnvironment, parameters, "dlpx", "acc-test"),
ExpectError: regexp.MustCompile(`.*`),
},
},
})
}
+// DEBUG: use this in check to see the resource state during test
+func checkState(n string) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ rs, ok := s.RootModule().Resources[n]
+ if !ok {
+ return fmt.Errorf("Resource not found: %s", n)
+ }
+
+ fmt.Printf("[DEBUG] Current state for %s: %v\n", n, rs.Primary.Attributes)
+ return nil
+ }
+}
+
func testDsourcePreCheck(t *testing.T, sourceId string, groupId string, name string, environmentUser string, stagingEnvironment string, parameters string) {
testAccPreCheck(t)
if sourceId == "" {
@@ -69,35 +111,17 @@ func testDsourcePreCheck(t *testing.T, sourceId string, groupId string, name str
}
}
-func testDsourceBasic(sourceId string, groupId string, name string, environmentUser string, stagingEnvironment string, parameters string) string {
- return fmt.Sprintf(`
-resource "delphix_appdata_dsource" "new_data_dsource" {
- source_value = "%s"
- group_id = "%s"
- log_sync_enabled = false
- make_current_account_owner = true
- link_type = "AppDataStaged"
- name = "%s"
- staging_mount_base = ""
- environment_user = "%s"
- staging_environment = "%s"
- parameters = jsonencode(%s)
- sync_parameters = jsonencode({
- resync = true
- })
-}
- `, sourceId, groupId, name, environmentUser, stagingEnvironment, parameters)
-}
-
-func testDsourceUpdate(sourceId string, groupId string, name string, environmentUser string, stagingEnvironment string, parameters string) string {
+func testDsourceBasic(sourceId string, groupId string, make_current_account_owner bool, ignore_tag_changes bool, name string, environmentUser string, stagingEnvironment string, parameters string, key string, value string) string {
return fmt.Sprintf(`
resource "delphix_appdata_dsource" "new_data_dsource" {
source_value = "%s"
group_id = "%s"
log_sync_enabled = false
- make_current_account_owner = true
+ make_current_account_owner = "%v"
link_type = "AppDataStaged"
+ ignore_tag_changes = %v
name = "%s"
+ rollback_on_failure = true
staging_mount_base = ""
environment_user = "%s"
staging_environment = "%s"
@@ -105,8 +129,35 @@ resource "delphix_appdata_dsource" "new_data_dsource" {
sync_parameters = jsonencode({
resync = true
})
+ tags {
+ key = "%s"
+ value = "%s"
+ }
+ ops_pre_sync {
+ name = "string-change-opspresync22"
+ command = "ls -lr"
+ shell = "bash"
+ credentials_env_vars {
+ base_var_name = "mypass2t"
+ password = "password_test"
+ }
+ credentials_env_vars {
+ base_var_name = "mypass3t"
+ password = "password_test"
+ }
+ }
+
+ ops_post_sync {
+ name = "string-change-opspostsync22"
+ command = "ls -lrta"
+ shell = "bash"
+ credentials_env_vars {
+ base_var_name = "mypassopspostsynct"
+ password = "password_test"
+ }
+ }
}
- `, sourceId, groupId, name, environmentUser, stagingEnvironment, parameters)
+ `, sourceId, groupId, make_current_account_owner, ignore_tag_changes, name, environmentUser, stagingEnvironment, parameters, key, value)
}
func testDsourceExists(n string, sourceId string) resource.TestCheckFunc {
diff --git a/internal/provider/resource_database_postgresql.go b/internal/provider/resource_database_postgresql.go
index fafdacb..eb19df5 100644
--- a/internal/provider/resource_database_postgresql.go
+++ b/internal/provider/resource_database_postgresql.go
@@ -111,6 +111,7 @@ func resourceSource() *schema.Resource {
"tags": {
Type: schema.TypeList,
Optional: true,
+ Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"key": {
diff --git a/internal/provider/resource_database_postgresql_test.go b/internal/provider/resource_database_postgresql_test.go
index eda3296..dfcf588 100644
--- a/internal/provider/resource_database_postgresql_test.go
+++ b/internal/provider/resource_database_postgresql_test.go
@@ -57,6 +57,10 @@ func testsourceBasic(repo_value string, name string) string {
resource "delphix_database_postgresql" "new_dsource" {
repository_value = "%s"
name = "%s"
+ tags {
+ key = "dlpx"
+ value = "acc-test"
+ }
}
`, repo_value, name)
}
@@ -89,6 +93,10 @@ func testsourceUpdate(repo_value string, name string) string {
resource "delphix_database_postgresql" "new_dsource" {
repository_value = "%s"
name = "%s"
+ tags {
+ key = "dlpx"
+ value = "acc-test"
+ }
}
`, repo_value, name)
}
diff --git a/internal/provider/resource_environment.go b/internal/provider/resource_environment.go
index e271dbf..20634f6 100644
--- a/internal/provider/resource_environment.go
+++ b/internal/provider/resource_environment.go
@@ -3,6 +3,8 @@ package provider
import (
"context"
"net/http"
+ "reflect"
+ "strings"
"github.com/hashicorp/terraform-plugin-log/tflog"
@@ -30,10 +32,6 @@ func resourceEnvironment() *schema.Resource {
Type: schema.TypeString,
Required: true,
},
- "os_name": {
- Type: schema.TypeString,
- Required: true,
- },
"is_cluster": {
Type: schema.TypeBool,
Optional: true,
@@ -42,10 +40,6 @@ func resourceEnvironment() *schema.Resource {
Type: schema.TypeString,
Optional: true,
},
- "hostname": {
- Type: schema.TypeString,
- Required: true,
- },
"staging_environment": {
Type: schema.TypeString,
Optional: true,
@@ -58,10 +52,6 @@ func resourceEnvironment() *schema.Resource {
Type: schema.TypeBool,
Optional: true,
},
- "ssh_port": {
- Type: schema.TypeInt,
- Optional: true,
- },
"toolkit_path": {
Type: schema.TypeString,
Optional: true,
@@ -134,13 +124,6 @@ func resourceEnvironment() *schema.Resource {
Type: schema.TypeBool,
Optional: true,
},
- "nfs_addresses": {
- Type: schema.TypeList,
- Optional: true,
- Elem: &schema.Schema{
- Type: schema.TypeString,
- },
- },
"ase_db_username": {
Type: schema.TypeString,
Optional: true,
@@ -149,10 +132,6 @@ func resourceEnvironment() *schema.Resource {
Type: schema.TypeString,
Optional: true,
},
- "java_home": {
- Type: schema.TypeString,
- Optional: true,
- },
"dsp_keystore_path": {
Type: schema.TypeString,
Optional: true,
@@ -177,14 +156,63 @@ func resourceEnvironment() *schema.Resource {
Type: schema.TypeString,
Optional: true,
},
+ "os_type": {
+ Type: schema.TypeString,
+ Default: "UNIX",
+ Optional: true,
+ DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool {
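+ // os_type cannot be changed once the environment exists; suppress any post-create diff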
+ if old != new {
+ tflog.Info(context.Background(), "updating os_type is not allowed. plan changes are suppressed")
+ }
+ return d.Id() != ""
+ },
+ },
+ "database_type": {
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ "version": {
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ "oracle_base": {
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ "bits": {
+ Type: schema.TypeInt,
+ Optional: true,
+ },
+ "allow_provisioning": {
+ Type: schema.TypeBool,
+ Optional: true,
+ },
+ "is_staging": {
+ Type: schema.TypeBool,
+ Optional: true,
+ },
+ "is_replica": {
+ Type: schema.TypeBool,
+ Optional: true,
+ },
+ "is_windows_target": {
+ Type: schema.TypeBool,
+ Optional: true,
+ },
+ "ignore_tag_changes": {
+ Type: schema.TypeBool,
+ Default: true,
+ Optional: true,
+ },
"tags": {
Type: schema.TypeList,
Optional: true,
+ Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"key": {
Type: schema.TypeString,
- Required: true,
+ Optional: true,
},
"value": {
Type: schema.TypeString,
@@ -192,35 +220,94 @@ func resourceEnvironment() *schema.Resource {
},
},
},
+ DiffSuppressFunc: func(_, old, new string, d *schema.ResourceData) bool {
+ if ignore, ok := d.GetOk("ignore_tag_changes"); ok && ignore.(bool) {
+ return true
+ }
+ return false
+ },
+ },
+ "id": {
+ Type: schema.TypeString,
+ Computed: true,
},
"namespace": {
Type: schema.TypeString,
Computed: true,
},
+ "namespace_name": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ "namespace_id": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
"enabled": {
Type: schema.TypeBool,
Computed: true,
},
"hosts": {
Type: schema.TypeList,
- Computed: true,
+ Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"hostname": {
Type: schema.TypeString,
Required: true,
},
+ "ssh_port": {
+ Type: schema.TypeInt,
+ Optional: true,
+ Computed: true, // because this gets returned in the read even if not set in the config
+ },
+ "toolkit_path": {
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ "oracle_tde_keystores_root_path": {
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ "java_home": {
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ "nfs_addresses": {
+ Type: schema.TypeList,
+ Optional: true,
+ Computed: true, // because this gets returned in the read even if not set in the config
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
"os_name": {
Type: schema.TypeString,
- Required: true,
+ Computed: true,
},
"os_version": {
Type: schema.TypeString,
- Required: true,
+ Computed: true,
},
"memory_size": {
Type: schema.TypeInt,
- Required: true,
+ Computed: true,
+ },
+ "id": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ "processor_type": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ "timezone": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ "available": {
+ Type: schema.TypeBool,
+ Computed: true,
},
},
},
@@ -232,27 +319,46 @@ func resourceEnvironment() *schema.Resource {
Schema: map[string]*schema.Schema{
"id": {
Type: schema.TypeString,
- Required: true,
+ Computed: true,
},
"name": {
Type: schema.TypeString,
- Required: true,
+ Computed: true,
},
"database_type": {
Type: schema.TypeString,
- Required: true,
+ Computed: true,
+ },
+ "version": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ "oracle_base": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ "bits": {
+ Type: schema.TypeInt,
+ Computed: true,
},
"allow_provisioning": {
Type: schema.TypeBool,
- Required: true,
+ Computed: true,
},
"is_staging": {
Type: schema.TypeBool,
- Required: true,
+ Computed: true,
},
},
},
},
+ "user_ref": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ },
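+ // allow `terraform import` of an existing environment by ID; attributes are filled in by the read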
+ Importer: &schema.ResourceImporter{
+ StateContext: schema.ImportStatePassthroughContext,
},
}
}
@@ -263,10 +369,30 @@ func resourceEnvironmentCreate(ctx context.Context, d *schema.ResourceData, meta
var diags diag.Diagnostics
client := meta.(*apiClient).client
+ var hostname, toolkit_path, java_home string
+ var ssh_port int
+ var nfs_addresses interface{}
+ // process hosts
+ if v, has_v := d.GetOk("hosts"); has_v {
+ hosts := v.([]interface{})
+ if len(hosts) > 0 {
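+ // the create flow only consumes the first hosts block; its fields map onto the top-level create parameters below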
+ host := hosts[0].(map[string]interface{}) // Cast host to a map
+ // os_type = host["os_type"].(string)
+ // oracle_tde_keystores_root_path = host["oracle_tde_keystores_root_path"].(string)
+ hostname = host["hostname"].(string)
+ toolkit_path = host["toolkit_path"].(string)
+ if val, ok := host["ssh_port"]; ok {
+ ssh_port = val.(int)
+ }
+ java_home = host["java_home"].(string)
+ nfs_addresses = host["nfs_addresses"]
+ }
+ }
+
createEnvParams := dctapi.NewEnvironmentCreateParameters(
d.Get("engine_id").(string),
- d.Get("os_name").(string),
- d.Get("hostname").(string),
+ d.Get("os_type").(string),
+ hostname,
)
//General
@@ -279,11 +405,14 @@ func resourceEnvironmentCreate(ctx context.Context, d *schema.ResourceData, meta
if v, has_v := d.GetOk("name"); has_v {
createEnvParams.SetName(v.(string))
}
- if v, has_v := d.GetOk("toolkit_path"); has_v {
- createEnvParams.SetToolkitPath(v.(string))
+ if toolkit_path != "" {
+ createEnvParams.SetToolkitPath(toolkit_path)
+ }
+ if ssh_port != 0 {
+ createEnvParams.SetSshPort(int64(ssh_port))
}
- if v, has_v := d.GetOk("ssh_port"); has_v {
- createEnvParams.SetSshPort(int64(v.(int)))
+ if java_home != "" {
+ createEnvParams.SetJavaHome(java_home)
}
if v, has_v := d.GetOk("ase_db_username"); has_v {
createEnvParams.SetAseDbUsername(v.(string))
@@ -291,9 +420,7 @@ func resourceEnvironmentCreate(ctx context.Context, d *schema.ResourceData, meta
if v, has_v := d.GetOk("ase_db_password"); has_v {
createEnvParams.SetAseDbPassword(v.(string))
}
- if v, has_v := d.GetOk("java_home"); has_v {
- createEnvParams.SetJavaHome(v.(string))
- }
+
if v, has_v := d.GetOk("dsp_keystore_path"); has_v {
createEnvParams.SetDspKeystorePath(v.(string))
}
@@ -359,10 +486,9 @@ func resourceEnvironmentCreate(ctx context.Context, d *schema.ResourceData, meta
}
// Clusters
- os_name := d.Get("os_name").(string)
if v := d.Get("is_cluster"); v.(bool) {
createEnvParams.SetIsCluster(v.(bool))
- if os_name == "WINDOWS" {
+ if d.Get("os_type").(string) == "WINDOWS" {
createEnvParams.SetIsTarget(d.Get("is_target").(bool))
}
}
@@ -378,8 +504,8 @@ func resourceEnvironmentCreate(ctx context.Context, d *schema.ResourceData, meta
if v, has_v := d.GetOk("staging_environment"); has_v {
createEnvParams.SetStagingEnvironment(v.(string))
}
- if v, has_v := d.GetOk("nfs_addresses"); has_v {
- createEnvParams.SetNfsAddresses(toStringArray(v))
+ if nfs_addresses != nil {
+ createEnvParams.SetNfsAddresses(toStringArray(nfs_addresses))
}
if v, has_v := d.GetOk("tags"); has_v {
createEnvParams.SetTags(toTagArray(v))
@@ -414,17 +540,10 @@ func resourceEnvironmentCreate(ctx context.Context, d *schema.ResourceData, meta
func resourceEnvironmentRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
client := meta.(*apiClient).client
envId := d.Id()
-
apiRes, diags := PollForObjectExistence(ctx, func() (interface{}, *http.Response, error) {
return client.EnvironmentsAPI.GetEnvironmentById(ctx, envId).Execute()
})
- if apiRes == nil {
- tflog.Error(ctx, DLPX+ERROR+"Environment not found: "+envId+", removing from state. ")
- d.SetId("")
- return nil
- }
-
if diags != nil {
_, diags := PollForObjectDeletion(ctx, func() (interface{}, *http.Response, error) {
return client.EnvironmentsAPI.GetEnvironmentById(ctx, envId).Execute()
@@ -437,18 +556,460 @@ func resourceEnvironmentRead(ctx context.Context, d *schema.ResourceData, meta i
}
return nil
}
+ _, os_type_exists := d.GetOk("os_type")
+ if !os_type_exists {
+ // it's an import or upgrade, set to the default value
+ d.Set("os_type", "UNIX")
+ }
envRes, _ := apiRes.(*dctapi.Environment)
+ //d.SetId(envRes.GetId())
+ d.Set("name", envRes.GetName())
+ d.Set("id", envRes.GetId())
d.Set("namespace", envRes.GetNamespace())
+ d.Set("namespace_name", envRes.GetNamespaceName())
+ d.Set("namespace_id", envRes.GetNamespaceId())
+ d.Set("is_replica", envRes.GetIsReplica())
+ d.Set("engine_id", envRes.GetEngineId())
+ d.Set("is_cluster", envRes.GetIsCluster())
d.Set("enabled", envRes.GetEnabled())
+ d.Set("is_windows_target", envRes.GetIsWindowsTarget())
+ d.Set("staging_environment", envRes.GetStagingEnvironment())
+ d.Set("cluster_home", envRes.GetClusterHome())
d.Set("hosts", flattenHosts(envRes.GetHosts()))
d.Set("repositories", flattenHostRepositories(envRes.GetRepositories()))
+
+ if user_ref, has_user_ref := d.GetOk("user_ref"); has_user_ref {
+ // this is set from update
+ tflog.Info(ctx, "Setting username in state(read)")
+ resUserList, httpResUserList, errUserList := client.EnvironmentsAPI.ListEnvironmentUsers(ctx, envId).Execute()
+ if diags := apiErrorResponseHelper(ctx, resUserList, httpResUserList, errUserList); diags != nil {
+ tflog.Error(ctx, DLPX+ERROR+"Failed to fetch user list for environment: "+envId+". Error: "+diags[0].Summary)
+ // return diag.Errorf("unable to retrieve user list")
+ } else {
+ for _, users := range resUserList.GetUsers() {
+ if strings.EqualFold(users.GetUserRef(), user_ref.(string)) {
+ d.Set("username", users.GetUsername())
+ }
+ }
+ }
+ }
+
+ // get the tags and set it
+ resTagsEnv, httpRes, err := client.EnvironmentsAPI.GetTagsEnvironment(ctx, envId).Execute()
+ if err != nil {
+ tflog.Error(ctx, DLPX+ERROR+"Failed to fetch tags for environment: "+envId+". Error: "+err.Error())
+ } else if httpRes != nil && httpRes.StatusCode >= 400 {
+ tflog.Error(ctx, DLPX+ERROR+"Failed to fetch tags for environment: "+envId+". HTTP Status: "+httpRes.Status)
+ } else {
+ // check if tags are returned and set them to the state
+ if len(resTagsEnv.GetTags()) != 0 {
+ tflog.Debug(ctx, DLPX+"Tags are present")
+ d.Set("tags", flattenTags(resTagsEnv.GetTags()))
+ }
+ }
+
return diags
}
func resourceEnvironmentUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- tflog.Info(ctx, DLPX+INFO+"Not Implemented: resourceEnvironmentUpdate")
var diags diag.Diagnostics
+
+ // get the changed keys
+ changedKeys := make([]string, 0, len(d.State().Attributes))
+ var modifiedChangedKeys []string
+ for k := range d.State().Attributes {
+ if strings.Contains(k, "tags") { // this is because the changed keys are of the form tag.0.key
+ k = "tags"
+ }
+ if d.HasChange(k) {
+ changedKeys = append(changedKeys, k)
+ }
+ }
+ for _, ck := range changedKeys {
+ // for hosts it will be in the form hosts.0.nfs_addresses.#
+ if strings.Contains(ck, "hosts.0.hostname") ||
+ strings.Contains(ck, "hosts.0.ssh_port") ||
+ strings.Contains(ck, "hosts.0.toolkit_path") ||
+ strings.Contains(ck, "hosts.0.java_home") ||
+ strings.Contains(ck, "hosts.0.nfs_addresses") {
+ ck = "hosts"
+ }
+ modifiedChangedKeys = append(modifiedChangedKeys, ck)
+ }
+
+ client := meta.(*apiClient).client
+ environmentId := d.Get("id").(string)
+ var updateFailure, destructiveUpdate bool = false, false
+ var nonUpdatableField []string
+ var dsourceItems []dctapi.DSource
+ var vdbs []dctapi.VDB
+ var vdbDiags, dsourceDiags diag.Diagnostics
+ var disableDsourceFailure bool = false
+ // if changedKeys contains a non-updatable field, set a flag
+ for _, key := range modifiedChangedKeys {
+ if !updatableEnvKeys[key] {
+ // stop the update process if a non-supported attribute is detected
+ updateFailure = true
+ nonUpdatableField = append(nonUpdatableField, key)
+ }
+ }
+
+ if updateFailure {
+ revertChanges(d, changedKeys)
+ return diag.Errorf("cannot update options %v. Please refer to provider documentation for updatable params.", nonUpdatableField)
+ }
+ // find if destructive update
+ for _, key := range changedKeys {
+ if isDestructiveEnvUpdate[key] {
+ tflog.Info(ctx, "isDestructiveUpdate: "+key)
+ destructiveUpdate = true
+ }
+ }
+
+ if destructiveUpdate {
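+ // destructive changes (e.g. hostname, cluster_home) require dependent VDBs and dSources to be disabled first; they are re-enabled after the update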
+ // get dsources and vdbs
+ vdbs, vdbDiags = filterVDBs(ctx, client, environmentId)
+ if vdbDiags.HasError() {
+ revertChanges(d, changedKeys)
+ return vdbDiags
+ }
+
+ // get sources to get dsources
+ sources, sourceDiag := filterSources(ctx, client, environmentId)
+ if sourceDiag.HasError() {
+ revertChanges(d, changedKeys)
+ return sourceDiag
+ }
+ var sourceIds []string
+ for _, item := range sources {
+ sourceIds = append(sourceIds, item.GetId())
+ }
+
+ // retrieve dsources from source list
+
+ if len(sourceIds) > 0 {
+ dsourceItems, dsourceDiags = filterdSources(ctx, client, sourceIds)
+ if dsourceDiags != nil {
+ revertChanges(d, changedKeys)
+ return dsourceDiags
+ }
+ }
+
+ // disable vdb
+ for _, item := range vdbs {
+ if diags := disableVDB(ctx, client, item.GetId()); diags != nil {
+ tflog.Error(ctx, "failure in disabling vdbs")
+ //disableVdbFailure = true
+ revertChanges(d, changedKeys)
+ return diags
+ }
+ }
+
+ // disable dsources
+ for _, item := range dsourceItems {
+ if diags := disabledSource(ctx, client, item.GetId()); diags != nil {
+ tflog.Error(ctx, "failure in disabling Dsources")
+ disableDsourceFailure = true
+ }
+ }
+ if disableDsourceFailure {
+ //enable back vdbs and return
+ for _, item := range vdbs {
+ if diags := enableVDB(ctx, client, item.GetId()); diags != nil {
+ revertChanges(d, changedKeys)
+ return diags
+ }
+ }
+ }
+ }
+ var failureEvents []string
+ // if no disable failures, proceed to update
+ if d.HasChanges(
+ "name",
+ "cluster_home",
+ "description",
+ ) {
+ // env update
+ tflog.Info(ctx, "Proceeding to update environment")
+ envUpdateParam := dctapi.NewEnvironmentUpdateParameters()
+ if d.HasChange("name") {
+ if v, has_v := d.GetOk("name"); has_v {
+ envUpdateParam.SetName(v.(string))
+ }
+ }
+ if d.HasChange("cluster_home") {
+ if v, has_v := d.GetOk("cluster_home"); has_v {
+ envUpdateParam.SetClusterHome(v.(string))
+ }
+ }
+ if d.HasChange("description") {
+ if v, has_v := d.GetOk("description"); has_v {
+ envUpdateParam.SetDescription(v.(string))
+ }
+ }
+ if !isStructEmpty(envUpdateParam) {
+ res, httpRes, err := client.EnvironmentsAPI.UpdateEnvironment(ctx, environmentId).EnvironmentUpdateParameters(*envUpdateParam).Execute()
+ if diags := apiErrorResponseHelper(ctx, res, httpRes, err); diags != nil {
+ revertChanges(d, changedKeys)
+ updateFailure = true
+ if len(diags) > 0 {
+ failureEvents = append(failureEvents, diags[0].Summary)
+ } else {
+ tflog.Warn(ctx, "UpdateEnvironment Diagnostics is empty or nil; skipping appending to failureEvents")
+ }
+ }
+
+ // if the above api call fails, no point in polling as res will be nil
+ if res != nil {
+ job_res, job_err := PollJobStatus(res.Job.GetId(), ctx, client)
+ if job_err != "" {
+ tflog.Warn(ctx, DLPX+WARN+"Env Host Update Job Polling failed but continuing with update. Error: "+job_err)
+ }
+ tflog.Info(ctx, DLPX+INFO+"Job result is "+job_res)
+ if job_res == Failed || job_res == Canceled || job_res == Abandoned {
+ tflog.Error(ctx, DLPX+ERROR+"Job "+job_res+" "+res.Job.GetId()+"!")
+ revertChanges(d, changedKeys)
+ updateFailure = true
+ failureEvents = append(failureEvents, job_err)
+ // return diag.Errorf("[NOT OK] Job %s %s with error %s", *res.Job.Id, job_res, job_err)
+ }
+ }
+ }
+ }
+ if d.HasChanges(
+ "username",
+ "password",
+ ) {
+ tflog.Info(ctx, "Proceeding to update environment user")
+ // envUser Update
+ envUserUpdateParam := dctapi.NewEnvironmentUserParams()
+ if d.HasChange("username") || d.HasChange("password") {
+ if v, has_v := d.GetOk("username"); has_v {
+ envUserUpdateParam.SetUsername(v.(string))
+ }
+ if v, has_v := d.GetOk("password"); has_v {
+ envUserUpdateParam.SetPassword(v.(string))
+ }
+ }
+ // get the user ref
+ tflog.Info(ctx, "Getting the userlist")
+ resUserList, httpResUserList, errUserList := client.EnvironmentsAPI.ListEnvironmentUsers(ctx, environmentId).Execute()
+ if diags := apiErrorResponseHelper(ctx, resUserList, httpResUserList, errUserList); diags != nil {
+ revertChanges(d, changedKeys)
+ return diags
+ }
+
+ var user_ref string
+
+ // GetChange returns (old, new); resolve the user_ref from the current (old)
+ // username, falling back to the new username if the rename is already applied
+ old_username, new_username := d.GetChange("username")
+ for _, users := range resUserList.GetUsers() {
+ tflog.Info(ctx, "Getting the users: "+users.GetUsername())
+ if strings.EqualFold(users.GetUsername(), old_username.(string)) {
+ user_ref = users.GetUserRef()
+ break
+ } else if strings.EqualFold(users.GetUsername(), new_username.(string)) {
+ tflog.Info(ctx, "Setting the user-ref matching the new username: "+users.GetUserRef())
+ user_ref = users.GetUserRef()
+ break
+ }
+ }
+
+ // propagate the value to the read call invoked at the end of this update;
+ // the read uses the user_ref to filter from the list of users in the env
+ tflog.Info(ctx, "Setting the user_ref: "+user_ref)
+ d.Set("user_ref", user_ref)
+
+ if !isStructEmpty(envUserUpdateParam) {
+ tflog.Info(ctx, "Updating the user: "+user_ref)
+ resEnvUser, httpResEnvUser, errEnvUser := client.EnvironmentsAPI.UpdateEnvironmentUser(ctx, environmentId, user_ref).EnvironmentUserParams(*envUserUpdateParam).Execute()
+ if diags := apiErrorResponseHelper(ctx, resEnvUser, httpResEnvUser, errEnvUser); diags != nil {
+ revertChanges(d, changedKeys)
+ updateFailure = true
+ if len(diags) > 0 {
+ failureEvents = append(failureEvents, diags[0].Summary)
+ } else {
+ tflog.Warn(ctx, "UpdateEnvironmentUser Diagnostics is empty or nil; skipping appending to failureEvents")
+ }
+ }
+
+ if resEnvUser != nil {
+ job_res, job_err := PollJobStatus(resEnvUser.Job.GetId(), ctx, client)
+ if job_err != "" {
+ tflog.Warn(ctx, DLPX+WARN+"Env User Update Job Polling failed but continuing with update. Error: "+job_err)
+ }
+ tflog.Info(ctx, DLPX+INFO+"Job result is "+job_res)
+ if job_res == Failed || job_res == Canceled || job_res == Abandoned {
+ tflog.Error(ctx, DLPX+ERROR+"Job "+job_res+" "+resEnvUser.Job.GetId()+"!")
+ revertChanges(d, changedKeys)
+ updateFailure = true
+ failureEvents = append(failureEvents, job_err)
+ // return diag.Errorf("[NOT OK] Job %s %s with error %s", *resEnvUser.Job.Id, job_res, job_err)
+ }
+ }
+
+ }
+
+ }
+ if d.HasChanges(
+ "hosts",
+ "connector_port",
+ ) {
+ tflog.Info(ctx, "Proceeding to update environment hosts")
+ // host update
+ var hostId string
+
+ // get changes
+ oldHosts, newHosts := d.GetChange("hosts")
+
+ // signifies the hostname that will be updated
+ oldHost := oldHosts.([]interface{})
+ oldHostName := oldHost[0].(map[string]interface{})["hostname"].(string)
+
+ // retrieving new params for the update
+ newHost := newHosts.([]interface{})
+ newHostName := newHost[0].(map[string]interface{})["hostname"].(string)
+ newSshPort := int64(newHost[0].(map[string]interface{})["ssh_port"].(int))
+ newToolkitPath := newHost[0].(map[string]interface{})["toolkit_path"].(string)
+ newJavaHome := newHost[0].(map[string]interface{})["java_home"].(string)
+ newNfsAddress := newHost[0].(map[string]interface{})["nfs_addresses"]
+
+ // get the hosts list
+ hostsList := d.Get("hosts").([]interface{})
+
+ // retrieve the hostId corresponding to the old host name (that will be updated)
+ hostFound := false
+ for _, host := range hostsList {
+ if oldHostName == host.(map[string]interface{})["hostname"].(string) {
+ hostId = host.(map[string]interface{})["id"].(string)
+ hostFound = true
+ tflog.Info(ctx, "hostId: "+hostId)
+ break
+ }
+ }
+ if !hostFound {
+ // if no host matches, skip the host update and record the failure event
+ updateFailure = true
+ failureEvents = append(failureEvents, "No hostname "+oldHostName+" found to update")
+ }
+
+ if !updateFailure {
+ tflog.Info(ctx, DLPX+INFO+" hostID "+hostId)
+ tflog.Info(ctx, DLPX+INFO+" environmentId "+environmentId)
+
+ hostUpdateParam := dctapi.NewHostUpdateParameters()
+ if d.HasChange("connector_port") {
+ if v, has_v := d.GetOk("connector_port"); has_v {
+ hostUpdateParam.SetConnectorPort(v.(int32))
+ }
+ }
+ if newJavaHome != "" {
+ hostUpdateParam.SetJavaHome(newJavaHome)
+ }
+ if newHostName != "" {
+ hostUpdateParam.SetHostname(newHostName)
+ }
+ if newSshPort != 0 {
+ hostUpdateParam.SetSshPort(newSshPort)
+ }
+ if newToolkitPath != "" {
+ hostUpdateParam.SetToolkitPath(newToolkitPath)
+ }
+ if newNfsAddress != nil {
+ hostUpdateParam.SetNfsAddresses(toStringArray(newNfsAddress))
+ }
+ // if d.HasChange("oracle_tde_keystores_root_path") {
+ // if v, has_v := d.GetOk("oracle_tde_keystores_root_path"); has_v {
+ // hostUpdateParam.SetOracleTdeKeystoresRootPath(v.(string))
+ // }
+ // }
+
+ if !isStructEmpty(hostUpdateParam) {
+ hostUpdateRes, hostHttpRes, hostUpdateErr := client.EnvironmentsAPI.UpdateHost(ctx, environmentId, hostId).HostUpdateParameters(*hostUpdateParam).Execute()
+ if diags := apiErrorResponseHelper(ctx, hostUpdateRes, hostHttpRes, hostUpdateErr); diags != nil {
+ revertChanges(d, changedKeys)
+ updateFailure = true
+ if len(diags) > 0 {
+ failureEvents = append(failureEvents, diags[0].Summary)
+ } else {
+ tflog.Warn(ctx, "UpdateHost Diagnostics is empty or nil; skipping appending to failureEvents")
+ }
+ }
+
+ if hostUpdateRes != nil {
+ job_res, job_err := PollJobStatus(hostUpdateRes.Job.GetId(), ctx, client)
+ if job_err != "" {
+ tflog.Warn(ctx, DLPX+WARN+"Env Host Update Job Polling failed but continuing with update. Error: "+job_err)
+ }
+ tflog.Info(ctx, DLPX+INFO+"Job result is "+job_res)
+ if job_res == Failed || job_res == Canceled || job_res == Abandoned {
+ tflog.Error(ctx, DLPX+ERROR+"Job "+job_res+" "+hostUpdateRes.Job.GetId()+"!")
+ revertChanges(d, changedKeys)
+ updateFailure = true
+ failureEvents = append(failureEvents, job_err)
+ // return diag.Errorf("[NOT OK] Job %s %s with error %s", *hostUpdateRes.Job.Id, job_res, job_err)
+ }
+ }
+ }
+ }
+ }
+
+ // update tags
+ if !d.Get("ignore_tag_changes").(bool) {
+ oldTags, newTags := d.GetChange("tags")
+ if !reflect.DeepEqual(oldTags, newTags) {
+ tflog.Debug(ctx, "updating tags")
+ // delete old tag
+ tflog.Debug(ctx, "deleting old tags")
+ if len(toTagArray(oldTags)) != 0 {
+ tflog.Debug(ctx, "tag to be deleted: "+toTagArray(oldTags)[0].GetKey()+" "+toTagArray(oldTags)[0].GetValue())
+ deleteTag := *dctapi.NewDeleteTag()
+ tagDelResp, tagDelErr := client.EnvironmentsAPI.DeleteEnvironmentTags(ctx, environmentId).DeleteTag(deleteTag).Execute()
+ if diags := apiErrorResponseHelper(ctx, nil, tagDelResp, tagDelErr); diags != nil {
+ revertChanges(d, changedKeys)
+ updateFailure = true
+ if len(diags) > 0 {
+ failureEvents = append(failureEvents, diags[0].Summary)
+ } else {
+ tflog.Warn(ctx, "DeleteEnvironmentTags Diagnostics is empty or nil; skipping appending to failureEvents")
+ }
+ }
+ }
+ // create tag
+ if len(toTagArray(newTags)) != 0 {
+ tflog.Info(ctx, "creating new tags")
+ _, httpResp, tagCrtErr := client.EnvironmentsAPI.CreateEnvironmentTags(ctx, environmentId).TagsRequest(*dctapi.NewTagsRequest(toTagArray(newTags))).Execute()
+ if diags := apiErrorResponseHelper(ctx, nil, httpResp, tagCrtErr); diags != nil {
+ revertChanges(d, changedKeys)
+ return diags
+ }
+ }
+ }
+ }
+
+ if destructiveUpdate {
+ // enable Dsources back
+ for _, item := range dsourceItems {
+ if diags := enableDsource(ctx, client, item.GetId()); diags != nil {
+ return diags
+ }
+ }
+ // enable VDB back
+ for _, item := range vdbs {
+ if diags := enableVDB(ctx, client, item.GetId()); diags != nil {
+ return diags
+ }
+ }
+ }
+
+ // return the error back
+ if updateFailure {
+ return diag.Errorf("[NOT OK] Update failed with error %s", failureEvents)
+ }
+
+ readDiags := resourceEnvironmentRead(ctx, d, meta)
+ if readDiags.HasError() {
+ return readDiags
+ }
return diags
}
diff --git a/internal/provider/resource_environment_test.go b/internal/provider/resource_environment_test.go
index e6151bd..6dd02c6 100644
--- a/internal/provider/resource_environment_test.go
+++ b/internal/provider/resource_environment_test.go
@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"os"
+ "regexp"
"strings"
"testing"
@@ -11,6 +12,8 @@ import (
"github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
)
+var env_name = "test-acc-name"
+
func TestAccEnvironment_positive(t *testing.T) {
engineId := os.Getenv("ACC_ENV_ENGINE_ID")
username := os.Getenv("ACC_ENV_USERNAME")
@@ -26,9 +29,63 @@ func TestAccEnvironment_positive(t *testing.T) {
{
Config: testAccCheckDctEnvConfigBasic(engineId, username, password, hostname, toolkitPath),
Check: resource.ComposeTestCheckFunc(
- // TODO: hostname isn't not set yet?
- testAccCheckDctEnvResourceExists("delphix_environment.new_env", hostname),
- resource.TestCheckResourceAttr("delphix_environment.new_env", "hostname", hostname)),
+ testAccCheckDctEnvResourceExists("delphix_environment.new_env", engineId),
+ resource.TestCheckResourceAttr("delphix_environment.new_env", "name", env_name)),
+ },
+ },
+ })
+}
+
+func TestAccEnvironment_update_positive(t *testing.T) {
+ engineId := os.Getenv("ACC_ENV_ENGINE_ID")
+ username := os.Getenv("ACC_ENV_USERNAME")
+ password := os.Getenv("ACC_ENV_PASSWORD")
+ hostname := os.Getenv("ACC_ENV_HOSTNAME")
+ toolkitPath := os.Getenv("ACC_ENV_TOOLKIT_PATH")
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccEnvPreCheck(t, engineId, username, password, hostname, toolkitPath) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckEnvDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: testAccCheckDctEnvConfigBasic(engineId, username, password, hostname, toolkitPath),
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckDctEnvResourceExists("delphix_environment.new_env", engineId),
+ resource.TestCheckResourceAttr("delphix_environment.new_env", "name", env_name)),
+ },
+ {
+ // positive env update case
+ Config: testAccEnvUpdatePositive(engineId, username, password, hostname, toolkitPath),
+ Check: resource.ComposeTestCheckFunc(
+ resource.TestCheckResourceAttr("delphix_environment.new_env", "name", "updated-name")),
+ },
+ },
+ })
+}
+
+func TestAccEnvironment_update_negative(t *testing.T) {
+ engineId := os.Getenv("ACC_ENV_ENGINE_ID")
+ username := os.Getenv("ACC_ENV_USERNAME")
+ password := os.Getenv("ACC_ENV_PASSWORD")
+ hostname := os.Getenv("ACC_ENV_HOSTNAME")
+ toolkitPath := os.Getenv("ACC_ENV_TOOLKIT_PATH")
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccEnvPreCheck(t, engineId, username, password, hostname, toolkitPath) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckEnvDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: testAccCheckDctEnvConfigBasic(engineId, username, password, hostname, toolkitPath),
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckDctEnvResourceExists("delphix_environment.new_env", engineId),
+ resource.TestCheckResourceAttr("delphix_environment.new_env", "name", env_name)),
+ },
+ {
+ // negative update test case
+ Config: testAccEnvUpdateNegative(engineId, username, password, "updated-hostname", toolkitPath),
+ ExpectError: regexp.MustCompile("Error running apply: exit status 1"),
},
},
})
@@ -54,7 +111,7 @@ func testAccEnvPreCheck(t *testing.T, engineId string, username string, password
}
func escape(s string) string {
- // Escape backslash or terraform interepts it as a special character
+ // Escape backslash or terraform interprets it as a special character
return strings.ReplaceAll(s, "\\", "\\\\")
}
@@ -62,17 +119,23 @@ func testAccCheckDctEnvConfigBasic(engineId string, username string, password st
return fmt.Sprintf(`
resource "delphix_environment" "new_env" {
engine_id = %s
- os_name = "UNIX"
+ os_type = "UNIX"
username = "%s"
password = "%s"
- hostname = "%s"
- toolkit_path = "%s"
- name = "test-acc-name"
+ name = "%s"
+ hosts {
+ hostname = "%s"
+ toolkit_path = "%s"
+ }
+ tags {
+ key = "dlpx"
+ value = "acc-test"
+ }
}
- `, engineId, escape(username), escape(password), escape(hostname), escape(toolkitPath))
+ `, engineId, escape(username), escape(password), env_name, escape(hostname), escape(toolkitPath))
}
-func testAccCheckDctEnvResourceExists(n string, hostname string) resource.TestCheckFunc {
+func testAccCheckDctEnvResourceExists(n string, engineId string) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
@@ -91,9 +154,9 @@ func testAccCheckDctEnvResourceExists(n string, hostname string) resource.TestCh
return err
}
- actualHostname := res.GetHosts()[0].GetHostname()
- if actualHostname != hostname {
- return fmt.Errorf("actualHostname %s does not match hostname %s", actualHostname, hostname)
+ dctEngineId := res.GetEngineId()
+ if dctEngineId != engineId {
+ return fmt.Errorf("dctEngineId %s does not match provided engineID %s", dctEngineId, engineId)
}
return nil
@@ -122,3 +185,43 @@ func testAccCheckEnvDestroy(s *terraform.State) error {
return nil
}
+
+func testAccEnvUpdatePositive(engineId string, username string, password string, hostname string, toolkitPath string) string {
+ return fmt.Sprintf(`
+ resource "delphix_environment" "new_env" {
+ engine_id = %s
+ os_type = "UNIX"
+ username = "%s"
+ password = "%s"
+ name = "updated-name"
+ hosts {
+ hostname = "%s"
+ toolkit_path = "%s"
+ }
+ tags {
+ key = "dlpx-changed"
+ value = "acc-test-changed"
+ }
+ }
+ `, engineId, escape(username), escape(password), escape(hostname), escape(toolkitPath))
+}
+
+func testAccEnvUpdateNegative(engineId string, username string, password string, hostname string, toolkitPath string) string {
+ return fmt.Sprintf(`
+ resource "delphix_environment" "new_env" {
+ engine_id = %s
+ os_type = "UNIX"
+ username = "%s"
+ password = "%s"
+ name = "%s"
+ hosts {
+ hostname = "%s"
+ toolkit_path = "%s"
+ }
+ tags {
+ key = "dlpx"
+ value = "acc-test"
+ }
+ }
+ `, engineId, escape(username), escape(password), env_name, escape(hostname), escape(toolkitPath))
+}
diff --git a/internal/provider/resource_oracle_dsource.go b/internal/provider/resource_oracle_dsource.go
index 84b8355..416992f 100644
--- a/internal/provider/resource_oracle_dsource.go
+++ b/internal/provider/resource_oracle_dsource.go
@@ -3,6 +3,7 @@ package provider
import (
"context"
"net/http"
+ "reflect"
"strings"
"github.com/hashicorp/terraform-plugin-log/tflog"
@@ -72,9 +73,15 @@ func resourceOracleDsource() *schema.Resource {
return d.Id() != ""
},
},
+ "ignore_tag_changes": {
+ Type: schema.TypeBool,
+ Default: true,
+ Optional: true,
+ },
"tags": {
Type: schema.TypeList,
Optional: true,
+ Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"key": {
@@ -87,6 +94,12 @@ func resourceOracleDsource() *schema.Resource {
},
},
},
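+ // When ignore_tag_changes is true (the default), suppress every plan diff
+ // on tags so tags managed outside Terraform do not cause perpetual drift.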
+ DiffSuppressFunc: func(_, old, new string, d *schema.ResourceData) bool {
+ if ignore, ok := d.GetOk("ignore_tag_changes"); ok && ignore.(bool) {
+ return true
+ }
+ return false
+ },
},
"ops_pre_sync": {
Type: schema.TypeList,
@@ -901,12 +914,26 @@ func resourceOracleDsourceRead(ctx context.Context, d *schema.ResourceData, meta
d.Set("current_timeflow_id", result.GetCurrentTimeflowId())
d.Set("is_appdata", result.GetIsAppdata())
d.Set("sync_policy_id", result.GetSyncPolicyId())
- d.Set("retention_policy_id", result.GetReplicaRetentionPolicyId())
+ d.Set("retention_policy_id", result.GetRetentionPolicyId())
d.Set("log_sync_enabled", result.GetLogsyncEnabled())
d.Set("exported_data_directory", result.GetExportedDataDirectory())
d.Set("ops_pre_sync", flattenDSourceHooks(result.GetHooks().OpsPreSync, oldOpsPreSync))
d.Set("ops_post_sync", flattenDSourceHooks(result.GetHooks().OpsPostSync, oldOpsPostSync))
d.Set("ops_pre_log_sync", flattenDSourceHooks(result.GetHooks().OpsPreLogSync, oldOpsPreLogSync))
+
+ // fetch the dSource tags and set them in state
+ resTagsDsrc, httpRes, err := client.DSourcesAPI.GetTagsDsource(ctx, dsource_id).Execute()
+ if err != nil {
+ tflog.Error(ctx, DLPX+ERROR+"Failed to fetch tags for dSource: "+dsource_id+". Error: "+err.Error())
+ } else if httpRes != nil && httpRes.StatusCode >= 400 {
+ tflog.Error(ctx, DLPX+ERROR+"Failed to fetch tags for dSource: "+dsource_id+". HTTP Status: "+httpRes.Status)
+ } else {
+ // check if tags are returned and set them to the state
+ if len(resTagsDsrc.GetTags()) != 0 {
+ tflog.Debug(ctx, DLPX+"Tags are present")
+ d.Set("tags", flattenTags(resTagsDsrc.GetTags()))
+ }
+ }
return diags
}
@@ -1029,33 +1056,36 @@ func resourceOracleDsourceUpdate(ctx context.Context, d *schema.ResourceData, me
updateOracleDsource.SetHooks(*ndsh)
}
- res, httpRes, err := client.DSourcesAPI.UpdateOracleDsourceById(ctx, dsourceId).UpdateOracleDsourceParameters(*updateOracleDsource).Execute()
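+ // Only call the update API when at least one updatable field actually
+ // changed; an all-zero parameter object has nothing to send.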
+ if !isStructEmpty(updateOracleDsource) {
+ res, httpRes, err := client.DSourcesAPI.UpdateOracleDsourceById(ctx, dsourceId).UpdateOracleDsourceParameters(*updateOracleDsource).Execute()
- if diags := apiErrorResponseHelper(ctx, nil, httpRes, err); diags != nil {
- // revert and set the old value to the changed keys
- revertChanges(d, changedKeys)
- return diags
- }
+ if diags := apiErrorResponseHelper(ctx, nil, httpRes, err); diags != nil {
+ // revert and set the old value to the changed keys
+ revertChanges(d, changedKeys)
+ return diags
+ }
- job_status, job_err := PollJobStatus(res.Job.GetId(), ctx, client)
- if job_err != "" {
- tflog.Warn(ctx, DLPX+WARN+"Dsource Update Job Polling failed but continuing with update. Error: "+job_err)
- }
- tflog.Info(ctx, DLPX+INFO+"Job result is "+job_status)
- if isJobTerminalFailure(job_status) {
- return diag.Errorf("[NOT OK] Dsource-Update %s. JobId: %s / Error: %s", job_status, res.Job.GetId(), job_err)
+ if res != nil {
+ job_status, job_err := PollJobStatus(res.Job.GetId(), ctx, client)
+ if job_err != "" {
+ tflog.Warn(ctx, DLPX+WARN+"Oracle Dsource Update Job Polling failed but continuing with update. Error: "+job_err)
+ }
+ tflog.Info(ctx, DLPX+INFO+"Job result is "+job_status)
+ if isJobTerminalFailure(job_status) {
+ return diag.Errorf("[NOT OK] Oracle Dsource-Update %s. JobId: %s / Error: %s", job_status, res.Job.GetId(), job_err)
+ }
+ }
}
- if d.HasChanges(
- "tags",
- ) { // tags update
- tflog.Debug(ctx, "updating tags")
- if d.HasChange("tags") {
+ // update tags
+ if !d.Get("ignore_tag_changes").(bool) {
+ oldTags, newTags := d.GetChange("tags")
+ if !reflect.DeepEqual(oldTags, newTags) {
+ tflog.Debug(ctx, "updating tags")
// delete old tag
tflog.Debug(ctx, "deleting old tags")
- oldTag, newTag := d.GetChange("tags")
- if len(toTagArray(oldTag)) != 0 {
- tflog.Debug(ctx, "tag to be deleted: "+toTagArray(oldTag)[0].GetKey()+" "+toTagArray(oldTag)[0].GetValue())
+ if len(toTagArray(oldTags)) != 0 {
+ tflog.Debug(ctx, "tag to be deleted: "+toTagArray(oldTags)[0].GetKey()+" "+toTagArray(oldTags)[0].GetValue())
deleteTag := *dctapi.NewDeleteTag()
tagDelResp, tagDelErr := client.DSourcesAPI.DeleteTagsDsource(ctx, dsourceId).DeleteTag(deleteTag).Execute()
if diags := apiErrorResponseHelper(ctx, nil, tagDelResp, tagDelErr); diags != nil {
@@ -1064,9 +1094,9 @@ func resourceOracleDsourceUpdate(ctx context.Context, d *schema.ResourceData, me
}
}
// create tag
- if len(toTagArray(newTag)) != 0 {
+ if len(toTagArray(newTags)) != 0 {
tflog.Info(ctx, "creating new tags")
- _, httpResp, tagCrtErr := client.DSourcesAPI.CreateTagsDsource(ctx, dsourceId).TagsRequest(*dctapi.NewTagsRequest(toTagArray(newTag))).Execute()
+ _, httpResp, tagCrtErr := client.DSourcesAPI.CreateTagsDsource(ctx, dsourceId).TagsRequest(*dctapi.NewTagsRequest(toTagArray(newTags))).Execute()
if diags := apiErrorResponseHelper(ctx, nil, httpResp, tagCrtErr); diags != nil {
revertChanges(d, changedKeys)
return diags
@@ -1092,15 +1122,16 @@ func resourceOracleDsourceDelete(ctx context.Context, d *schema.ResourceData, me
return diags
}
- job_status, job_err := PollJobStatus(res.GetId(), ctx, client)
- if job_err != "" {
- tflog.Warn(ctx, DLPX+WARN+"Job Polling failed but continuing with deletion. Error :"+job_err)
- }
- tflog.Info(ctx, DLPX+INFO+"Job result is "+job_status)
- if isJobTerminalFailure(job_status) {
- return diag.Errorf("[NOT OK] dSource-Delete %s. JobId: %s / Error: %s", job_status, res.GetId(), job_err)
+ if res != nil {
+ job_status, job_err := PollJobStatus(res.GetId(), ctx, client)
+ if job_err != "" {
+ tflog.Warn(ctx, DLPX+WARN+"Job Polling failed but continuing with deletion. Error :"+job_err)
+ }
+ tflog.Info(ctx, DLPX+INFO+"Job result is "+job_status)
+ if isJobTerminalFailure(job_status) {
+ return diag.Errorf("[NOT OK] dSource-Delete %s. JobId: %s / Error: %s", job_status, res.GetId(), job_err)
+ }
}
-
_, diags := PollForObjectDeletion(ctx, func() (interface{}, *http.Response, error) {
return client.DSourcesAPI.GetDsourceById(ctx, dsourceId).Execute()
})
diff --git a/internal/provider/resource_oracle_dsource_test.go b/internal/provider/resource_oracle_dsource_test.go
index f8a2cbe..f033920 100644
--- a/internal/provider/resource_oracle_dsource_test.go
+++ b/internal/provider/resource_oracle_dsource_test.go
@@ -21,7 +21,7 @@ func TestOracleDsource_create_positive(t *testing.T) {
testOracleDsourcePreCheck(t, sourcevalue, groupId, name)
},
Providers: testAccProviders,
- CheckDestroy: testDsourceDestroy,
+ CheckDestroy: testOracleDsourceDestroy,
Steps: []resource.TestStep{
{
Config: testOracleDsourceBasic(name, sourcevalue, groupId),
@@ -31,10 +31,10 @@ func TestOracleDsource_create_positive(t *testing.T) {
},
{
// positive update test case
- Config: testOracleDsourceBasic("update_name", sourcevalue, groupId),
+ Config: testOracleDsourceBasic("update_name", sourcevalue, groupId), // changing the name to update-name
Check: resource.ComposeTestCheckFunc(
testOracleDsourceExists("delphix_oracle_dsource.test_oracle_dsource", sourcevalue),
- resource.TestCheckResourceAttr("delphix_oracle_dsource.test_oracle_dsource", "name", "update_name"),
+ resource.TestCheckResourceAttr("delphix_oracle_dsource.test_oracle_dsource", "name", "update_name"), // asserting the updated name
resource.TestCheckResourceAttr("delphix_oracle_dsource.test_oracle_dsource", "group_id", groupId)),
},
{
@@ -69,7 +69,45 @@ resource "delphix_oracle_dsource" "test_oracle_dsource" {
name = "%s"
source_value = "%s"
group_id = "%s"
+ tags {
+ key = "dlpx"
+ value = "acc-test"
+ }
+ ops_pre_sync {
+ name = "string-change-opspresync22"
+ command = "ls -lr"
+ shell = "bash"
+ credentials_env_vars {
+ base_var_name = "mypass2t"
+ password = "password_test"
+ }
+ credentials_env_vars {
+ base_var_name = "mypass3t"
+ password = "password_test"
+ }
+ }
+
+ ops_post_sync {
+ name = "string-change-opspostsync22"
+ command = "ls -lrta"
+ shell = "bash"
+ credentials_env_vars {
+ base_var_name = "mypassopspostsynct"
+ password = "password_test"
+ }
+ }
+
+ ops_pre_log_sync {
+ name = "string-change-opsprelogsync22"
+ command = "ls -lrt"
+ shell = "shell"
+ credentials_env_vars {
+ base_var_name = "mypassopsprelogsynct"
+ password = "password_test"
+ }
+ }
}
+
`, name, sourceValue, groupId)
}
@@ -79,6 +117,43 @@ resource "delphix_oracle_dsource" "test_oracle_dsource" {
name = "%s"
source_value = "%s"
description = "%s"
+ tags {
+ key = "dlpx"
+ value = "acc-test"
+ }
+ ops_pre_sync {
+ name = "string-change-opspresync22"
+ command = "ls -lr"
+ shell = "bash"
+ credentials_env_vars {
+ base_var_name = "mypass2t"
+ password = "password_test"
+ }
+ credentials_env_vars {
+ base_var_name = "mypass3t"
+ password = "password_test"
+ }
+ }
+
+ ops_post_sync {
+ name = "string-change-opspostsync22"
+ command = "ls -lrta"
+ shell = "bash"
+ credentials_env_vars {
+ base_var_name = "mypassopspostsynct"
+ password = "password_test"
+ }
+ }
+
+ ops_pre_log_sync {
+ name = "string-change-opsprelogsync22"
+ command = "ls -lrt"
+ shell = "shell"
+ credentials_env_vars {
+ base_var_name = "mypassopsprelogsynct"
+ password = "password_test"
+ }
+ }
}
`, name, sourceValue, description)
}
diff --git a/internal/provider/resource_vdb.go b/internal/provider/resource_vdb.go
index 5ebf0e2..bc25ebb 100644
--- a/internal/provider/resource_vdb.go
+++ b/internal/provider/resource_vdb.go
@@ -4,6 +4,7 @@ import (
"context"
"encoding/json"
"net/http"
+ "reflect"
"strings"
"time"
@@ -579,14 +580,20 @@ func resourceVdb() *schema.Resource {
Type: schema.TypeString,
Computed: true,
},
+ "ignore_tag_changes": {
+ Type: schema.TypeBool,
+ Default: true,
+ Optional: true,
+ },
"tags": {
Type: schema.TypeList,
Optional: true,
+ Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"key": {
Type: schema.TypeString,
- Required: true,
+ Optional: true,
},
"value": {
Type: schema.TypeString,
@@ -594,6 +601,12 @@ func resourceVdb() *schema.Resource {
},
},
},
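+ // When ignore_tag_changes is true (the default), suppress every plan diff
+ // on tags so tags managed outside Terraform do not cause perpetual drift.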
+ DiffSuppressFunc: func(_, old, new string, d *schema.ResourceData) bool {
+ if ignore, ok := d.GetOk("ignore_tag_changes"); ok && ignore.(bool) {
+ return true
+ }
+ return false
+ },
},
"appdata_source_params": {
Type: schema.TypeString,
@@ -998,16 +1011,17 @@ func helper_provision_by_snapshot(ctx context.Context, d *schema.ResourceData, m
d.SetId(apiRes.GetVdbId())
- job_res, job_err := PollJobStatus(apiRes.Job.GetId(), ctx, client)
- if job_err != "" {
- tflog.Error(ctx, DLPX+ERROR+"Job Polling failed but continuing with provisioning. Error: "+job_err)
- }
- tflog.Info(ctx, DLPX+INFO+"Job result is "+job_res)
- if job_res == Failed || job_res == Canceled || job_res == Abandoned {
- tflog.Error(ctx, DLPX+ERROR+"Job "+job_res+" "+apiRes.Job.GetId()+"!")
- return diag.Errorf("[NOT OK] Job %s %s with error %s", apiRes.Job.GetId(), job_res, job_err)
+ if apiRes != nil {
+ job_res, job_err := PollJobStatus(apiRes.Job.GetId(), ctx, client)
+ if job_err != "" {
+ tflog.Error(ctx, DLPX+ERROR+"Job Polling failed but continuing with provisioning. Error: "+job_err)
+ }
+ tflog.Info(ctx, DLPX+INFO+"Job result is "+job_res)
+ if job_res == Failed || job_res == Canceled || job_res == Abandoned {
+ tflog.Error(ctx, DLPX+ERROR+"Job "+job_res+" "+apiRes.Job.GetId()+"!")
+ return diag.Errorf("[NOT OK] Job %s %s with error %s", apiRes.Job.GetId(), job_res, job_err)
+ }
}
-
readDiags := resourceVdbRead(ctx, d, meta)
if readDiags.HasError() {
@@ -1246,16 +1260,17 @@ func helper_provision_by_timestamp(ctx context.Context, d *schema.ResourceData,
d.SetId(apiRes.GetVdbId())
- job_res, job_err := PollJobStatus(apiRes.Job.GetId(), ctx, client)
- if job_err != "" {
- tflog.Error(ctx, DLPX+ERROR+"Job Polling failed but continuing with provisioning. Error: "+job_err)
- }
- tflog.Info(ctx, DLPX+INFO+"Job result is "+job_res)
- if job_res == "FAILED" {
- tflog.Error(ctx, DLPX+ERROR+"Job "+apiRes.Job.GetId()+" Failed!")
- return diag.Errorf("[NOT OK] Job %s Failed with error %s", apiRes.Job.GetId(), job_err)
+ if apiRes != nil {
+ job_res, job_err := PollJobStatus(apiRes.Job.GetId(), ctx, client)
+ if job_err != "" {
+ tflog.Error(ctx, DLPX+ERROR+"Job Polling failed but continuing with provisioning. Error: "+job_err)
+ }
+ tflog.Info(ctx, DLPX+INFO+"Job result is "+job_res)
+ if job_res == "FAILED" {
+ tflog.Error(ctx, DLPX+ERROR+"Job "+apiRes.Job.GetId()+" Failed!")
+ return diag.Errorf("[NOT OK] Job %s Failed with error %s", apiRes.Job.GetId(), job_err)
+ }
}
-
readDiags := resourceVdbRead(ctx, d, meta)
if readDiags.HasError() {
@@ -1479,16 +1494,17 @@ func helper_provision_by_bookmark(ctx context.Context, d *schema.ResourceData, m
d.SetId(apiRes.GetVdbId())
- job_res, job_err := PollJobStatus(apiRes.Job.GetId(), ctx, client)
- if job_err != "" {
- tflog.Error(ctx, DLPX+ERROR+"Job Polling failed but continuing with provisioning. Error: "+job_err)
- }
- tflog.Info(ctx, DLPX+INFO+"Job result is "+job_res)
- if job_res == Failed || job_res == Canceled || job_res == Abandoned {
- tflog.Error(ctx, DLPX+ERROR+"Job "+job_res+apiRes.Job.GetId()+"!")
- return diag.Errorf("[NOT OK] Job %s %s with error %s", apiRes.Job.GetId(), job_res, job_err)
+ if apiRes != nil {
+ job_res, job_err := PollJobStatus(apiRes.Job.GetId(), ctx, client)
+ if job_err != "" {
+ tflog.Error(ctx, DLPX+ERROR+"Job Polling failed but continuing with provisioning. Error: "+job_err)
+ }
+ tflog.Info(ctx, DLPX+INFO+"Job result is "+job_res)
+ if job_res == Failed || job_res == Canceled || job_res == Abandoned {
+ tflog.Error(ctx, DLPX+ERROR+"Job "+job_res+apiRes.Job.GetId()+"!")
+ return diag.Errorf("[NOT OK] Job %s %s with error %s", apiRes.Job.GetId(), job_res, job_err)
+ }
}
-
readDiags := resourceVdbRead(ctx, d, meta)
if readDiags.HasError() {
@@ -1686,9 +1702,6 @@ func resourceVdbUpdate(ctx context.Context, d *schema.ResourceData, meta interfa
var updateFailure, destructiveUpdate bool = false, false
var nonUpdatableField []string
- // var vdbs []dctapi.VDB
- // var vdbDiags diag.Diagnostics
-
// if changedKeys contains non updatable field set a flag
for _, key := range changedKeys {
if !updatableVdbKeys[key] {
@@ -1808,7 +1821,7 @@ func resourceVdbUpdate(ctx context.Context, d *schema.ResourceData, meta interfa
}
}
- if nvdh != nil {
+ if nvdh != nil && !isStructEmpty(nvdh) {
updateVDBParam.SetHooks(*nvdh)
}
@@ -1902,34 +1915,36 @@ func resourceVdbUpdate(ctx context.Context, d *schema.ResourceData, meta interfa
json.Unmarshal([]byte(d.Get("config_params").(string)), &config_params)
updateVDBParam.SetConfigParams(config_params)
}
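+ // Skip the update call entirely when no updatable VDB field changed.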
+ if !isStructEmpty(updateVDBParam) {
+ res, httpRes, err := client.VDBsAPI.UpdateVdbById(ctx, d.Get("id").(string)).UpdateVDBParameters(*updateVDBParam).Execute()
- res, httpRes, err := client.VDBsAPI.UpdateVdbById(ctx, d.Get("id").(string)).UpdateVDBParameters(*updateVDBParam).Execute()
-
- if diags := apiErrorResponseHelper(ctx, nil, httpRes, err); diags != nil {
- // revert and set the old value to the changed keys
- revertChanges(d, changedKeys)
- return diags
- }
+ if diags := apiErrorResponseHelper(ctx, nil, httpRes, err); diags != nil {
+ // revert and set the old value to the changed keys
+ revertChanges(d, changedKeys)
+ return diags
+ }
- job_status, job_err := PollJobStatus(res.Job.GetId(), ctx, client)
- if job_err != "" {
- tflog.Warn(ctx, DLPX+WARN+"VDB Update Job Polling failed but continuing with update. Error: "+job_err)
- }
- tflog.Info(ctx, DLPX+INFO+"Job result is "+job_status)
- if isJobTerminalFailure(job_status) {
- return diag.Errorf("[NOT OK] VDB-Update %s. JobId: %s / Error: %s", job_status, res.Job.GetId(), job_err)
+ if res != nil {
+ job_status, job_err := PollJobStatus(res.Job.GetId(), ctx, client)
+ if job_err != "" {
+ tflog.Warn(ctx, DLPX+WARN+"VDB Update Job Polling failed but continuing with update. Error: "+job_err)
+ }
+ tflog.Info(ctx, DLPX+INFO+"Job result is "+job_status)
+ if isJobTerminalFailure(job_status) {
+ return diag.Errorf("[NOT OK] VDB-Update %s. JobId: %s / Error: %s", job_status, res.Job.GetId(), job_err)
+ }
+ }
}
- if d.HasChanges(
- "tags",
- ) { // tags update
- tflog.Debug(ctx, "updating tags")
- if d.HasChange("tags") {
+ // update tags
+ if !d.Get("ignore_tag_changes").(bool) {
+ oldTags, newTags := d.GetChange("tags")
+ if !reflect.DeepEqual(oldTags, newTags) {
+ tflog.Debug(ctx, "updating tags")
// delete old tag
tflog.Debug(ctx, "deleting old tags")
- oldTag, newTag := d.GetChange("tags")
- if len(toTagArray(oldTag)) != 0 {
- tflog.Debug(ctx, "tag to be deleted: "+toTagArray(oldTag)[0].GetKey()+" "+toTagArray(oldTag)[0].GetValue())
+ if len(toTagArray(oldTags)) != 0 {
+ tflog.Debug(ctx, "tag to be deleted: "+toTagArray(oldTags)[0].GetKey()+" "+toTagArray(oldTags)[0].GetValue())
deleteTag := *dctapi.NewDeleteTag()
tagDelResp, tagDelErr := client.VDBsAPI.DeleteVdbTags(ctx, vdbId).DeleteTag(deleteTag).Execute()
if diags := apiErrorResponseHelper(ctx, nil, tagDelResp, tagDelErr); diags != nil {
@@ -1938,9 +1953,9 @@ func resourceVdbUpdate(ctx context.Context, d *schema.ResourceData, meta interfa
}
}
// create tag
- if len(toTagArray(newTag)) != 0 {
+ if len(toTagArray(newTags)) != 0 {
tflog.Info(ctx, "creating new tags")
- _, httpResp, tagCrtErr := client.VDBsAPI.CreateVdbTags(ctx, vdbId).TagsRequest(*dctapi.NewTagsRequest(toTagArray(newTag))).Execute()
+ _, httpResp, tagCrtErr := client.VDBsAPI.CreateVdbTags(ctx, vdbId).TagsRequest(*dctapi.NewTagsRequest(toTagArray(newTags))).Execute()
if diags := apiErrorResponseHelper(ctx, nil, httpResp, tagCrtErr); diags != nil {
revertChanges(d, changedKeys)
return diags
@@ -1948,6 +1963,7 @@ func resourceVdbUpdate(ctx context.Context, d *schema.ResourceData, meta interfa
}
}
}
+
if destructiveUpdate {
if diags := enableVDB(ctx, client, vdbId); diags != nil {
return diags //if failure should we enable
diff --git a/internal/provider/resource_vdb_test.go b/internal/provider/resource_vdb_test.go
index 114fe4b..64c5b34 100644
--- a/internal/provider/resource_vdb_test.go
+++ b/internal/provider/resource_vdb_test.go
@@ -101,6 +101,65 @@ func testAccCheckDctVDBConfigBasic() string {
resource "delphix_vdb" "new" {
auto_select_repository = true
source_data_id = "%s"
+ tags {
+ key = "dlpx"
+ value = "acc-test"
+ }
+ post_snapshot {
+ name = "post_snap"
+ command = "ls -lrt"
+ shell = "bash"
+ }
+ pre_snapshot {
+ name = "string"
+ command = "ls -l"
+ shell = "bash"
+ }
+ pre_stop {
+ name = "string"
+ command = "ls -ltr"
+ shell = "bash"
+ }
+ configure_clone {
+ name = "configure_clone"
+ command = "ls -tr"
+ shell = "bash"
+ }
+ post_refresh {
+ name = "string"
+ command = "ls -lrt"
+ shell = "bash"
+ }
+ post_stop {
+ name = "string"
+ command = "ls"
+ shell = "bash"
+ }
+ post_rollback {
+ name = "string"
+ command = "ls"
+ shell = "bash"
+ }
+ post_start {
+ name = "string"
+ command = "ls"
+ shell = "bash"
+ }
+ pre_rollback {
+ name = "Pre_rollback"
+ command = "ls"
+ shell = "bash"
+ }
+ pre_start {
+ name = "pre_start"
+ command = "ls"
+ shell = "bash"
+ }
+ pre_refresh {
+ name = "string"
+ command = "ls"
+ shell = "bash"
+ }
}
`, datasource_id)
}
@@ -113,6 +172,65 @@ func testAccCheckDctVDBConfigAppDataBasic() string {
auto_select_repository = true
source_data_id = "%s"
appdata_source_params = jsonencode(%s)
+ tags {
+ key = "dlpx"
+ value = "acc-test"
+ }
+ post_snapshot {
+ name = "post_snap"
+ command = "ls -lrt"
+ shell = "bash"
+ }
+ pre_snapshot {
+ name = "string"
+ command = "ls -l"
+ shell = "bash"
+ }
+ pre_stop {
+ name = "string"
+ command = "ls -ltr"
+ shell = "bash"
+ }
+ configure_clone {
+ name = "configure_clone"
+ command = "ls -tr"
+ shell = "bash"
+ }
+ post_refresh {
+ name = "string"
+ command = "ls -lrt"
+ shell = "bash"
+ }
+ post_stop {
+ name = "string"
+ command = "ls"
+ shell = "bash"
+ }
+ post_rollback {
+ name = "string"
+ command = "ls"
+ shell = "bash"
+ }
+ post_start {
+ name = "string"
+ command = "ls"
+ shell = "bash"
+ }
+ pre_rollback {
+ name = "Pre_rollback"
+ command = "ls"
+ shell = "bash"
+ }
+ pre_start {
+ name = "pre_start"
+ command = "ls"
+ shell = "bash"
+ }
+ pre_refresh {
+ name = "string"
+ command = "ls"
+ shell = "bash"
+ }
}
`, appdata_datasource_id, appdata_source_params)
}
@@ -177,6 +295,65 @@ func testAccCheckDctVDBBookmarkConfigBasic() string {
provision_type = "bookmark"
auto_select_repository = true
bookmark_id = "%s"
+ tags {
+ key = "dlpx"
+ value = "acc-test"
+ }
+ post_snapshot {
+ name = "post_snap"
+ command = "ls -lrt"
+ shell = "bash"
+ }
+ pre_snapshot {
+ name = "string"
+ command = "ls -l"
+ shell = "bash"
+ }
+ pre_stop {
+ name = "string"
+ command = "ls -ltr"
+ shell = "bash"
+ }
+ configure_clone {
+ name = "configure_clone"
+ command = "ls -tr"
+ shell = "bash"
+ }
+ post_refresh {
+ name = "string"
+ command = "ls -lrt"
+ shell = "bash"
+ }
+ post_stop {
+ name = "string"
+ command = "ls"
+ shell = "bash"
+ }
+ post_rollback {
+ name = "string"
+ command = "ls"
+ shell = "bash"
+ }
+ post_start {
+ name = "string"
+ command = "ls"
+ shell = "bash"
+ }
+ pre_rollback {
+ name = "Pre_rollback"
+ command = "ls"
+ shell = "bash"
+ }
+ pre_start {
+ name = "pre_start"
+ command = "ls"
+ shell = "bash"
+ }
+ pre_refresh {
+ name = "string"
+ command = "ls"
+ shell = "bash"
+ }
}
`, bookmark_id)
@@ -320,6 +497,65 @@ func testAccUpdateNegative(value bool) string {
resource "delphix_vdb" "new" {
auto_select_repository = "%t"
source_data_id = "%s"
+ tags {
+ key = "dlpx"
+ value = "acc-test"
+ }
+ post_snapshot {
+ name = "post_snap"
+ command = "ls -lrt"
+ shell = "bash"
+ }
+ pre_snapshot {
+ name = "string"
+ command = "ls -l"
+ shell = "bash"
+ }
+ pre_stop {
+ name = "string"
+ command = "ls -ltr"
+ shell = "bash"
+ }
+ configure_clone {
+ name = "configure_clone"
+ command = "ls -tr"
+ shell = "bash"
+ }
+ post_refresh {
+ name = "string"
+ command = "ls -lrt"
+ shell = "bash"
+ }
+ post_stop {
+ name = "string"
+ command = "ls"
+ shell = "bash"
+ }
+ post_rollback {
+ name = "string"
+ command = "ls"
+ shell = "bash"
+ }
+ post_start {
+ name = "string"
+ command = "ls"
+ shell = "bash"
+ }
+ pre_rollback {
+ name = "Pre_rollback"
+ command = "ls"
+ shell = "bash"
+ }
+ pre_start {
+ name = "pre_start"
+ command = "ls"
+ shell = "bash"
+ }
+ pre_refresh {
+ name = "string"
+ command = "ls"
+ shell = "bash"
+ }
}
`, value, datasource_id)
}
@@ -332,6 +568,65 @@ func testAccUpdatePositive(name string, vdb_restart bool) string {
source_data_id = "%s"
name = "%s"
vdb_restart = "%t"
+ tags {
+ key = "dlpx"
+ value = "acc-test"
+ }
+ post_snapshot {
+ name = "post_snap"
+ command = "ls -lrt"
+ shell = "bash"
+ }
+ pre_snapshot {
+ name = "string"
+ command = "ls -l"
+ shell = "bash"
+ }
+ pre_stop {
+ name = "string"
+ command = "ls -ltr"
+ shell = "bash"
+ }
+ configure_clone {
+ name = "configure_clone"
+ command = "ls -tr"
+ shell = "bash"
+ }
+ post_refresh {
+ name = "string"
+ command = "ls -lrt"
+ shell = "bash"
+ }
+ post_stop {
+ name = "string"
+ command = "ls"
+ shell = "bash"
+ }
+ post_rollback {
+ name = "string"
+ command = "ls"
+ shell = "bash"
+ }
+ post_start {
+ name = "string"
+ command = "ls"
+ shell = "bash"
+ }
+ pre_rollback {
+ name = "Pre_rollback"
+ command = "ls"
+ shell = "bash"
+ }
+ pre_start {
+ name = "pre_start"
+ command = "ls"
+ shell = "bash"
+ }
+ pre_refresh {
+ name = "string"
+ command = "ls"
+ shell = "bash"
+ }
}
`, datasource_id, name, vdb_restart)
}
diff --git a/internal/provider/utility.go b/internal/provider/utility.go
index ba6248a..f0d6d7f 100644
--- a/internal/provider/utility.go
+++ b/internal/provider/utility.go
@@ -2,11 +2,13 @@ package provider
import (
"context"
+ "fmt"
"io"
"math"
"net/http"
"reflect"
"strconv"
+ "strings"
"time"
dctapi "github.com/delphix/dct-sdk-go/v25"
@@ -114,10 +116,20 @@ func flattenHosts(hosts []dctapi.Host) []interface{} {
returnedHosts := make([]interface{}, len(hosts))
for i, host := range hosts {
returnedHost := make(map[string]interface{})
+ returnedHost["id"] = host.GetId()
returnedHost["hostname"] = host.GetHostname()
returnedHost["os_name"] = host.GetOsName()
returnedHost["os_version"] = host.GetOsVersion()
returnedHost["memory_size"] = host.GetMemorySize()
+ returnedHost["ssh_port"] = host.GetSshPort()
+ returnedHost["toolkit_path"] = host.GetToolkitPath()
+ returnedHost["processor_type"] = host.GetProcessorType()
+ returnedHost["timezone"] = host.GetTimezone()
+ returnedHost["available"] = host.GetAvailable()
+ returnedHost["nfs_addresses"] = host.GetNfsAddresses()
+ returnedHost["java_home"] = host.GetJavaHome()
+ returnedHost["oracle_tde_keystores_root_path"] = host.GetOracleTdeKeystoresRootPath()
+
returnedHosts[i] = returnedHost
}
return returnedHosts
@@ -135,6 +147,8 @@ func flattenHostRepositories(repos []dctapi.Repository) []interface{} {
returnedRepo["database_type"] = host.GetDatabaseType()
returnedRepo["allow_provisioning"] = host.GetAllowProvisioning()
returnedRepo["is_staging"] = host.GetIsStaging()
+ returnedRepo["oracle_base"] = host.GetOracleBase()
+ returnedRepo["bits"] = host.GetBits()
returnedRepos[i] = returnedRepo
}
return returnedRepos
@@ -234,6 +248,7 @@ func apiErrorResponseHelper(ctx context.Context, res interface{}, httpRes *http.
tflog.Error(ctx, DLPX+ERROR+"An error occurred: "+nerr.Error())
diags = diag.FromErr(nerr)
} else {
+ tflog.Info(ctx, DLPX+INFO+"Error: "+resBody)
diags = diag.Errorf(resBody)
}
return diags
@@ -373,3 +388,145 @@ func isSnapSyncFailure(job_id string, ctx context.Context, client *dctapi.APICli
}
return false
}
+
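+// filterVDBs returns the VDBs provisioned on the given environment, using the
+// DCT search API filter expression "environment_id eq '<envId>'".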
+func filterVDBs(ctx context.Context, client *dctapi.APIClient, envId string) ([]dctapi.VDB, diag.Diagnostics) {
+ tflog.Info(ctx, DLPX+INFO+"Filter VBDs by envId "+envId)
+ vdbSearchExpr := dctapi.NewSearchBody()
+ vdbSearchExpr.SetFilterExpression(fmt.Sprintf("environment_id eq '%s'", envId))
+
+ apiReq := client.VDBsAPI.SearchVdbs(ctx)
+ apiRes, httpRes, err := apiReq.SearchBody(*vdbSearchExpr).Execute()
+ if diags := apiErrorResponseHelper(ctx, apiRes, httpRes, err); diags != nil {
+ return nil, diags
+ }
+ return apiRes.Items, nil
+}
+
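+// filterSources returns the sources attached to the given environment.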
+func filterSources(ctx context.Context, client *dctapi.APIClient, envId string) ([]dctapi.Source, diag.Diagnostics) {
+ tflog.Info(ctx, DLPX+INFO+"Filter Sources by envId "+envId)
+ sourceSearchExpr := dctapi.NewSearchBody()
+ sourceSearchExpr.SetFilterExpression(fmt.Sprintf("environment_id eq '%s'", envId))
+ apiReq := client.SourcesAPI.SearchSources(ctx)
+ apiRes, httpRes, err := apiReq.SearchBody(*sourceSearchExpr).Execute()
+ if diags := apiErrorResponseHelper(ctx, apiRes, httpRes, err); diags != nil {
+ return nil, diags
+ }
+ return apiRes.Items, nil
+}
+
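+// filterdSources resolves the dSources backed by the given source IDs by
+// building an "in" filter expression for the DCT search API.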
+func filterdSources(ctx context.Context, client *dctapi.APIClient, sourceIds []string) ([]dctapi.DSource, diag.Diagnostics) {
+ tflog.Info(ctx, DLPX+INFO+"Filter dSources by SourceIds "+strings.Join(sourceIds, ", "))
+ dsourceSearchExpr := dctapi.NewSearchBody()
+ dsourceSearchExpr.SetFilterExpression(fmt.Sprintf("source_id in ['%s']", strings.Join(sourceIds, "', '")))
+ tflog.Info(ctx, DLPX+INFO+"Filter dSources by SourceIds "+dsourceSearchExpr.GetFilterExpression())
+ apiReq := client.DSourcesAPI.SearchDsources(ctx)
+ apiRes, httpRes, err := apiReq.SearchBody(*dsourceSearchExpr).Execute()
+ if diags := apiErrorResponseHelper(ctx, apiRes, httpRes, err); diags != nil {
+ return nil, diags
+ }
+ return apiRes.Items, nil
+}
+
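+// disabledSource disables the given dSource and polls the resulting job,
+// returning diagnostics when the job ends in a terminal failure.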
+func disabledSource(ctx context.Context, client *dctapi.APIClient, dsourceId string) diag.Diagnostics {
+ tflog.Info(ctx, DLPX+INFO+"Disable dSource "+dsourceId)
+ disableDsourceParam := dctapi.NewDisableDsourceParameters()
+ apiRes, httpRes, err := client.DSourcesAPI.DisableDsource(ctx, dsourceId).DisableDsourceParameters(*disableDsourceParam).Execute()
+ if diags := apiErrorResponseHelper(ctx, apiRes, httpRes, err); diags != nil {
+ return diags
+ }
+ job_res, job_err := PollJobStatus(apiRes.Job.GetId(), ctx, client)
+ if job_err != "" {
+ tflog.Warn(ctx, DLPX+WARN+"dSource disable Job Polling failed. Error: "+job_err)
+ }
+ tflog.Info(ctx, DLPX+INFO+"Job result is "+job_res)
+ if job_res == Failed || job_res == Canceled || job_res == Abandoned {
+ tflog.Error(ctx, DLPX+ERROR+"Job "+job_res+" "+*apiRes.Job.Id+"!")
+ return diag.Errorf("[NOT OK] Job %s %s with error %s", *apiRes.Job.Id, job_res, job_err)
+ }
+ return nil
+} // TODO: decide whether a terminal job failure here should abort the update or merely be logged
+
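+// enableDsource re-enables the given dSource and polls the resulting job,
+// returning diagnostics when the job ends in a terminal failure.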
+func enableDsource(ctx context.Context, client *dctapi.APIClient, dsourceId string) diag.Diagnostics {
+ tflog.Info(ctx, DLPX+INFO+"Enable dSource "+dsourceId)
+ enableDsourceParam := dctapi.NewEnableDsourceParameters()
+ apiRes, httpRes, err := client.DSourcesAPI.EnableDsource(ctx, dsourceId).EnableDsourceParameters(*enableDsourceParam).Execute()
+ if diags := apiErrorResponseHelper(ctx, apiRes, httpRes, err); diags != nil {
+ return diags
+ }
+ job_res, job_err := PollJobStatus(apiRes.Job.GetId(), ctx, client)
+ if job_err != "" {
+ tflog.Warn(ctx, DLPX+WARN+"dSource enable Job Polling failed. Error: "+job_err)
+ }
+ tflog.Info(ctx, DLPX+INFO+"Job result is "+job_res)
+ if job_res == Failed || job_res == Canceled || job_res == Abandoned {
+ tflog.Error(ctx, DLPX+ERROR+"Job "+job_res+" "+apiRes.Job.GetId()+"!")
+ return diag.Errorf("[NOT OK] Job %s %s with error %s", apiRes.Job.GetId(), job_res, job_err)
+ }
+ return nil
+} // TODO: decide whether a terminal job failure here should abort the update or merely be logged
+
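+// toSourceOperationArray converts Terraform hook blocks (name, command,
+// shell) into DCT SourceOperation objects, including any credential
+// environment variables.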
+func toSourceOperationArray(array interface{}) []dctapi.SourceOperation {
+ items := []dctapi.SourceOperation{}
+ for _, item := range array.([]interface{}) {
+ item_map := item.(map[string]interface{})
+ sourceOperation := dctapi.NewSourceOperation(item_map["name"].(string), item_map["command"].(string))
+ if item_map["shell"].(string) != "" {
+ sourceOperation.SetShell(item_map["shell"].(string))
+ }
+ sourceOperation.SetCredentialsEnvVars(toCredentialsEnvVariableArray(item_map["credentials_env_vars"]))
+ items = append(items, *sourceOperation)
+ }
+ return items
+}
+
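+// toCredentialsEnvVariableArray converts credentials_env_vars blocks into API
+// objects; only non-empty attributes are set, so each variable carries exactly
+// the credential source (plain password or a vault) that was configured.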
+func toCredentialsEnvVariableArray(array interface{}) []dctapi.CredentialsEnvVariable {
+ items := []dctapi.CredentialsEnvVariable{}
+ for _, item := range array.([]interface{}) {
+ item_map := item.(map[string]interface{})
+
+ credentialsEnvVariable_item := dctapi.NewCredentialsEnvVariable(item_map["base_var_name"].(string))
+ if item_map["password"].(string) != "" {
+ credentialsEnvVariable_item.SetPassword(item_map["password"].(string))
+ }
+ if item_map["vault"].(string) != "" {
+ credentialsEnvVariable_item.SetVault(item_map["vault"].(string))
+ }
+ if item_map["hashicorp_vault_engine"].(string) != "" {
+ credentialsEnvVariable_item.SetHashicorpVaultEngine(item_map["hashicorp_vault_engine"].(string))
+ }
+ if item_map["hashicorp_vault_secret_path"].(string) != "" {
+ credentialsEnvVariable_item.SetHashicorpVaultSecretPath(item_map["hashicorp_vault_secret_path"].(string))
+ }
+ if item_map["hashicorp_vault_username_key"].(string) != "" {
+ credentialsEnvVariable_item.SetHashicorpVaultUsernameKey(item_map["hashicorp_vault_username_key"].(string))
+ }
+ if item_map["hashicorp_vault_secret_key"].(string) != "" {
+ credentialsEnvVariable_item.SetHashicorpVaultSecretKey(item_map["hashicorp_vault_secret_key"].(string))
+ }
+ if item_map["azure_vault_name"].(string) != "" {
+ credentialsEnvVariable_item.SetAzureVaultName(item_map["azure_vault_name"].(string))
+ }
+ if item_map["azure_vault_username_key"].(string) != "" {
+ credentialsEnvVariable_item.SetAzureVaultUsernameKey(item_map["azure_vault_username_key"].(string))
+ }
+ if item_map["azure_vault_secret_key"].(string) != "" {
+ credentialsEnvVariable_item.SetAzureVaultSecretKey(item_map["azure_vault_secret_key"].(string))
+ }
+ if item_map["cyberark_vault_query_string"].(string) != "" {
+ credentialsEnvVariable_item.SetCyberarkVaultQueryString(item_map["cyberark_vault_query_string"].(string))
+ }
+ items = append(items, *credentialsEnvVariable_item)
+ }
+ return items
+}
+
+// isStructEmpty reports whether every field of the struct (or struct pointed
+// to by v) is at its zero value. A nil pointer is treated as empty.
+func isStructEmpty(v interface{}) bool {
+ val := reflect.ValueOf(v)
+ if val.Kind() == reflect.Ptr {
+ if val.IsNil() {
+ return true // a nil pointer carries no data
+ }
+ val = val.Elem() // dereference to the underlying struct
+ }
+ for i := 0; i < val.NumField(); i++ {
+ if !val.Field(i).IsZero() {
+ return false // any non-zero field means the struct is not empty
+ }
+ }
+ return true
+}