diff --git a/docs/data-sources/sfs_export_policy.md b/docs/data-sources/sfs_export_policy.md
new file mode 100644
index 000000000..e60538044
--- /dev/null
+++ b/docs/data-sources/sfs_export_policy.md
@@ -0,0 +1,56 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "stackit_sfs_export_policy Data Source - stackit"
+subcategory: ""
+description: |-
+ SFS export policy datasource schema. Must have a region specified in the provider configuration.
+ ~> This datasource is in beta and may be subject to breaking changes in the future. Use with caution. See our guide https://registry.terraform.io/providers/stackitcloud/stackit/latest/docs/guides/opting_into_beta_resources for how to opt-in to use beta resources.
+---
+
+# stackit_sfs_export_policy (Data Source)
+
+SFS export policy datasource schema. Must have a `region` specified in the provider configuration.
+
+~> This datasource is in beta and may be subject to breaking changes in the future. Use with caution. See our [guide](https://registry.terraform.io/providers/stackitcloud/stackit/latest/docs/guides/opting_into_beta_resources) for how to opt-in to use beta resources.
+
+## Example Usage
+
+```terraform
+data "stackit_sfs_export_policy" "example" {
+ project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
+ policy_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
+}
+```
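+
+As a minimal usage sketch (attribute names as listed in the schema below), the fetched policy can be referenced elsewhere in the configuration, e.g. via outputs:
+
+```terraform
+output "export_policy_name" {
+ value = data.stackit_sfs_export_policy.example.name
+}
+
+output "export_policy_rules" {
+ value = data.stackit_sfs_export_policy.example.rules
+}
+```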
+
+
+## Schema
+
+### Required
+
+- `policy_id` (String) Export policy ID
+- `project_id` (String) STACKIT project ID to which the export policy is associated.
+
+### Optional
+
+- `region` (String) The resource region. If not defined, the provider region is used.
+
+### Read-Only
+
+- `id` (String) Terraform's internal resource ID. It is structured as "`project_id`,`region`,`policy_id`".
+- `name` (String) Name of the export policy.
+- `rules` (Attributes List) (see [below for nested schema](#nestedatt--rules))
+
+
+### Nested Schema for `rules`
+
+Optional:
+
+- `description` (String) Description of the Rule
+
+Read-Only:
+
+- `ip_acl` (List of String) IP access control list; IPs must have a subnet mask (e.g. "172.16.0.0/24" for a range of IPs, or "172.16.0.250/32" for a specific IP).
+- `order` (Number) Order of the rule within a Share Export Policy. The order is used so that when a client IP matches multiple rules, the first rule is applied
+- `read_only` (Boolean) Flag to indicate if client IPs matching this rule can only mount the share in read only mode
+- `set_uuid` (Boolean) Flag to honor set UUID
+- `super_user` (Boolean) Flag to indicate if client IPs matching this rule have root access on the Share
diff --git a/docs/data-sources/sfs_resource_pool.md b/docs/data-sources/sfs_resource_pool.md
new file mode 100644
index 000000000..6d0036947
--- /dev/null
+++ b/docs/data-sources/sfs_resource_pool.md
@@ -0,0 +1,47 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "stackit_sfs_resource_pool Data Source - stackit"
+subcategory: ""
+description: |-
+ Resource-pool datasource schema. Must have a region specified in the provider configuration.
+ ~> This datasource is in beta and may be subject to breaking changes in the future. Use with caution. See our guide https://registry.terraform.io/providers/stackitcloud/stackit/latest/docs/guides/opting_into_beta_resources for how to opt-in to use beta resources.
+---
+
+# stackit_sfs_resource_pool (Data Source)
+
+Resource-pool datasource schema. Must have a `region` specified in the provider configuration.
+
+~> This datasource is in beta and may be subject to breaking changes in the future. Use with caution. See our [guide](https://registry.terraform.io/providers/stackitcloud/stackit/latest/docs/guides/opting_into_beta_resources) for how to opt-in to use beta resources.
+
+## Example Usage
+
+```terraform
+data "stackit_sfs_resource_pool" "resourcepool" {
+ project_id = "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"
+ resource_pool_id = "YYYYYYYY-YYYY-YYYY-YYYY-YYYYYYYYYYYY"
+}
+```
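+
+As a minimal usage sketch (attribute names as listed in the schema below), the fetched pool can be referenced elsewhere in the configuration:
+
+```terraform
+output "resource_pool_size_gigabytes" {
+ value = data.stackit_sfs_resource_pool.resourcepool.size_gigabytes
+}
+```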
+
+
+## Schema
+
+### Required
+
+- `project_id` (String) STACKIT project ID to which the resource pool is associated.
+- `resource_pool_id` (String) Resource pool ID
+
+### Optional
+
+- `region` (String) The resource region. If not defined, the provider region is used.
+
+### Read-Only
+
+- `availability_zone` (String) Availability zone.
+- `id` (String) Terraform's internal resource ID. It is structured as "`project_id`,`resource_pool_id`".
+- `ip_acl` (List of String) List of IPs that can mount the resource pool read-only; IPs must have a subnet mask (e.g. "172.16.0.0/24" for a range of IPs, or "172.16.0.250/32" for a specific IP).
+- `name` (String) Name of the resource pool.
+- `performance_class` (String) Name of the performance class.
+- `performance_class_downgradable_at` (String) Time when the performance class can be downgraded again.
+- `size_gigabytes` (Number) Size of the resource pool (unit: gigabytes)
+- `size_reducible_at` (String) Time when the size can be reduced again.
+- `snapshots_are_visible` (Boolean) If set to true, snapshots are visible and accessible to users. (default: false)
diff --git a/docs/data-sources/sfs_resource_pool_snapshot.md b/docs/data-sources/sfs_resource_pool_snapshot.md
new file mode 100644
index 000000000..4c1a1e871
--- /dev/null
+++ b/docs/data-sources/sfs_resource_pool_snapshot.md
@@ -0,0 +1,52 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "stackit_sfs_resource_pool_snapshot Data Source - stackit"
+subcategory: ""
+description: |-
+ Resource-pool snapshot datasource schema. Must have a region specified in the provider configuration.
+ ~> This datasource is in beta and may be subject to breaking changes in the future. Use with caution. See our guide https://registry.terraform.io/providers/stackitcloud/stackit/latest/docs/guides/opting_into_beta_resources for how to opt-in to use beta resources.
+---
+
+# stackit_sfs_resource_pool_snapshot (Data Source)
+
+Resource-pool snapshot datasource schema. Must have a `region` specified in the provider configuration.
+
+~> This datasource is in beta and may be subject to breaking changes in the future. Use with caution. See our [guide](https://registry.terraform.io/providers/stackitcloud/stackit/latest/docs/guides/opting_into_beta_resources) for how to opt-in to use beta resources.
+
+## Example Usage
+
+```terraform
+data "stackit_sfs_resource_pool_snapshot" "example" {
+ project_id = "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"
+ resource_pool_id = "YYYYYYYY-YYYY-YYYY-YYYY-YYYYYYYYYYYY"
+}
+```
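+
+As a minimal usage sketch (attribute names as listed in the schema below), the names of the fetched snapshots can be listed with a `for` expression:
+
+```terraform
+output "snapshot_names" {
+ value = [for s in data.stackit_sfs_resource_pool_snapshot.example.snapshots : s.snapshot_name]
+}
+```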
+
+
+## Schema
+
+### Required
+
+- `project_id` (String) STACKIT project ID to which the resource pool snapshot is associated.
+- `resource_pool_id` (String) Resource pool ID
+
+### Optional
+
+- `region` (String) The resource region. If not defined, the provider region is used.
+
+### Read-Only
+
+- `id` (String) Terraform's internal resource ID. It is structured as "`project_id`,`region`,`resource_pool_id`".
+- `snapshots` (Attributes List) List of snapshots of the resource pool. (see [below for nested schema](#nestedatt--snapshots))
+
+
+### Nested Schema for `snapshots`
+
+Read-Only:
+
+- `comment` (String) Optional comment with additional information about the snapshot
+- `created_at` (String) Creation date of the snapshot
+- `logical_size_gigabytes` (Number) Represents the user-visible data size at the time of the snapshot (i.e. what is in the snapshot)
+- `resource_pool_id` (String) ID of the Resource Pool of the Snapshot
+- `size_gigabytes` (Number) Reflects the actual storage footprint in the backend at snapshot time (i.e. how much storage from the Resource Pool it uses)
+- `snapshot_name` (String) Name of the Resource Pool Snapshot
diff --git a/docs/data-sources/sfs_share.md b/docs/data-sources/sfs_share.md
new file mode 100644
index 000000000..9a2d7ecaf
--- /dev/null
+++ b/docs/data-sources/sfs_share.md
@@ -0,0 +1,50 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "stackit_sfs_share Data Source - stackit"
+subcategory: ""
+description: |-
+ SFS Share schema. Must have a region specified in the provider configuration.
+ ~> This datasource is in beta and may be subject to breaking changes in the future. Use with caution. See our guide https://registry.terraform.io/providers/stackitcloud/stackit/latest/docs/guides/opting_into_beta_resources for how to opt-in to use beta resources.
+---
+
+# stackit_sfs_share (Data Source)
+
+SFS Share schema. Must have a `region` specified in the provider configuration.
+
+~> This datasource is in beta and may be subject to breaking changes in the future. Use with caution. See our [guide](https://registry.terraform.io/providers/stackitcloud/stackit/latest/docs/guides/opting_into_beta_resources) for how to opt-in to use beta resources.
+
+## Example Usage
+
+```terraform
+data "stackit_sfs_share" "example" {
+ project_id = "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"
+ resource_pool_id = "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"
+ share_id = "YYYYYYYY-YYYY-YYYY-YYYY-YYYYYYYYYYYY"
+}
+```
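+
+As a minimal usage sketch (attribute names as listed in the schema below), the fetched share can be used to expose the path clients need for mounting:
+
+```terraform
+output "share_mount_path" {
+ value = data.stackit_sfs_share.example.mount_path
+}
+```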
+
+
+## Schema
+
+### Required
+
+- `project_id` (String) STACKIT project ID to which the share is associated.
+- `resource_pool_id` (String) The ID of the resource pool for the SFS share.
+- `share_id` (String) Share ID
+
+### Optional
+
+- `region` (String) The resource region. If not defined, the provider region is used.
+
+### Read-Only
+
+- `export_policy` (String) Name of the Share Export Policy to use in the Share.
+Note that if this is not set, the Share can only be mounted read-only by
+clients with IPs matching the IP ACL of the Resource Pool hosting this Share.
+You can also assign a Share Export Policy after creating the Share.
+- `id` (String) Terraform's internal resource ID. It is structured as "`project_id`,`share_id`".
+- `mount_path` (String) Mount path of the Share, used by clients to mount it
+- `name` (String) Name of the Share
+- `space_hard_limit_gigabytes` (Number) Space hard limit for the Share.
+ If zero, the Share will have access to the full space of the Resource Pool it lives in.
+ (unit: gigabytes)
diff --git a/docs/index.md b/docs/index.md
index ce090ead6..095ddaeb7 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -188,6 +188,7 @@ Note: AWS specific checks must be skipped as they do not work on STACKIT. For de
- `service_account_key_path` (String) Path for the service account key used for authentication. If set, the key flow will be used to authenticate all operations.
- `service_account_token` (String, Deprecated) Token used for authentication. If set, the token flow will be used to authenticate all operations.
- `service_enablement_custom_endpoint` (String) Custom endpoint for the Service Enablement API
+- `sfs_custom_endpoint` (String) Custom endpoint for the STACKIT File Storage (SFS) API
- `ske_custom_endpoint` (String) Custom endpoint for the Kubernetes Engine (SKE) service
- `sqlserverflex_custom_endpoint` (String) Custom endpoint for the SQL Server Flex service
- `token_custom_endpoint` (String) Custom endpoint for the token API, which is used to request access tokens when using the key flow
diff --git a/docs/resources/sfs_export_policy.md b/docs/resources/sfs_export_policy.md
new file mode 100644
index 000000000..08e24c026
--- /dev/null
+++ b/docs/resources/sfs_export_policy.md
@@ -0,0 +1,68 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "stackit_sfs_export_policy Resource - stackit"
+subcategory: ""
+description: |-
+ SFS export policy resource schema. Must have a region specified in the provider configuration.
+ ~> This resource is in beta and may be subject to breaking changes in the future. Use with caution. See our guide https://registry.terraform.io/providers/stackitcloud/stackit/latest/docs/guides/opting_into_beta_resources for how to opt-in to use beta resources.
+---
+
+# stackit_sfs_export_policy (Resource)
+
+SFS export policy resource schema. Must have a `region` specified in the provider configuration.
+
+~> This resource is in beta and may be subject to breaking changes in the future. Use with caution. See our [guide](https://registry.terraform.io/providers/stackitcloud/stackit/latest/docs/guides/opting_into_beta_resources) for how to opt-in to use beta resources.
+
+## Example Usage
+
+```terraform
+resource "stackit_sfs_export_policy" "example" {
+ project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
+ name = "example"
+ rules = [
+ {
+ ip_acl = ["172.16.0.0/24", "172.16.0.250/32"]
+ order = 1
+ }
+ ]
+}
+
+# Only use the import statement if you want to import an existing export policy
+import {
+ to = stackit_sfs_export_policy.example
+ id = "${var.project_id},${var.region},${var.policy_id}"
+}
+```
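+
+A minimal sketch of wiring this policy into a share by name (the `stackit_sfs_share` arguments are documented on that resource's page; the placeholder IDs must point at existing resources):
+
+```terraform
+resource "stackit_sfs_share" "example" {
+ project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
+ resource_pool_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
+ name = "example-share"
+ export_policy = stackit_sfs_export_policy.example.name
+ space_hard_limit_gigabytes = 32
+}
+```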
+
+
+## Schema
+
+### Required
+
+- `name` (String) Name of the export policy.
+- `project_id` (String) STACKIT project ID to which the export policy is associated.
+
+### Optional
+
+- `region` (String) The resource region. If not defined, the provider region is used.
+- `rules` (Attributes List) (see [below for nested schema](#nestedatt--rules))
+
+### Read-Only
+
+- `id` (String) Terraform's internal resource ID. It is structured as "`project_id`,`region`,`policy_id`".
+- `policy_id` (String) Export policy ID
+
+
+### Nested Schema for `rules`
+
+Required:
+
+- `ip_acl` (List of String) IP access control list; IPs must have a subnet mask (e.g. "172.16.0.0/24" for a range of IPs, or "172.16.0.250/32" for a specific IP).
+- `order` (Number) Order of the rule within a Share Export Policy. The order is used so that when a client IP matches multiple rules, the first rule is applied
+
+Optional:
+
+- `description` (String) Description of the Rule
+- `read_only` (Boolean) Flag to indicate if client IPs matching this rule can only mount the share in read only mode
+- `set_uuid` (Boolean) Flag to honor set UUID
+- `super_user` (Boolean) Flag to indicate if client IPs matching this rule have root access on the Share
diff --git a/docs/resources/sfs_resource_pool.md b/docs/resources/sfs_resource_pool.md
new file mode 100644
index 000000000..572ae0f8d
--- /dev/null
+++ b/docs/resources/sfs_resource_pool.md
@@ -0,0 +1,59 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "stackit_sfs_resource_pool Resource - stackit"
+subcategory: ""
+description: |-
+ Resource-pool resource schema. Must have a region specified in the provider configuration.
+ ~> This resource is in beta and may be subject to breaking changes in the future. Use with caution. See our guide https://registry.terraform.io/providers/stackitcloud/stackit/latest/docs/guides/opting_into_beta_resources for how to opt-in to use beta resources.
+---
+
+# stackit_sfs_resource_pool (Resource)
+
+Resource-pool resource schema. Must have a `region` specified in the provider configuration.
+
+~> This resource is in beta and may be subject to breaking changes in the future. Use with caution. See our [guide](https://registry.terraform.io/providers/stackitcloud/stackit/latest/docs/guides/opting_into_beta_resources) for how to opt-in to use beta resources.
+
+## Example Usage
+
+```terraform
+resource "stackit_sfs_resource_pool" "resourcepool" {
+ project_id = "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"
+ name = "some-resourcepool"
+ availability_zone = "eu01-m"
+ performance_class = "Standard"
+ size_gigabytes = 512
+ ip_acl = [
+ "192.168.42.1/32",
+ "192.168.42.2/32"
+ ]
+ snapshots_are_visible = true
+}
+
+# Only use the import statement if you want to import an existing resource pool
+import {
+ to = stackit_sfs_resource_pool.resourcepool
+ id = "${var.project_id},${var.region},${var.resource_pool_id}"
+}
+```
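+
+A minimal sketch of creating a share inside this pool by referencing its computed `resource_pool_id` (the `stackit_sfs_share` arguments are documented on that resource's page; an empty `export_policy` means read-only mounting as described there):
+
+```terraform
+resource "stackit_sfs_share" "example" {
+ project_id = stackit_sfs_resource_pool.resourcepool.project_id
+ resource_pool_id = stackit_sfs_resource_pool.resourcepool.resource_pool_id
+ name = "example-share"
+ export_policy = ""
+ space_hard_limit_gigabytes = 32
+}
+```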
+
+
+## Schema
+
+### Required
+
+- `availability_zone` (String) Availability zone.
+- `ip_acl` (List of String) List of IPs that can mount the resource pool read-only; IPs must have a subnet mask (e.g. "172.16.0.0/24" for a range of IPs, or "172.16.0.250/32" for a specific IP).
+- `name` (String) Name of the resource pool.
+- `performance_class` (String) Name of the performance class.
+- `project_id` (String) STACKIT project ID to which the resource pool is associated.
+- `size_gigabytes` (Number) Size of the resource pool (unit: gigabytes)
+
+### Optional
+
+- `region` (String) The resource region. If not defined, the provider region is used.
+- `snapshots_are_visible` (Boolean) If set to true, snapshots are visible and accessible to users. (default: false)
+
+### Read-Only
+
+- `id` (String) Terraform's internal resource ID. It is structured as "`project_id`,`region`,`resource_pool_id`".
+- `resource_pool_id` (String) Resource pool ID
diff --git a/docs/resources/sfs_share.md b/docs/resources/sfs_share.md
new file mode 100644
index 000000000..5b93d514f
--- /dev/null
+++ b/docs/resources/sfs_share.md
@@ -0,0 +1,58 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "stackit_sfs_share Resource - stackit"
+subcategory: ""
+description: |-
+ SFS Share schema. Must have a region specified in the provider configuration.
+ ~> This resource is in beta and may be subject to breaking changes in the future. Use with caution. See our guide https://registry.terraform.io/providers/stackitcloud/stackit/latest/docs/guides/opting_into_beta_resources for how to opt-in to use beta resources.
+---
+
+# stackit_sfs_share (Resource)
+
+SFS Share schema. Must have a `region` specified in the provider configuration.
+
+~> This resource is in beta and may be subject to breaking changes in the future. Use with caution. See our [guide](https://registry.terraform.io/providers/stackitcloud/stackit/latest/docs/guides/opting_into_beta_resources) for how to opt-in to use beta resources.
+
+## Example Usage
+
+```terraform
+resource "stackit_sfs_share" "example" {
+ project_id = "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"
+ resource_pool_id = "YYYYYYYY-YYYY-YYYY-YYYY-YYYYYYYYYYYY"
+ name = "my-nfs-share"
+ export_policy = "high-performance-class"
+ space_hard_limit_gigabytes = 32
+}
+
+# Only use the import statement if you want to import an existing SFS share
+import {
+ to = stackit_sfs_share.example
+ id = "${var.project_id},${var.region},${var.resource_pool_id},${var.share_id}"
+}
+```
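+
+A minimal sketch of exposing the computed `mount_path`, which clients need in order to mount the share:
+
+```terraform
+output "share_mount_path" {
+ value = stackit_sfs_share.example.mount_path
+}
+```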
+
+
+## Schema
+
+### Required
+
+- `export_policy` (String) Name of the Share Export Policy to use in the Share.
+Note that if this is set to an empty string, the Share can only be mounted read-only by
+clients with IPs matching the IP ACL of the Resource Pool hosting this Share.
+You can also assign a Share Export Policy after creating the Share.
+- `name` (String) Name of the share.
+- `project_id` (String) STACKIT project ID to which the share is associated.
+- `resource_pool_id` (String) The ID of the resource pool for the SFS share.
+- `space_hard_limit_gigabytes` (Number) Space hard limit for the Share.
+ If zero, the Share will have access to the full space of the Resource Pool it lives in.
+ (unit: gigabytes)
+
+### Optional
+
+- `region` (String) The resource region. If not defined, the provider region is used.
+
+### Read-Only
+
+- `id` (String) Terraform's internal resource ID. It is structured as "`project_id`,`region`,`resource_pool_id`,`share_id`".
+- `mount_path` (String) Mount path of the Share, used by clients to mount it
+- `share_id` (String) Share ID
diff --git a/examples/data-sources/stackit_sfs_export_policy/data-source.tf b/examples/data-sources/stackit_sfs_export_policy/data-source.tf
new file mode 100644
index 000000000..3a048e679
--- /dev/null
+++ b/examples/data-sources/stackit_sfs_export_policy/data-source.tf
@@ -0,0 +1,4 @@
+data "stackit_sfs_export_policy" "example" {
+ project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
+ policy_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
+}
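+
+# A minimal usage sketch: expose an attribute of the fetched policy
+# (attribute names as documented in the data source schema).
+output "export_policy_name" {
+ value = data.stackit_sfs_export_policy.example.name
+}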
diff --git a/examples/data-sources/stackit_sfs_resource_pool/data-source.tf b/examples/data-sources/stackit_sfs_resource_pool/data-source.tf
new file mode 100644
index 000000000..21d70193a
--- /dev/null
+++ b/examples/data-sources/stackit_sfs_resource_pool/data-source.tf
@@ -0,0 +1,4 @@
+data "stackit_sfs_resource_pool" "resourcepool" {
+ project_id = "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"
+ resource_pool_id = "YYYYYYYY-YYYY-YYYY-YYYY-YYYYYYYYYYYY"
+}
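+
+# A minimal usage sketch: expose an attribute of the fetched resource pool
+# (attribute names as documented in the data source schema).
+output "resource_pool_name" {
+ value = data.stackit_sfs_resource_pool.resourcepool.name
+}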
diff --git a/examples/data-sources/stackit_sfs_resource_pool_snapshot/data-source.tf b/examples/data-sources/stackit_sfs_resource_pool_snapshot/data-source.tf
new file mode 100644
index 000000000..389932720
--- /dev/null
+++ b/examples/data-sources/stackit_sfs_resource_pool_snapshot/data-source.tf
@@ -0,0 +1,4 @@
+data "stackit_sfs_resource_pool_snapshot" "example" {
+ project_id = "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"
+ resource_pool_id = "YYYYYYYY-YYYY-YYYY-YYYY-YYYYYYYYYYYY"
+}
\ No newline at end of file
diff --git a/examples/data-sources/stackit_sfs_share/data-source.tf b/examples/data-sources/stackit_sfs_share/data-source.tf
new file mode 100644
index 000000000..9edd69ff7
--- /dev/null
+++ b/examples/data-sources/stackit_sfs_share/data-source.tf
@@ -0,0 +1,5 @@
+data "stackit_sfs_share" "example" {
+ project_id = "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"
+ resource_pool_id = "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"
+ share_id = "YYYYYYYY-YYYY-YYYY-YYYY-YYYYYYYYYYYY"
+}
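+
+# A minimal usage sketch: expose the path clients use to mount the share
+# (attribute names as documented in the data source schema).
+output "share_mount_path" {
+ value = data.stackit_sfs_share.example.mount_path
+}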
diff --git a/examples/resources/stackit_sfs_export_policy/resource.tf b/examples/resources/stackit_sfs_export_policy/resource.tf
new file mode 100644
index 000000000..fada10fc8
--- /dev/null
+++ b/examples/resources/stackit_sfs_export_policy/resource.tf
@@ -0,0 +1,16 @@
+resource "stackit_sfs_export_policy" "example" {
+ project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
+ name = "example"
+ rules = [
+ {
+ ip_acl = ["172.16.0.0/24", "172.16.0.250/32"]
+ order = 1
+ }
+ ]
+}
+
+# Only use the import statement if you want to import an existing export policy
+import {
+ to = stackit_sfs_export_policy.example
+ id = "${var.project_id},${var.region},${var.policy_id}"
+}
\ No newline at end of file
diff --git a/examples/resources/stackit_sfs_resource_pool/resource.tf b/examples/resources/stackit_sfs_resource_pool/resource.tf
new file mode 100644
index 000000000..0fb901735
--- /dev/null
+++ b/examples/resources/stackit_sfs_resource_pool/resource.tf
@@ -0,0 +1,18 @@
+resource "stackit_sfs_resource_pool" "resourcepool" {
+ project_id = "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"
+ name = "some-resourcepool"
+ availability_zone = "eu01-m"
+ performance_class = "Standard"
+ size_gigabytes = 512
+ ip_acl = [
+ "192.168.42.1/32",
+ "192.168.42.2/32"
+ ]
+ snapshots_are_visible = true
+}
+
+# Only use the import statement if you want to import an existing resource pool
+import {
+ to = stackit_sfs_resource_pool.resourcepool
+ id = "${var.project_id},${var.region},${var.resource_pool_id}"
+}
\ No newline at end of file
diff --git a/examples/resources/stackit_sfs_share/resource.tf b/examples/resources/stackit_sfs_share/resource.tf
new file mode 100644
index 000000000..9359ce06a
--- /dev/null
+++ b/examples/resources/stackit_sfs_share/resource.tf
@@ -0,0 +1,13 @@
+resource "stackit_sfs_share" "example" {
+ project_id = "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"
+ resource_pool_id = "YYYYYYYY-YYYY-YYYY-YYYY-YYYYYYYYYYYY"
+ name = "my-nfs-share"
+ export_policy = "high-performance-class"
+ space_hard_limit_gigabytes = 32
+}
+
+# Only use the import statement if you want to import an existing SFS share
+import {
+ to = stackit_sfs_share.example
+ id = "${var.project_id},${var.region},${var.resource_pool_id},${var.share_id}"
+}
\ No newline at end of file
diff --git a/go.mod b/go.mod
index c938e1779..4be8fd6f9 100644
--- a/go.mod
+++ b/go.mod
@@ -36,6 +36,7 @@ require (
github.com/stackitcloud/stackit-sdk-go/services/serverupdate v1.2.1
github.com/stackitcloud/stackit-sdk-go/services/serviceaccount v0.11.1
github.com/stackitcloud/stackit-sdk-go/services/serviceenablement v1.2.2
+ github.com/stackitcloud/stackit-sdk-go/services/sfs v0.2.0
github.com/stackitcloud/stackit-sdk-go/services/ske v1.4.0
github.com/stackitcloud/stackit-sdk-go/services/sqlserverflex v1.3.3
github.com/teambition/rrule-go v1.8.2
diff --git a/go.sum b/go.sum
index 897ebf8d4..c872fd30a 100644
--- a/go.sum
+++ b/go.sum
@@ -201,6 +201,8 @@ github.com/stackitcloud/stackit-sdk-go/services/serviceaccount v0.11.1 h1:crKlHl
github.com/stackitcloud/stackit-sdk-go/services/serviceaccount v0.11.1/go.mod h1:QCrAW/Rmf+styT25ke8cUV6hDHpdKNmAY14kkJ3+Fd8=
github.com/stackitcloud/stackit-sdk-go/services/serviceenablement v1.2.2 h1:s2iag/Gc4tuQH7x5I0n4mQWVhpfl/cj+SVNAFAB5ck0=
github.com/stackitcloud/stackit-sdk-go/services/serviceenablement v1.2.2/go.mod h1:DFEamKVoOjm/rjMwzfZK0Zg/hwsSkXOibdA4HcC6swk=
+github.com/stackitcloud/stackit-sdk-go/services/sfs v0.2.0 h1:DRp1p0Gb1YZSnFXgkiKTHQD9bFfqn6OC3PcsDjqGJiw=
+github.com/stackitcloud/stackit-sdk-go/services/sfs v0.2.0/go.mod h1:XHOtGgBwwCqPSoQt2ojIRb/BeOd4kICwb9RuMXXFGt8=
github.com/stackitcloud/stackit-sdk-go/services/ske v1.4.0 h1:V6RFvybDeJvvmT3g7/BZodF0gozz3TEpahbpiTftbeY=
github.com/stackitcloud/stackit-sdk-go/services/ske v1.4.0/go.mod h1:xRBgpJ8P5Nf1T5tD0tGAeNg1FNQzx5VF7qqOXt2Fp3s=
github.com/stackitcloud/stackit-sdk-go/services/sqlserverflex v1.3.3 h1:TFefEGGxvcI7euqyosbLS/zSEOy+3JMGOirW3vNj/84=
diff --git a/stackit/internal/core/core.go b/stackit/internal/core/core.go
index e3dd02e0f..688335ec0 100644
--- a/stackit/internal/core/core.go
+++ b/stackit/internal/core/core.go
@@ -68,6 +68,7 @@ type ProviderData struct {
ServerUpdateCustomEndpoint string
SKECustomEndpoint string
ServiceEnablementCustomEndpoint string
+ SfsCustomEndpoint string
ServiceAccountCustomEndpoint string
EnableBetaResources bool
Experiments []string
diff --git a/stackit/internal/services/sfs/export-policy/datasource.go b/stackit/internal/services/sfs/export-policy/datasource.go
new file mode 100644
index 000000000..2c21ea969
--- /dev/null
+++ b/stackit/internal/services/sfs/export-policy/datasource.go
@@ -0,0 +1,184 @@
+package exportpolicy
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net/http"
+
+ "github.com/hashicorp/terraform-plugin-framework/datasource"
+ "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
+ "github.com/hashicorp/terraform-plugin-framework/schema/validator"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+ "github.com/hashicorp/terraform-plugin-log/tflog"
+ "github.com/stackitcloud/stackit-sdk-go/core/oapierror"
+ "github.com/stackitcloud/stackit-sdk-go/services/sfs"
+ "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/conversion"
+ "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/core"
+ "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/features"
+ sfsUtils "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/services/sfs/utils"
+ "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/validate"
+)
+
+var (
+ _ datasource.DataSource = (*exportPolicyDataSource)(nil)
+ _ datasource.DataSourceWithConfigure = (*exportPolicyDataSource)(nil)
+)
+
+type exportPolicyDataSource struct {
+ client *sfs.APIClient
+ providerData core.ProviderData
+}
+
+// Metadata implements datasource.DataSource.
+func (d *exportPolicyDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
+ resp.TypeName = req.ProviderTypeName + "_sfs_export_policy"
+}
+
+// Configure implements datasource.DataSourceWithConfigure.
+func (d *exportPolicyDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
+ var ok bool
+ d.providerData, ok = conversion.ParseProviderData(ctx, req.ProviderData, &resp.Diagnostics)
+ if !ok {
+ return
+ }
+
+ features.CheckBetaResourcesEnabled(ctx, &d.providerData, &resp.Diagnostics, "stackit_sfs_export_policy", core.Datasource)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ apiClient := sfsUtils.ConfigureClient(ctx, &d.providerData, &resp.Diagnostics)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+ d.client = apiClient
+ tflog.Info(ctx, "SFS client configured")
+}
+
+func NewExportPolicyDataSource() datasource.DataSource {
+ return &exportPolicyDataSource{}
+}
+
+// Read implements datasource.DataSource.
+func (d *exportPolicyDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { //nolint:gocritic // defined by terraform api
+ var model Model
+ diags := req.Config.Get(ctx, &model)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+ projectId := model.ProjectId.ValueString()
+ exportPolicyId := model.ExportPolicyId.ValueString()
+ region := d.providerData.GetRegionWithOverride(model.Region)
+ ctx = tflog.SetField(ctx, "project_id", projectId)
+ ctx = tflog.SetField(ctx, "policy_id", exportPolicyId)
+ ctx = tflog.SetField(ctx, "region", region)
+
+ ctx = core.InitProviderContext(ctx)
+
+ // get export policy
+ exportPolicyResp, err := d.client.GetShareExportPolicy(ctx, projectId, region, exportPolicyId).Execute()
+ if err != nil {
+ var openapiError *oapierror.GenericOpenAPIError
+ if errors.As(err, &openapiError) {
+ if openapiError.StatusCode == http.StatusNotFound {
+ resp.State.RemoveResource(ctx)
+ return
+ }
+ }
+ core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading export policy", fmt.Sprintf("Calling API to get export policy: %v", err))
+ return
+ }
+
+ ctx = core.LogResponse(ctx)
+
+ // map export policy
+ err = mapFields(ctx, exportPolicyResp, &model, region)
+ if err != nil {
+ core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading export policy", fmt.Sprintf("Processing API payload: %v", err))
+ return
+ }
+
+ // Set state to fully populated data
+ diags = resp.State.Set(ctx, model)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ tflog.Info(ctx, "SFS export policy read")
+}
+
+// Schema implements datasource.DataSource.
+func (d *exportPolicyDataSource) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
+ description := "SFS export policy datasource schema. Must have a `region` specified in the provider configuration."
+ resp.Schema = schema.Schema{
+ Description: description,
+ MarkdownDescription: features.AddBetaDescription(description, core.Datasource),
+ Attributes: map[string]schema.Attribute{
+ "id": schema.StringAttribute{
+ Description: "Terraform's internal resource ID. It is structured as \"`project_id`,`region`,`policy_id`\".",
+ Computed: true,
+ },
+ "project_id": schema.StringAttribute{
+ Description: "STACKIT project ID to which the export policy is associated.",
+ Required: true,
+ Validators: []validator.String{
+ validate.UUID(),
+ validate.NoSeparator(),
+ },
+ },
+ "policy_id": schema.StringAttribute{
+ Description: "Export policy ID",
+ Required: true,
+ Validators: []validator.String{
+ validate.UUID(),
+ validate.NoSeparator(),
+ },
+ },
+ "name": schema.StringAttribute{
+ Description: "Name of the export policy.",
+ Computed: true,
+ },
+ "rules": schema.ListNestedAttribute{
+ Computed: true,
+ NestedObject: schema.NestedAttributeObject{
+ Attributes: map[string]schema.Attribute{
+ "description": schema.StringAttribute{
+ Optional: true,
+ Description: "Description of the Rule",
+ },
+ "ip_acl": schema.ListAttribute{
+ ElementType: types.StringType,
+ Computed: true,
+ Description: `IP access control list; IPs must have a subnet mask (e.g. "172.16.0.0/24" for a range of IPs, or "172.16.0.250/32" for a specific IP).`,
+ },
+ "order": schema.Int64Attribute{
+ Description: "Order of the rule within a Share Export Policy. The order is used so that when a client IP matches multiple rules, the first rule is applied",
+ Computed: true,
+ },
+ "read_only": schema.BoolAttribute{
+ Description: "Flag to indicate if client IPs matching this rule can only mount the share in read only mode",
+ Computed: true,
+ },
+ "set_uuid": schema.BoolAttribute{
+ Description: "Flag to honor set UUID",
+ Computed: true,
+ },
+ "super_user": schema.BoolAttribute{
+ Description: "Flag to indicate if client IPs matching this rule have root access on the Share",
+ Computed: true,
+ },
+ },
+ },
+ },
+ "region": schema.StringAttribute{
+ Optional: true,
+ // must be computed to allow for storing the override value from the provider
+ Computed: true,
+ Description: "The resource region. If not defined, the provider region is used.",
+ },
+ },
+ }
+}
diff --git a/stackit/internal/services/sfs/export-policy/resource.go b/stackit/internal/services/sfs/export-policy/resource.go
new file mode 100644
index 000000000..01f6cc3d4
--- /dev/null
+++ b/stackit/internal/services/sfs/export-policy/resource.go
@@ -0,0 +1,627 @@
+package exportpolicy
+
+import (
+ "context"
+ _ "embed"
+ "errors"
+ "fmt"
+ "net/http"
+ "strings"
+
+ "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator"
+ "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator"
+ "github.com/hashicorp/terraform-plugin-framework/attr"
+ "github.com/hashicorp/terraform-plugin-framework/diag"
+ "github.com/hashicorp/terraform-plugin-framework/path"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/booldefault"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
+ "github.com/hashicorp/terraform-plugin-framework/schema/validator"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+ "github.com/hashicorp/terraform-plugin-framework/types/basetypes"
+ "github.com/hashicorp/terraform-plugin-log/tflog"
+ "github.com/stackitcloud/stackit-sdk-go/core/oapierror"
+ "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/conversion"
+ "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/core"
+ "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/features"
+ sfsUtils "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/services/sfs/utils"
+ "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/utils"
+ "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/validate"
+
+ "github.com/hashicorp/terraform-plugin-framework/resource"
+ "github.com/stackitcloud/stackit-sdk-go/services/sfs"
+)
+
+// Ensure the implementation satisfies the expected interfaces.
+var (
+ _ resource.Resource = &exportPolicyResource{}
+ _ resource.ResourceWithConfigure = &exportPolicyResource{}
+ _ resource.ResourceWithImportState = &exportPolicyResource{}
+ _ resource.ResourceWithModifyPlan = &exportPolicyResource{}
+)
+
+type Model struct {
+ Id types.String `tfsdk:"id"` // needed by TF
+ ProjectId types.String `tfsdk:"project_id"`
+ ExportPolicyId types.String `tfsdk:"policy_id"`
+ Name types.String `tfsdk:"name"`
+ Rules types.List `tfsdk:"rules"`
+ Region types.String `tfsdk:"region"`
+}
+
+type rulesModel struct {
+ Description types.String `tfsdk:"description"`
+ IpAcl types.List `tfsdk:"ip_acl"`
+ Order types.Int64 `tfsdk:"order"`
+ ReadOnly types.Bool `tfsdk:"read_only"`
+ SetUuid types.Bool `tfsdk:"set_uuid"`
+ SuperUser types.Bool `tfsdk:"super_user"`
+}
+
+// Types corresponding to rulesModel
+var rulesTypes = map[string]attr.Type{
+ "description": types.StringType,
+ "ip_acl": types.ListType{ElemType: types.StringType},
+ "order": types.Int64Type,
+ "read_only": types.BoolType,
+ "set_uuid": types.BoolType,
+ "super_user": types.BoolType,
+}
+
+func NewExportPolicyResource() resource.Resource {
+ return &exportPolicyResource{}
+}
+
+type exportPolicyResource struct {
+ client *sfs.APIClient
+ providerData core.ProviderData
+}
+
+// ModifyPlan implements resource.ResourceWithModifyPlan.
+// Use the modifier to set the effective region in the current plan.
+func (r *exportPolicyResource) ModifyPlan(ctx context.Context, req resource.ModifyPlanRequest, resp *resource.ModifyPlanResponse) { // nolint:gocritic // function signature required by Terraform
+ var configModel Model
+ // skip initial empty configuration to avoid follow-up errors
+ if req.Config.Raw.IsNull() {
+ return
+ }
+ resp.Diagnostics.Append(req.Config.Get(ctx, &configModel)...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ var planModel Model
+ resp.Diagnostics.Append(req.Plan.Get(ctx, &planModel)...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ // If rules were completely removed from the config, this is not recognized by Terraform
+ // since this field is optional and computed; therefore this plan modifier is needed.
+ utils.CheckListRemoval(ctx, configModel.Rules, planModel.Rules, path.Root("rules"), types.ObjectType{AttrTypes: rulesTypes}, true, resp)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ utils.AdaptRegion(ctx, configModel.Region, &planModel.Region, r.providerData.GetRegion(), resp)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ resp.Diagnostics.Append(resp.Plan.Set(ctx, planModel)...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+}
+
+// Metadata returns the resource type name.
+func (r *exportPolicyResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
+ resp.TypeName = req.ProviderTypeName + "_sfs_export_policy"
+}
+
+// Configure adds the provider configured client to the resource.
+func (r *exportPolicyResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) {
+ var ok bool
+ r.providerData, ok = conversion.ParseProviderData(ctx, req.ProviderData, &resp.Diagnostics)
+ if !ok {
+ return
+ }
+
+ features.CheckBetaResourcesEnabled(ctx, &r.providerData, &resp.Diagnostics, "stackit_sfs_export_policy", core.Resource)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ apiClient := sfsUtils.ConfigureClient(ctx, &r.providerData, &resp.Diagnostics)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+ r.client = apiClient
+ tflog.Info(ctx, "SFS client configured")
+}
+
+// Schema defines the schema for the resource.
+func (r *exportPolicyResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) {
+ description := "SFS export policy resource schema. Must have a `region` specified in the provider configuration."
+ resp.Schema = schema.Schema{
+ Description: description,
+ MarkdownDescription: features.AddBetaDescription(description, core.Resource),
+ Attributes: map[string]schema.Attribute{
+ "id": schema.StringAttribute{
+ Description: "Terraform's internal resource ID. It is structured as \"`project_id`,`region`,`policy_id`\".",
+ Computed: true,
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.UseStateForUnknown(),
+ },
+ },
+ "project_id": schema.StringAttribute{
+ Description: "STACKIT project ID to which the export policy is associated.",
+ Required: true,
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.RequiresReplace(),
+ },
+ Validators: []validator.String{
+ validate.UUID(),
+ validate.NoSeparator(),
+ },
+ },
+ "policy_id": schema.StringAttribute{
+ Description: "Export policy ID",
+ Computed: true,
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.UseStateForUnknown(),
+ },
+ Validators: []validator.String{
+ validate.UUID(),
+ validate.NoSeparator(),
+ },
+ },
+ "name": schema.StringAttribute{
+ Description: "Name of the export policy.",
+ Required: true,
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.RequiresReplace(),
+ },
+ Validators: []validator.String{
+ stringvalidator.LengthAtLeast(1),
+ },
+ },
+ "rules": schema.ListNestedAttribute{
+ Computed: true,
+ Optional: true,
+ NestedObject: schema.NestedAttributeObject{
+ Attributes: map[string]schema.Attribute{
+ "description": schema.StringAttribute{
+ Optional: true,
+ Description: "Description of the Rule",
+ },
+ "ip_acl": schema.ListAttribute{
+ ElementType: types.StringType,
+ Required: true,
+ Description: `IP access control list; IPs must have a subnet mask (e.g. "172.16.0.0/24" for a range of IPs, or "172.16.0.250/32" for a specific IP).`,
+ Validators: []validator.List{
+ listvalidator.SizeAtLeast(1),
+ listvalidator.ValueStringsAre(validate.CIDR()),
+ },
+ },
+ "order": schema.Int64Attribute{
+ Description: "Order of the rule within a Share Export Policy. The order is used so that when a client IP matches multiple rules, the first rule is applied",
+ Required: true,
+ },
+ "read_only": schema.BoolAttribute{
+ Description: "Flag to indicate if client IPs matching this rule can only mount the share in read only mode",
+ Optional: true,
+ Computed: true,
+ Default: booldefault.StaticBool(false),
+ },
+ "set_uuid": schema.BoolAttribute{
+ Description: "Flag to honor set UUID",
+ Optional: true,
+ Computed: true,
+ Default: booldefault.StaticBool(false),
+ },
+ "super_user": schema.BoolAttribute{
+ Description: "Flag to indicate if client IPs matching this rule have root access on the Share",
+ Optional: true,
+ Computed: true,
+ Default: booldefault.StaticBool(true),
+ },
+ },
+ },
+ },
+ "region": schema.StringAttribute{
+ Optional: true,
+ // must be computed to allow for storing the override value from the provider
+ Computed: true,
+ Description: "The resource region. If not defined, the provider region is used.",
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.RequiresReplace(),
+ },
+ },
+ },
+ }
+}
+
+// Create creates the resource and sets the initial Terraform state.
+func (r *exportPolicyResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { //nolint:gocritic // defined by terraform api
+ // Retrieve values from plan
+ var model Model
+ diags := req.Plan.Get(ctx, &model)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+ projectId := model.ProjectId.ValueString()
+ region := model.Region.ValueString()
+ ctx = tflog.SetField(ctx, "project_id", projectId)
+ ctx = tflog.SetField(ctx, "region", region)
+
+ ctx = core.InitProviderContext(ctx)
+
+ var rules = []rulesModel{}
+ if !(model.Rules.IsNull() || model.Rules.IsUnknown()) {
+ diags = model.Rules.ElementsAs(ctx, &rules, false)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+ }
+
+ payload, err := toCreatePayload(&model, rules)
+ if err != nil {
+ core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating export policy", fmt.Sprintf("Creating API payload: %v", err))
+ return
+ }
+
+ createResp, err := r.client.CreateShareExportPolicy(ctx, projectId, region).CreateShareExportPolicyPayload(*payload).Execute()
+ if err != nil {
+ core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating export policy", fmt.Sprintf("Calling API: %v", err))
+ return
+ }
+
+ ctx = core.LogResponse(ctx)
+
+ if createResp == nil || createResp.ShareExportPolicy == nil || createResp.ShareExportPolicy.Id == nil {
+ core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating export policy", "response did not contain an ID")
+ return
+ }
+ // Write id attributes to state before polling via the wait handler - just in case anything goes wrong during the wait handler
+ utils.SetAndLogStateFields(ctx, &resp.Diagnostics, &resp.State, map[string]any{
+ "project_id": projectId,
+ "region": region,
+ "policy_id": *createResp.ShareExportPolicy.Id,
+ })
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ // get export policy
+ getResp, err := r.client.GetShareExportPolicy(ctx, projectId, region, *createResp.ShareExportPolicy.Id).Execute()
+ if err != nil {
+ core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating export policy", fmt.Sprintf("Calling API to get export policy: %v", err))
+ return
+ }
+
+ err = mapFields(ctx, getResp, &model, region)
+ if err != nil {
+ core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating export policy", fmt.Sprintf("Processing API response: %v", err))
+ return
+ }
+
+ // Set state to fully populated data
+ diags = resp.State.Set(ctx, model)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+ tflog.Info(ctx, "SFS export policy created")
+}
+
+// Read refreshes the Terraform state with the latest data.
+func (r *exportPolicyResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { //nolint:gocritic // defined by terraform api
+ var model Model
+ diags := req.State.Get(ctx, &model)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+ projectId := model.ProjectId.ValueString()
+ exportPolicyId := model.ExportPolicyId.ValueString()
+ region := r.providerData.GetRegionWithOverride(model.Region)
+ ctx = tflog.SetField(ctx, "project_id", projectId)
+ ctx = tflog.SetField(ctx, "policy_id", exportPolicyId)
+ ctx = tflog.SetField(ctx, "region", region)
+
+ ctx = core.InitProviderContext(ctx)
+
+ // get export policy
+ exportPolicyResp, err := r.client.GetShareExportPolicy(ctx, projectId, region, exportPolicyId).Execute()
+ if err != nil {
+ var openapiError *oapierror.GenericOpenAPIError
+ if errors.As(err, &openapiError) {
+ if openapiError.StatusCode == http.StatusNotFound {
+ resp.State.RemoveResource(ctx)
+ return
+ }
+ }
+ core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading export policy", fmt.Sprintf("Calling API to get export policy: %v", err))
+ return
+ }
+
+ ctx = core.LogResponse(ctx)
+
+ // map export policy
+ err = mapFields(ctx, exportPolicyResp, &model, region)
+ if err != nil {
+ core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading export policy", fmt.Sprintf("Processing API payload: %v", err))
+ return
+ }
+
+ // Set state to fully populated data
+ diags = resp.State.Set(ctx, model)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ tflog.Info(ctx, "SFS export policy read")
+}
+
+// Update updates the resource and sets the updated Terraform state on success.
+func (r *exportPolicyResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { //nolint:gocritic // defined by terraform api
+ var model Model
+ diags := req.Plan.Get(ctx, &model)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+ projectId := model.ProjectId.ValueString()
+ exportPolicyId := model.ExportPolicyId.ValueString()
+ region := model.Region.ValueString()
+ ctx = tflog.SetField(ctx, "project_id", projectId)
+ ctx = tflog.SetField(ctx, "policy_id", exportPolicyId)
+ ctx = tflog.SetField(ctx, "region", region)
+
+ ctx = core.InitProviderContext(ctx)
+
+ var rules = []rulesModel{}
+ if !(model.Rules.IsNull() || model.Rules.IsUnknown()) {
+ diags = model.Rules.ElementsAs(ctx, &rules, false)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+ }
+
+ payload, err := toUpdatePayload(&model, rules)
+ if err != nil {
+ core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating export policy", fmt.Sprintf("Creating API payload: %v", err))
+ return
+ }
+
+ _, err = r.client.UpdateShareExportPolicy(ctx, projectId, region, exportPolicyId).UpdateShareExportPolicyPayload(*payload).Execute()
+ if err != nil {
+ core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating export policy", fmt.Sprintf("Calling API to update export policy: %v", err))
+ return
+ }
+
+ ctx = core.LogResponse(ctx)
+
+ // get export policy
+ exportPolicyResp, err := r.client.GetShareExportPolicy(ctx, projectId, region, exportPolicyId).Execute()
+ if err != nil {
+ core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating export policy", fmt.Sprintf("Calling API to get export policy: %v", err))
+ return
+ }
+
+ // map export policy
+ err = mapFields(ctx, exportPolicyResp, &model, region)
+ if err != nil {
+ core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating export policy", fmt.Sprintf("Processing API payload: %v", err))
+ return
+ }
+
+ // Set state to fully populated data
+ diags = resp.State.Set(ctx, model)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ tflog.Info(ctx, "SFS export policy update")
+}
+
+// Delete deletes the resource and removes the Terraform state on success.
+func (r *exportPolicyResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { //nolint:gocritic // defined by terraform api
+ var model Model
+ diags := req.State.Get(ctx, &model)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+ projectId := model.ProjectId.ValueString()
+ exportPolicyId := model.ExportPolicyId.ValueString()
+ region := model.Region.ValueString()
+ ctx = tflog.SetField(ctx, "project_id", projectId)
+ ctx = tflog.SetField(ctx, "policy_id", exportPolicyId)
+ ctx = tflog.SetField(ctx, "region", region)
+
+ ctx = core.InitProviderContext(ctx)
+
+ _, err := r.client.DeleteShareExportPolicy(ctx, projectId, region, exportPolicyId).Execute()
+ if err != nil {
+ core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting export policy", fmt.Sprintf("Calling API: %v", err))
+ return
+ }
+
+ ctx = core.LogResponse(ctx)
+
+ tflog.Info(ctx, "SFS export policy delete")
+}
+
+// ImportState imports a resource into the Terraform state on success.
+// The expected format of the export policy resource import identifier is: project_id,region,policy_id
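+// e.g. "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx,eu01,yyyyyyyy-yyyy-yyyy-yyyy-yyyyyyyyyyyy" (the region value is only an illustrative assumption)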
+func (r *exportPolicyResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
+ idParts := strings.Split(req.ID, core.Separator)
+
+ if len(idParts) != 3 || idParts[0] == "" || idParts[1] == "" || idParts[2] == "" {
+ core.LogAndAddError(ctx, &resp.Diagnostics,
+ "Error importing export policy",
+ fmt.Sprintf("Expected import identifier with format: [project_id],[region],[policy_id] Got: %q", req.ID),
+ )
+ return
+ }
+
+ resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("project_id"), idParts[0])...)
+ resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("region"), idParts[1])...)
+ resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("policy_id"), idParts[2])...)
+
+ tflog.Info(ctx, "SFS export policy state import")
+}
+
+// Maps export policy fields from the API response to the provider's internal model
+func mapFields(ctx context.Context, resp *sfs.GetShareExportPolicyResponse, model *Model, region string) error {
+ if resp == nil || resp.ShareExportPolicy == nil {
+ return fmt.Errorf("response input is nil")
+ }
+ if model == nil {
+ return fmt.Errorf("model input is nil")
+ }
+
+ var exportPolicyId string
+ if model.ExportPolicyId.ValueString() != "" {
+ exportPolicyId = model.ExportPolicyId.ValueString()
+ } else if resp.ShareExportPolicy.Id != nil {
+ exportPolicyId = *resp.ShareExportPolicy.Id
+ } else {
+ return fmt.Errorf("export policy id not present")
+ }
+
+ // iterate over Rules from response
+ if resp.ShareExportPolicy.Rules != nil {
+ rulesList := []attr.Value{}
+ for _, rule := range *resp.ShareExportPolicy.Rules {
+ var ipAcl basetypes.ListValue
+ if rule.IpAcl != nil {
+ var diags diag.Diagnostics
+ ipAcl, diags = types.ListValueFrom(ctx, types.StringType, rule.IpAcl)
+ if diags.HasError() {
+ return fmt.Errorf("failed to map ip acls: %w", core.DiagsToError(diags))
+ }
+ } else {
+ ipAcl = types.ListNull(types.StringType)
+ }
+
+ rulesValues := map[string]attr.Value{
+ "description": types.StringPointerValue(rule.GetDescription()),
+ "ip_acl": ipAcl,
+ "order": types.Int64PointerValue(rule.Order),
+ "read_only": types.BoolPointerValue(rule.ReadOnly),
+ "set_uuid": types.BoolPointerValue(rule.SetUuid),
+ "super_user": types.BoolPointerValue(rule.SuperUser),
+ }
+
+ ruleModel, diags := types.ObjectValue(rulesTypes, rulesValues)
+ if diags.HasError() {
+ return fmt.Errorf("converting rule to TF types: %w", core.DiagsToError(diags))
+ }
+
+ rulesList = append(rulesList, ruleModel)
+ }
+
+ convertedRulesList, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: rulesTypes}, rulesList)
+ if diags.HasError() {
+ return fmt.Errorf("mapping rules list: %w", core.DiagsToError(diags))
+ }
+
+ model.Rules = convertedRulesList
+ }
+
+ model.Id = utils.BuildInternalTerraformId(
+ model.ProjectId.ValueString(),
+ region,
+ exportPolicyId,
+ )
+ model.ExportPolicyId = types.StringValue(exportPolicyId)
+ model.Name = types.StringPointerValue(resp.ShareExportPolicy.Name)
+ model.Region = types.StringValue(region)
+
+ return nil
+}
+
+// Build CreateShareExportPolicyPayload from the provider's model
+func toCreatePayload(model *Model, rules []rulesModel) (*sfs.CreateShareExportPolicyPayload, error) {
+ if model == nil {
+ return nil, fmt.Errorf("nil model")
+ }
+ if rules == nil {
+ return nil, fmt.Errorf("nil rules")
+ }
+
+ // iterate over rules
+ var tempRules []sfs.CreateShareExportPolicyRequestRule
+ for _, rule := range rules {
+ // convert list
+ convertedList, err := conversion.StringListToPointer(rule.IpAcl)
+ if err != nil {
+ return nil, fmt.Errorf("conversion of rule failed")
+ }
+ tempRule := sfs.CreateShareExportPolicyRequestRule{
+ Description: sfs.NewNullableString(conversion.StringValueToPointer(rule.Description)),
+ IpAcl: convertedList,
+ Order: conversion.Int64ValueToPointer(rule.Order),
+ ReadOnly: conversion.BoolValueToPointer(rule.ReadOnly),
+ SetUuid: conversion.BoolValueToPointer(rule.SetUuid),
+ SuperUser: conversion.BoolValueToPointer(rule.SuperUser),
+ }
+ tempRules = append(tempRules, tempRule)
+ }
+
+ // name and rules
+ result := &sfs.CreateShareExportPolicyPayload{
+ Name: model.Name.ValueStringPointer(),
+ }
+
+ // Rules should only be set if tempRules has a value. Otherwise, the payload would contain `{ "rules": null }`, which should be prevented
+ if tempRules != nil {
+ result.Rules = &tempRules
+ }
+
+ return result, nil
+}
+
+func toUpdatePayload(model *Model, rules []rulesModel) (*sfs.UpdateShareExportPolicyPayload, error) {
+ if model == nil {
+ return nil, fmt.Errorf("nil model")
+ }
+ if rules == nil {
+ return nil, fmt.Errorf("nil rules")
+ }
+
+ // iterate over rules
+ tempRules := make([]sfs.UpdateShareExportPolicyBodyRule, len(rules))
+ for i, rule := range rules {
+ // convert list
+ convertedList, err := conversion.StringListToPointer(rule.IpAcl)
+ if err != nil {
+ return nil, fmt.Errorf("conversion of rule failed")
+ }
+ tempRule := sfs.UpdateShareExportPolicyBodyRule{
+ Description: sfs.NewNullableString(conversion.StringValueToPointer(rule.Description)),
+ IpAcl: convertedList,
+ Order: conversion.Int64ValueToPointer(rule.Order),
+ ReadOnly: conversion.BoolValueToPointer(rule.ReadOnly),
+ SetUuid: conversion.BoolValueToPointer(rule.SetUuid),
+ SuperUser: conversion.BoolValueToPointer(rule.SuperUser),
+ }
+ tempRules[i] = tempRule
+ }
+
+ // only rules
+ result := &sfs.UpdateShareExportPolicyPayload{
+ // Rules should **never** result in a payload where they are defined as null, e.g. `{ "rules": null }`. Instead,
+ // they should either be set to an array (with values or empty) or they shouldn't be present in the payload.
+ Rules: &tempRules,
+ }
+ return result, nil
+}
diff --git a/stackit/internal/services/sfs/export-policy/resource_test.go b/stackit/internal/services/sfs/export-policy/resource_test.go
new file mode 100644
index 000000000..ee939d703
--- /dev/null
+++ b/stackit/internal/services/sfs/export-policy/resource_test.go
@@ -0,0 +1,359 @@
+package exportpolicy
+
+import (
+ "context"
+ _ "embed"
+ "reflect"
+ "testing"
+
+ "github.com/google/go-cmp/cmp"
+ "github.com/hashicorp/terraform-plugin-framework/attr"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+ "github.com/hashicorp/terraform-plugin-framework/types/basetypes"
+ "github.com/stackitcloud/stackit-sdk-go/core/utils"
+ "github.com/stackitcloud/stackit-sdk-go/services/sfs"
+)
+
+// common test fixtures
+var project_id = "project_id"
+
+func fixtureRulesResponse() *[]sfs.ShareExportPolicyRule {
+ return &[]sfs.ShareExportPolicyRule{
+ {
+ Description: sfs.NewNullableString(utils.Ptr("description")),
+ IpAcl: utils.Ptr([]string{"172.16.0.0/24", "172.16.0.251/32"}),
+ Order: utils.Ptr(int64(0)),
+ ReadOnly: utils.Ptr(false),
+ SetUuid: utils.Ptr(false),
+ SuperUser: utils.Ptr(false),
+ },
+ {
+ Description: sfs.NewNullableString(utils.Ptr("description")),
+ IpAcl: utils.Ptr([]string{"172.32.0.0/24", "172.32.0.251/32"}),
+ Order: utils.Ptr(int64(1)),
+ ReadOnly: utils.Ptr(false),
+ SetUuid: utils.Ptr(false),
+ SuperUser: utils.Ptr(false),
+ },
+ }
+}
+
+func fixtureRulesModel() basetypes.ListValue {
+ // create the list
+ return types.ListValueMust(types.ObjectType{AttrTypes: rulesTypes}, []attr.Value{
+ types.ObjectValueMust(rulesTypes, map[string]attr.Value{
+ "description": types.StringValue("description"),
+ "ip_acl": types.ListValueMust(types.StringType, []attr.Value{
+ types.StringValue("172.16.0.0/24"),
+ types.StringValue("172.16.0.251/32"),
+ }),
+ "order": types.Int64Value(0),
+ "read_only": types.BoolValue(false),
+ "set_uuid": types.BoolValue(false),
+ "super_user": types.BoolValue(false),
+ }),
+ types.ObjectValueMust(rulesTypes, map[string]attr.Value{
+ "description": types.StringValue("description"),
+ "ip_acl": types.ListValueMust(types.StringType, []attr.Value{
+ types.StringValue("172.32.0.0/24"),
+ types.StringValue("172.32.0.251/32"),
+ }),
+ "order": types.Int64Value(1),
+ "read_only": types.BoolValue(false),
+ "set_uuid": types.BoolValue(false),
+ "super_user": types.BoolValue(false),
+ }),
+ })
+}
+
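+// fixtureResponseModel returns the expected Terraform model for the given rules list.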
+func fixtureResponseModel(rulesModel basetypes.ListValue) *Model {
+ return &Model{
+		ProjectId:      types.StringValue(projectId),
+		Id:             types.StringValue(projectId + ",region,uuid1"),
+ ExportPolicyId: types.StringValue("uuid1"),
+ Rules: rulesModel,
+ Region: types.StringValue("region"),
+ }
+}
+
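+// fixtureRulesCreatePayload returns two example rules in create-payload form.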
+func fixtureRulesCreatePayload() []sfs.CreateShareExportPolicyRequestRule {
+ return []sfs.CreateShareExportPolicyRequestRule{
+ {
+ Description: sfs.NewNullableString(utils.Ptr("description")),
+ IpAcl: &[]string{
+ "172.32.0.0/24",
+ "172.32.0.251/32",
+ },
+ Order: utils.Ptr(int64(0)),
+ ReadOnly: utils.Ptr(false),
+ SetUuid: utils.Ptr(false),
+ SuperUser: utils.Ptr(false),
+ },
+ {
+ Description: sfs.NewNullableString(utils.Ptr("description")),
+ IpAcl: &[]string{
+ "172.16.0.0/24",
+ "172.16.0.251/32",
+ },
+ Order: utils.Ptr(int64(1)),
+ ReadOnly: utils.Ptr(false),
+ SetUuid: utils.Ptr(false),
+ SuperUser: utils.Ptr(false),
+ },
+ }
+}
+
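+// fixtureRulesUpdatePayload returns the same example rules in update-payload form.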
+func fixtureRulesUpdatePayload() []sfs.UpdateShareExportPolicyBodyRule {
+ return []sfs.UpdateShareExportPolicyBodyRule{
+ {
+ Description: sfs.NewNullableString(utils.Ptr("description")),
+ IpAcl: &[]string{
+ "172.32.0.0/24",
+ "172.32.0.251/32",
+ },
+ Order: utils.Ptr(int64(0)),
+ ReadOnly: utils.Ptr(false),
+ SetUuid: utils.Ptr(false),
+ SuperUser: utils.Ptr(false),
+ },
+ {
+ Description: sfs.NewNullableString(utils.Ptr("description")),
+ IpAcl: &[]string{
+ "172.16.0.0/24",
+ "172.16.0.251/32",
+ },
+ Order: utils.Ptr(int64(1)),
+ ReadOnly: utils.Ptr(false),
+ SetUuid: utils.Ptr(false),
+ SuperUser: utils.Ptr(false),
+ },
+ }
+}
+
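+// fixtureRulesPayloadModel returns the internal rules model used as input for the payload conversions.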
+func fixtureRulesPayloadModel() []rulesModel {
+ return []rulesModel{
+ {
+ Description: types.StringValue("description"),
+ IpAcl: types.ListValueMust(types.StringType, []attr.Value{types.StringValue("172.32.0.0/24"), types.StringValue("172.32.0.251/32")}),
+ Order: types.Int64Value(0),
+ ReadOnly: types.BoolValue(false),
+ SetUuid: types.BoolValue(false),
+ SuperUser: types.BoolValue(false),
+ },
+ {
+ Description: types.StringValue("description"),
+ IpAcl: types.ListValueMust(types.StringType, []attr.Value{types.StringValue("172.16.0.0/24"), types.StringValue("172.16.0.251/32")}),
+ Order: types.Int64Value(1),
+ ReadOnly: types.BoolValue(false),
+ SetUuid: types.BoolValue(false),
+ SuperUser: types.BoolValue(false),
+ },
+ }
+}
+
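+// fixtureExportPolicyCreatePayload wraps the given rules in a create payload.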
+func fixtureExportPolicyCreatePayload(rules *[]sfs.CreateShareExportPolicyRequestRule) *sfs.CreateShareExportPolicyPayload {
+ return &sfs.CreateShareExportPolicyPayload{
+ Name: utils.Ptr("createPayloadName"),
+ Rules: rules,
+ }
+}
+
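+// fixtureExportPolicyUpdatePayload wraps the given rules in an update payload.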
+func fixtureExportPolicyUpdatePayload(rules []sfs.UpdateShareExportPolicyBodyRule) *sfs.UpdateShareExportPolicyPayload {
+ return &sfs.UpdateShareExportPolicyPayload{
+ Rules: &rules,
+ }
+}
+
+func TestMapFields(t *testing.T) {
+ const testRegion = "region"
+ tests := []struct {
+ name string
+ input *sfs.GetShareExportPolicyResponse
+ state *Model
+ expectedModel *Model
+ isValid bool
+ region string
+ }{
+ {
+ name: "resp is nil",
+ state: &Model{
+				ProjectId: types.StringValue(projectId),
+ },
+ input: nil,
+ region: testRegion,
+ isValid: false,
+ },
+ {
+ name: "shared export policy in response is nil",
+ state: &Model{
+				ProjectId: types.StringValue(projectId),
+ },
+ input: &sfs.GetShareExportPolicyResponse{},
+ region: testRegion,
+ isValid: false,
+ },
+ {
+ name: "rules list is empty",
+ state: &Model{
+				ProjectId: types.StringValue(projectId),
+ },
+ input: &sfs.GetShareExportPolicyResponse{
+ ShareExportPolicy: &sfs.GetShareExportPolicyResponseShareExportPolicy{
+ Id: utils.Ptr("uuid1"),
+ Rules: &[]sfs.ShareExportPolicyRule{},
+ },
+ },
+ expectedModel: fixtureResponseModel(types.ListValueMust(types.ObjectType{AttrTypes: rulesTypes}, []attr.Value{})),
+ region: testRegion,
+ isValid: true,
+ },
+ {
+ name: "normal data",
+ state: &Model{
+				ProjectId: types.StringValue(projectId),
+ },
+ input: &sfs.GetShareExportPolicyResponse{
+ ShareExportPolicy: &sfs.GetShareExportPolicyResponseShareExportPolicy{
+ Id: utils.Ptr("uuid1"),
+ Rules: fixtureRulesResponse(),
+ },
+ },
+ expectedModel: fixtureResponseModel(fixtureRulesModel()),
+ region: testRegion,
+ isValid: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ err := mapFields(context.Background(), tt.input, tt.state, tt.region)
+ if !tt.isValid && err == nil {
+ t.Fatalf("Should have failed")
+ }
+ if tt.isValid && err != nil {
+ t.Fatalf("Should not have failed: %v", err)
+ }
+ if tt.isValid {
+ diff := cmp.Diff(tt.state, tt.expectedModel)
+ if diff != "" {
+ t.Fatalf("Data does not match: %s", diff)
+ }
+ }
+ })
+ }
+}
+
+func TestToCreatePayload(t *testing.T) {
+ tests := []struct {
+ name string
+ model *Model
+ rules []rulesModel
+ expected *sfs.CreateShareExportPolicyPayload
+ wantErr bool
+ }{
+ {
+ name: "nil rules",
+ model: &Model{
+				ProjectId: types.StringValue(projectId),
+ Name: types.StringValue("createPayloadName"),
+ },
+ rules: nil,
+ wantErr: true,
+ },
+ {
+ name: "nil model",
+ model: nil,
+ rules: []rulesModel{},
+ wantErr: true,
+ },
+ {
+ name: "empty rule payload",
+ model: &Model{
+				ProjectId: types.StringValue(projectId),
+ Name: types.StringValue("createPayloadName"),
+ },
+ rules: []rulesModel{},
+ expected: fixtureExportPolicyCreatePayload(nil),
+ wantErr: false,
+ },
+ {
+ name: "valid rule payload",
+ model: &Model{
+				ProjectId: types.StringValue(projectId),
+ Name: types.StringValue("createPayloadName"),
+ },
+ rules: fixtureRulesPayloadModel(),
+ expected: fixtureExportPolicyCreatePayload(utils.Ptr(fixtureRulesCreatePayload())),
+ wantErr: false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got, err := toCreatePayload(tt.model, tt.rules)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("toCreatePayload() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ if !reflect.DeepEqual(got, tt.expected) {
+ t.Errorf("toCreatePayload() = %v, want %v", got, tt.expected)
+ }
+ })
+ }
+}
+
+func TestToUpdatePayload(t *testing.T) {
+ tests := []struct {
+ name string
+ model *Model
+ rules []rulesModel
+ expected *sfs.UpdateShareExportPolicyPayload
+ wantErr bool
+ }{
+ {
+ name: "nil rules",
+ model: &Model{
+				ProjectId: types.StringValue(projectId),
+ Name: types.StringValue("updatePayloadName"),
+ },
+ rules: nil,
+ wantErr: true,
+ },
+ {
+ name: "nil model",
+ model: nil,
+ rules: []rulesModel{},
+ wantErr: true,
+ },
+ {
+ name: "empty rule payload",
+ model: &Model{
+				ProjectId: types.StringValue(projectId),
+ Name: types.StringValue("createPayloadName"),
+ },
+ rules: []rulesModel{},
+ expected: fixtureExportPolicyUpdatePayload([]sfs.UpdateShareExportPolicyBodyRule{}),
+ wantErr: false,
+ },
+ {
+ name: "valid rule payload",
+ model: &Model{
+				ProjectId: types.StringValue(projectId),
+ Name: types.StringValue("createPayloadName"),
+ },
+ rules: fixtureRulesPayloadModel(),
+ expected: fixtureExportPolicyUpdatePayload(fixtureRulesUpdatePayload()),
+ wantErr: false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got, err := toUpdatePayload(tt.model, tt.rules)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("toUpdatePayload() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ if !reflect.DeepEqual(got, tt.expected) {
+ t.Errorf("toUpdatePayload() = %v, want %v", got, tt.expected)
+ }
+ })
+ }
+}
diff --git a/stackit/internal/services/sfs/resourcepool/datasource.go b/stackit/internal/services/sfs/resourcepool/datasource.go
new file mode 100644
index 000000000..1e0efd435
--- /dev/null
+++ b/stackit/internal/services/sfs/resourcepool/datasource.go
@@ -0,0 +1,250 @@
+package resourcepool
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net/http"
+ "time"
+
+ "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator"
+ "github.com/hashicorp/terraform-plugin-framework/datasource"
+ "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
+ "github.com/hashicorp/terraform-plugin-framework/diag"
+ "github.com/hashicorp/terraform-plugin-framework/schema/validator"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+ "github.com/hashicorp/terraform-plugin-log/tflog"
+ "github.com/stackitcloud/stackit-sdk-go/core/oapierror"
+ "github.com/stackitcloud/stackit-sdk-go/services/sfs"
+ "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/conversion"
+ "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/core"
+ "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/features"
+ sfsUtils "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/services/sfs/utils"
+ "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/utils"
+ "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/validate"
+)
+
+var (
+ _ datasource.DataSource = (*resourcePoolDataSource)(nil)
+ _ datasource.DataSourceWithConfigure = (*resourcePoolDataSource)(nil)
+)
+
+type dataSourceModel struct {
+ Id types.String `tfsdk:"id"` // needed by TF
+ ProjectId types.String `tfsdk:"project_id"`
+ ResourcePoolId types.String `tfsdk:"resource_pool_id"`
+ AvailabilityZone types.String `tfsdk:"availability_zone"`
+ IpAcl types.List `tfsdk:"ip_acl"`
+ Name types.String `tfsdk:"name"`
+ PerformanceClass types.String `tfsdk:"performance_class"`
+ SizeGigabytes types.Int64 `tfsdk:"size_gigabytes"`
+ SizeReducibleAt types.String `tfsdk:"size_reducible_at"`
+ PerformanceClassDowngradableAt types.String `tfsdk:"performance_class_downgradable_at"`
+ Region types.String `tfsdk:"region"`
+ SnapshotsAreVisible types.Bool `tfsdk:"snapshots_are_visible"`
+}
+
+type resourcePoolDataSource struct {
+ client *sfs.APIClient
+ providerData core.ProviderData
+}
+
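+// NewResourcePoolDataSource is a helper function to simplify the provider implementation.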
+func NewResourcePoolDataSource() datasource.DataSource {
+ return &resourcePoolDataSource{}
+}
+
+// Configure implements datasource.DataSourceWithConfigure.
+func (r *resourcePoolDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
+ var ok bool
+ r.providerData, ok = conversion.ParseProviderData(ctx, req.ProviderData, &resp.Diagnostics)
+ if !ok {
+ return
+ }
+
+ features.CheckBetaResourcesEnabled(ctx, &r.providerData, &resp.Diagnostics, "stackit_sfs_resource_pool", core.Datasource)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ apiClient := sfsUtils.ConfigureClient(ctx, &r.providerData, &resp.Diagnostics)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+ r.client = apiClient
+ tflog.Info(ctx, "SFS client configured")
+}
+
+// Metadata implements datasource.DataSource.
+func (r *resourcePoolDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
+ resp.TypeName = req.ProviderTypeName + "_sfs_resource_pool"
+}
+
+// Read implements datasource.DataSource.
+func (r *resourcePoolDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { // nolint:gocritic // function signature required by Terraform
+ var model dataSourceModel
+ diags := req.Config.Get(ctx, &model)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+ projectId := model.ProjectId.ValueString()
+ resourcePoolId := model.ResourcePoolId.ValueString()
+ region := r.providerData.GetRegionWithOverride(model.Region)
+ ctx = tflog.SetField(ctx, "project_id", projectId)
+ ctx = tflog.SetField(ctx, "resource_pool_id", resourcePoolId)
+ ctx = tflog.SetField(ctx, "region", region)
+
+ ctx = core.InitProviderContext(ctx)
+
+ response, err := r.client.GetResourcePoolExecute(ctx, projectId, region, resourcePoolId)
+ if err != nil {
+ var openapiError *oapierror.GenericOpenAPIError
+ if errors.As(err, &openapiError) {
+ if openapiError.StatusCode == http.StatusNotFound {
+ resp.State.RemoveResource(ctx)
+ return
+ }
+ }
+ core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading resource pool", fmt.Sprintf("Calling API: %v", err))
+ return
+ }
+
+ ctx = core.LogResponse(ctx)
+
+ // Map response body to schema
+ err = mapDataSourceFields(ctx, region, response.ResourcePool, &model)
+ if err != nil {
+ core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading resource pool", fmt.Sprintf("Processing API payload: %v", err))
+ return
+ }
+ // Set refreshed state
+ diags = resp.State.Set(ctx, model)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+ tflog.Info(ctx, "SFS resource pool read")
+}
+
+// Schema implements datasource.DataSource.
+func (r *resourcePoolDataSource) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
+ description := "Resource-pool datasource schema. Must have a `region` specified in the provider configuration."
+ resp.Schema = schema.Schema{
+ MarkdownDescription: features.AddBetaDescription(description, core.Datasource),
+ Description: description,
+ Attributes: map[string]schema.Attribute{
+ "id": schema.StringAttribute{
+ Description: "Terraform's internal resource ID. It is structured as \"`project_id`,`resource_pool_id`\".",
+ Computed: true,
+ },
+ "project_id": schema.StringAttribute{
+ Description: "STACKIT project ID to which the resource pool is associated.",
+ Required: true,
+ Validators: []validator.String{
+ validate.UUID(),
+ validate.NoSeparator(),
+ },
+ },
+ "resource_pool_id": schema.StringAttribute{
+ Description: "Resourcepool ID",
+ Required: true,
+ Validators: []validator.String{
+ validate.UUID(),
+ validate.NoSeparator(),
+ },
+ },
+ "availability_zone": schema.StringAttribute{
+ Computed: true,
+ Description: "Availability zone.",
+ },
+ "ip_acl": schema.ListAttribute{
+ ElementType: types.StringType,
+ Computed: true,
+				Description: `List of IPs that can mount the resource pool in read-only mode; IPs must have a subnet mask (e.g. "172.16.0.0/24" for a range of IPs, or "172.16.0.250/32" for a specific IP).`,
+ Validators: []validator.List{
+ listvalidator.ValueStringsAre(validate.CIDR()),
+ },
+ },
+ "performance_class": schema.StringAttribute{
+ Computed: true,
+ Description: "Name of the performance class.",
+ },
+ "size_gigabytes": schema.Int64Attribute{
+ Computed: true,
+ Description: `Size of the resource pool (unit: gigabytes)`,
+ },
+ "name": schema.StringAttribute{
+ Description: "Name of the resource pool.",
+ Computed: true,
+ },
+ "performance_class_downgradable_at": schema.StringAttribute{
+ Computed: true,
+ Description: "Time when the performance class can be downgraded again.",
+ },
+ "size_reducible_at": schema.StringAttribute{
+ Computed: true,
+ Description: "Time when the size can be reduced again.",
+ },
+ "snapshots_are_visible": schema.BoolAttribute{
+ Computed: true,
+ Description: "If set to true, snapshots are visible and accessible to users. (default: false)",
+ },
+ "region": schema.StringAttribute{
+ // the region cannot be found automatically, so it has to be passed
+ Optional: true,
+ Description: "The resource region. Read-only attribute that reflects the provider region.",
+ }},
+ }
+}
+
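+// mapDataSourceFields maps an API resource pool response onto the data source model.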
+func mapDataSourceFields(ctx context.Context, region string, resourcePool *sfs.GetResourcePoolResponseResourcePool, model *dataSourceModel) error {
+ if resourcePool == nil {
+ return fmt.Errorf("resource pool empty in response")
+ }
+ if model == nil {
+ return fmt.Errorf("model input is nil")
+ }
+
+ model.AvailabilityZone = types.StringPointerValue(resourcePool.AvailabilityZone)
+ if resourcePool.Id == nil {
+ return fmt.Errorf("resource pool id not present")
+ }
+ model.ResourcePoolId = types.StringPointerValue(resourcePool.Id)
+ model.Region = types.StringValue(region)
+ model.SnapshotsAreVisible = types.BoolPointerValue(resourcePool.SnapshotsAreVisible)
+ model.Id = utils.BuildInternalTerraformId(
+ model.ProjectId.ValueString(),
+ region,
+ utils.Coalesce(model.ResourcePoolId, types.StringPointerValue(resourcePool.Id)).ValueString(),
+ )
+
+ if resourcePool.IpAcl != nil {
+ var diags diag.Diagnostics
+ model.IpAcl, diags = types.ListValueFrom(ctx, types.StringType, resourcePool.IpAcl)
+ if diags.HasError() {
+ return fmt.Errorf("failed to map ip acls: %w", core.DiagsToError(diags))
+ }
+ } else {
+ model.IpAcl = types.ListNull(types.StringType)
+ }
+
+ model.Name = types.StringPointerValue(resourcePool.Name)
+ if pc := resourcePool.PerformanceClass; pc != nil {
+ model.PerformanceClass = types.StringPointerValue(pc.Name)
+ }
+
+ if resourcePool.Space != nil {
+ model.SizeGigabytes = types.Int64PointerValue(resourcePool.Space.SizeGigabytes)
+ }
+
+ if t := resourcePool.PerformanceClassDowngradableAt; t != nil {
+ model.PerformanceClassDowngradableAt = types.StringValue(t.Format(time.RFC3339))
+ }
+
+ if t := resourcePool.SizeReducibleAt; t != nil {
+ model.SizeReducibleAt = types.StringValue(t.Format(time.RFC3339))
+ }
+
+ return nil
+}
diff --git a/stackit/internal/services/sfs/resourcepool/datasource_test.go b/stackit/internal/services/sfs/resourcepool/datasource_test.go
new file mode 100644
index 000000000..0cc1e629a
--- /dev/null
+++ b/stackit/internal/services/sfs/resourcepool/datasource_test.go
@@ -0,0 +1,112 @@
+package resourcepool
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/google/go-cmp/cmp"
+ "github.com/hashicorp/terraform-plugin-framework/attr"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+ "github.com/stackitcloud/stackit-sdk-go/core/utils"
+ "github.com/stackitcloud/stackit-sdk-go/services/sfs"
+)
+
+func TestMapDatasourceFields(t *testing.T) {
+ now := time.Now()
+ testTime := types.StringValue(now.Format(time.RFC3339))
+ testTimePlus1h := types.StringValue(now.Add(1 * time.Hour).Format(time.RFC3339))
+ tests := []struct {
+ name string
+ state *dataSourceModel
+ region string
+ input *sfs.GetResourcePoolResponseResourcePool
+ expected *dataSourceModel
+ isValid bool
+ }{
+ {
+ "default_values",
+ &dataSourceModel{
+ Id: testId,
+ ProjectId: testProjectId,
+ },
+ "eu01",
+ &sfs.GetResourcePoolResponseResourcePool{
+ Id: testResourcePoolId.ValueStringPointer(),
+ },
+ &dataSourceModel{
+ Id: testId,
+ ProjectId: testProjectId,
+ ResourcePoolId: testResourcePoolId,
+ AvailabilityZone: types.StringNull(),
+ IpAcl: types.ListNull(types.StringType),
+ Name: types.StringNull(),
+ PerformanceClass: types.StringNull(),
+ SizeGigabytes: types.Int64Null(),
+ Region: testRegion,
+ SizeReducibleAt: types.StringNull(),
+ PerformanceClassDowngradableAt: types.StringNull(),
+ },
+ true,
+ },
+ {
+ name: "simple_values",
+ state: &dataSourceModel{
+ Id: testId,
+ ProjectId: testProjectId,
+ },
+ region: "eu01",
+ input: &sfs.GetResourcePoolResponseResourcePool{
+ AvailabilityZone: testAvailabilityZone.ValueStringPointer(),
+ CountShares: utils.Ptr[int64](42),
+ CreatedAt: &now,
+ Id: testResourcePoolId.ValueStringPointer(),
+ IpAcl: &[]string{"foo", "bar", "baz"},
+ MountPath: utils.Ptr("mountpoint"),
+ Name: utils.Ptr("testname"),
+ PerformanceClass: &sfs.ResourcePoolPerformanceClass{
+ Name: utils.Ptr("performance"),
+ PeakIops: utils.Ptr[int64](42),
+ Throughput: utils.Ptr[int64](54),
+ },
+ PerformanceClassDowngradableAt: utils.Ptr(now),
+ SizeReducibleAt: utils.Ptr(now.Add(1 * time.Hour)),
+ Space: &sfs.ResourcePoolSpace{
+ SizeGigabytes: utils.Ptr[int64](42),
+ },
+ State: utils.Ptr("state"),
+ },
+ expected: &dataSourceModel{
+ Id: testId,
+ ProjectId: testProjectId,
+ ResourcePoolId: testResourcePoolId,
+ AvailabilityZone: testAvailabilityZone,
+ IpAcl: types.ListValueMust(types.StringType, []attr.Value{
+ types.StringValue("foo"),
+ types.StringValue("bar"),
+ types.StringValue("baz"),
+ }),
+ Name: types.StringValue("testname"),
+ PerformanceClass: types.StringValue("performance"),
+ SizeGigabytes: types.Int64Value(42),
+ Region: testRegion,
+ SizeReducibleAt: testTimePlus1h,
+ PerformanceClassDowngradableAt: testTime,
+ },
+ isValid: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ ctx := context.Background()
+ if err := mapDataSourceFields(ctx, tt.region, tt.input, tt.state); (err == nil) != tt.isValid {
+ t.Errorf("unexpected error")
+ }
+ if tt.isValid {
+ if diff := cmp.Diff(tt.state, tt.expected); diff != "" {
+ t.Fatalf("Data does not match: %s", diff)
+ }
+ }
+ })
+ }
+}
diff --git a/stackit/internal/services/sfs/resourcepool/resource.go b/stackit/internal/services/sfs/resourcepool/resource.go
new file mode 100644
index 000000000..8e37a2157
--- /dev/null
+++ b/stackit/internal/services/sfs/resourcepool/resource.go
@@ -0,0 +1,563 @@
+package resourcepool
+
+import (
+ "context"
+ _ "embed"
+ "errors"
+ "fmt"
+ "net/http"
+ "strings"
+
+ "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator"
+ "github.com/hashicorp/terraform-plugin-framework/diag"
+ "github.com/hashicorp/terraform-plugin-framework/path"
+ "github.com/hashicorp/terraform-plugin-framework/resource"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/booldefault"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
+ "github.com/hashicorp/terraform-plugin-framework/schema/validator"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+ "github.com/hashicorp/terraform-plugin-log/tflog"
+ "github.com/stackitcloud/stackit-sdk-go/core/oapierror"
+ "github.com/stackitcloud/stackit-sdk-go/services/sfs"
+ "github.com/stackitcloud/stackit-sdk-go/services/sfs/wait"
+ "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/conversion"
+ "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/core"
+ "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/features"
+ sfsUtils "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/services/sfs/utils"
+ "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/utils"
+ coreutils "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/utils"
+ "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/validate"
+)
+
+// Ensure the implementation satisfies the expected interfaces.
+var (
+ _ resource.Resource = &resourcePoolResource{}
+ _ resource.ResourceWithImportState = &resourcePoolResource{}
+ _ resource.ResourceWithConfigure = &resourcePoolResource{}
+ _ resource.ResourceWithModifyPlan = &resourcePoolResource{}
+)
+
+type Model struct {
+ Id types.String `tfsdk:"id"` // needed by TF
+ ProjectId types.String `tfsdk:"project_id"`
+ ResourcePoolId types.String `tfsdk:"resource_pool_id"`
+ AvailabilityZone types.String `tfsdk:"availability_zone"`
+ IpAcl types.List `tfsdk:"ip_acl"`
+ Name types.String `tfsdk:"name"`
+ PerformanceClass types.String `tfsdk:"performance_class"`
+ SizeGigabytes types.Int64 `tfsdk:"size_gigabytes"`
+ Region types.String `tfsdk:"region"`
+ SnapshotsAreVisible types.Bool `tfsdk:"snapshots_are_visible"`
+}
+
+// NewResourcePoolResource is a helper function to simplify the provider implementation.
+func NewResourcePoolResource() resource.Resource {
+ return &resourcePoolResource{}
+}
+
+// resourcePoolResource is the resource implementation.
+type resourcePoolResource struct {
+ client *sfs.APIClient
+ providerData core.ProviderData
+}
+
+// ModifyPlan implements resource.ResourceWithModifyPlan.
+func (r *resourcePoolResource) ModifyPlan(ctx context.Context, req resource.ModifyPlanRequest, resp *resource.ModifyPlanResponse) { //nolint:gocritic // defined by terraform api
+ var configModel Model
+ // skip initial empty configuration to avoid follow-up errors
+ if req.Config.Raw.IsNull() {
+ return
+ }
+ resp.Diagnostics.Append(req.Config.Get(ctx, &configModel)...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ var planModel Model
+ resp.Diagnostics.Append(req.Plan.Get(ctx, &planModel)...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+	utils.AdaptRegion(ctx, configModel.Region, &planModel.Region, r.providerData.GetRegion(), resp)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ resp.Diagnostics.Append(resp.Plan.Set(ctx, planModel)...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+}
+
+// Metadata returns the resource type name.
+func (r *resourcePoolResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
+ resp.TypeName = req.ProviderTypeName + "_sfs_resource_pool"
+}
+
+// Configure adds the provider configured client to the resource.
+func (r *resourcePoolResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) {
+ var ok bool
+ r.providerData, ok = conversion.ParseProviderData(ctx, req.ProviderData, &resp.Diagnostics)
+ if !ok {
+ return
+ }
+
+ features.CheckBetaResourcesEnabled(ctx, &r.providerData, &resp.Diagnostics, "stackit_sfs_resource_pool", core.Resource)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ apiClient := sfsUtils.ConfigureClient(ctx, &r.providerData, &resp.Diagnostics)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+ r.client = apiClient
+ tflog.Info(ctx, "SFS client configured")
+}
+
+// Schema defines the schema for the resource.
+func (r *resourcePoolResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) {
+ description := "Resource-pool resource schema. Must have a `region` specified in the provider configuration."
+ resp.Schema = schema.Schema{
+ MarkdownDescription: features.AddBetaDescription(description, core.Resource),
+ Description: description,
+ Attributes: map[string]schema.Attribute{
+ "id": schema.StringAttribute{
+ Description: "Terraform's internal resource ID. It is structured as \"`project_id`,`region`,`resource_pool_id`\".",
+ Computed: true,
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.UseStateForUnknown(),
+ },
+ },
+ "project_id": schema.StringAttribute{
+ Description: "STACKIT project ID to which the resource pool is associated.",
+ Required: true,
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.RequiresReplace(),
+ },
+ Validators: []validator.String{
+ validate.UUID(),
+ validate.NoSeparator(),
+ },
+ },
+ "region": schema.StringAttribute{
+ Optional: true,
+ // must be computed to allow for storing the override value from the provider
+ Computed: true,
+ Description: "The resource region. If not defined, the provider region is used.",
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.RequiresReplace(),
+ },
+ },
+ "resource_pool_id": schema.StringAttribute{
+ Description: "Resource pool ID",
+ Computed: true,
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.UseStateForUnknown(),
+ },
+ Validators: []validator.String{
+ validate.UUID(),
+ validate.NoSeparator(),
+ },
+ },
+ "availability_zone": schema.StringAttribute{
+ Required: true,
+ Description: "Availability zone.",
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.RequiresReplace(),
+ },
+ },
+ "ip_acl": schema.ListAttribute{
+ ElementType: types.StringType,
+ Required: true,
+				Description: `List of IPs that can mount the resource pool in read-only mode; IPs must have a subnet mask (e.g. "172.16.0.0/24" for a range of IPs, or "172.16.0.250/32" for a specific IP).`,
+ Validators: []validator.List{
+ listvalidator.SizeAtLeast(1),
+ listvalidator.ValueStringsAre(validate.CIDR()),
+ },
+ },
+ "performance_class": schema.StringAttribute{
+ Required: true,
+ Description: "Name of the performance class.",
+ },
+ "size_gigabytes": schema.Int64Attribute{
+ Required: true,
+ Description: `Size of the resource pool (unit: gigabytes)`,
+ },
+ "name": schema.StringAttribute{
+ Description: "Name of the resource pool.",
+ Required: true,
+ PlanModifiers: []planmodifier.String{
+					// the API does not allow changing the name
+ stringplanmodifier.RequiresReplace(),
+ },
+ },
+ "snapshots_are_visible": schema.BoolAttribute{
+ Description: "If set to true, snapshots are visible and accessible to users. (default: false)",
+ Optional: true,
+ Computed: true,
+ Default: booldefault.StaticBool(false),
+ },
+ },
+ }
+}
+
+// Create creates the resource and sets the initial Terraform state.
+func (r *resourcePoolResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { // nolint:gocritic // function signature required by Terraform
+ // Retrieve values from plan
+ var model Model
+ diags := req.Plan.Get(ctx, &model)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ projectId := model.ProjectId.ValueString()
+ region := model.Region.ValueString()
+ ctx = tflog.SetField(ctx, "project_id", projectId)
+ ctx = tflog.SetField(ctx, "region", region)
+
+ ctx = core.InitProviderContext(ctx)
+
+ payload, err := toCreatePayload(&model)
+ if err != nil {
+ core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating resource pool", fmt.Sprintf("Cannot create payload: %v", err))
+ return
+ }
+
+ // Create new resourcepool
+ resourcePool, err := r.client.CreateResourcePool(ctx, projectId, region).
+ CreateResourcePoolPayload(*payload).
+ Execute()
+ if err != nil {
+ core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating resource pool", fmt.Sprintf("Calling API: %v", err))
+ return
+ }
+
+ ctx = core.LogResponse(ctx)
+
+ if resourcePool == nil || resourcePool.ResourcePool == nil || resourcePool.ResourcePool.Id == nil {
+ core.LogAndAddError(ctx, &resp.Diagnostics, "error creating resource pool", "Calling API: Incomplete response (id missing)")
+ return
+ }
+
+ // Write id attributes to state before polling via the wait handler - just in case anything goes wrong during the wait handler
+ utils.SetAndLogStateFields(ctx, &resp.Diagnostics, &resp.State, map[string]any{
+ "project_id": projectId,
+ "region": region,
+ "resource_pool_id": *resourcePool.ResourcePool.Id,
+ })
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ response, err := wait.CreateResourcePoolWaitHandler(ctx, r.client, projectId, region, *resourcePool.ResourcePool.Id).
+ WaitWithContext(ctx)
+ if err != nil {
+ core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating resource pool", fmt.Sprintf("resource pool creation waiting: %v", err))
+ return
+ }
+	if response.ResourcePool == nil || response.ResourcePool.Id == nil {
+		core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating resource pool", "response did not contain an ID")
+		return
+	}
+	ctx = tflog.SetField(ctx, "resource_pool_id", *response.ResourcePool.Id)
+
+	// the create response and the GET response are not compatible, so we can't use a unified
+	// mapFields function. Therefore, we issue a GET request after the create
+	// to obtain a compatible structure
+	getResponse, err := r.client.GetResourcePoolExecute(ctx, projectId, region, *response.ResourcePool.Id)
+ if err != nil {
+ core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating resource pool", fmt.Sprintf("resource pool get: %v", err))
+ return
+ }
+
+ // Map response body to schema
+ err = mapFields(ctx, region, getResponse.ResourcePool, &model)
+ if err != nil {
+ core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating resource pool", fmt.Sprintf("Processing API payload: %v", err))
+ return
+ }
+
+ // Set state to fully populated data
+ diags = resp.State.Set(ctx, model)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+ tflog.Info(ctx, "SFS ResourcePool created")
+}
+
+// Read refreshes the Terraform state with the latest data.
+func (r *resourcePoolResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { // nolint:gocritic // function signature required by Terraform
+ var model Model
+ diags := req.State.Get(ctx, &model)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+ projectId := model.ProjectId.ValueString()
+ resourcePoolId := model.ResourcePoolId.ValueString()
+ region := r.providerData.GetRegionWithOverride(model.Region)
+ ctx = tflog.SetField(ctx, "project_id", projectId)
+ ctx = tflog.SetField(ctx, "resource_pool_id", resourcePoolId)
+ ctx = tflog.SetField(ctx, "region", region)
+
+ ctx = core.InitProviderContext(ctx)
+
+ response, err := r.client.GetResourcePoolExecute(ctx, projectId, region, resourcePoolId)
+ if err != nil {
+ var openapiError *oapierror.GenericOpenAPIError
+ if errors.As(err, &openapiError) {
+ if openapiError.StatusCode == http.StatusNotFound {
+ resp.State.RemoveResource(ctx)
+ return
+ }
+ }
+ core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading resource pool", fmt.Sprintf("Calling API: %v", err))
+ return
+ }
+
+ ctx = core.LogResponse(ctx)
+
+ // Map response body to schema
+ err = mapFields(ctx, region, response.ResourcePool, &model)
+ if err != nil {
+ core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading resource pool", fmt.Sprintf("Processing API payload: %v", err))
+ return
+ }
+ // Set refreshed state
+ diags = resp.State.Set(ctx, model)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+ tflog.Info(ctx, "SFS resource pool read")
+}
+
+// Update updates the resource and sets the updated Terraform state on success.
+func (r *resourcePoolResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { // nolint:gocritic // function signature required by Terraform
+ // Retrieve values from plan
+ var model Model
+ diags := req.Plan.Get(ctx, &model)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+ projectId := model.ProjectId.ValueString()
+ resourcePoolId := model.ResourcePoolId.ValueString()
+ region := model.Region.ValueString()
+ ctx = tflog.SetField(ctx, "project_id", projectId)
+ ctx = tflog.SetField(ctx, "resource_pool_id", resourcePoolId)
+ ctx = tflog.SetField(ctx, "region", region)
+
+ ctx = core.InitProviderContext(ctx)
+
+ // Retrieve values from state
+ var stateModel Model
+ diags = req.State.Get(ctx, &stateModel)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ payload, err := toUpdatePayload(&model)
+ if err != nil {
+ core.LogAndAddError(ctx, &resp.Diagnostics, "Update resource pool", fmt.Sprintf("cannot create payload: %v", err))
+ return
+ }
+
+ response, err := r.client.UpdateResourcePool(ctx, projectId, region, resourcePoolId).
+ UpdateResourcePoolPayload(*payload).
+ Execute()
+ if err != nil {
+ var openapiError *oapierror.GenericOpenAPIError
+ if errors.As(err, &openapiError) {
+ if openapiError.StatusCode == http.StatusNotFound {
+ resp.State.RemoveResource(ctx)
+ return
+ }
+ }
+ core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating resource pool", fmt.Sprintf("Calling API: %v", err))
+ return
+ }
+
+ ctx = core.LogResponse(ctx)
+
+	// the update response and the GET response are not compatible, so we can't use a unified
+	// mapFields function. Therefore, we wait for the update to finish and read the resource
+	// pool again to obtain a compatible structure
+	if response.ResourcePool == nil || response.ResourcePool.Id == nil {
+		core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating resource pool", "response did not contain an ID")
+		return
+	}
+
+	getResponse, err := wait.UpdateResourcePoolWaitHandler(ctx, r.client, projectId, region, resourcePoolId).WaitWithContext(ctx)
+	if err != nil {
+		core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating resource pool", fmt.Sprintf("resource pool update waiting: %v", err))
+		return
+	}
+ err = mapFields(ctx, region, getResponse.ResourcePool, &model)
+ if err != nil {
+ core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating resource pool", fmt.Sprintf("Processing API payload: %v", err))
+ return
+ }
+ diags = resp.State.Set(ctx, model)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+ tflog.Info(ctx, "SFS resource pool updated")
+}
+
+// Delete deletes the resource and removes the Terraform state on success.
+func (r *resourcePoolResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { // nolint:gocritic // function signature required by Terraform
+ // Retrieve values from state
+ var model Model
+ diags := req.State.Get(ctx, &model)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ projectId := model.ProjectId.ValueString()
+ resourcePoolId := model.ResourcePoolId.ValueString()
+ region := model.Region.ValueString()
+ ctx = tflog.SetField(ctx, "project_id", projectId)
+ ctx = tflog.SetField(ctx, "resource_pool_id", resourcePoolId)
+ ctx = tflog.SetField(ctx, "region", region)
+
+ ctx = core.InitProviderContext(ctx)
+
+ // Delete existing resource pool
+ _, err := r.client.DeleteResourcePoolExecute(ctx, projectId, region, resourcePoolId)
+ if err != nil {
+ core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting resource pool", fmt.Sprintf("Calling API: %v", err))
+ return
+ }
+
+ ctx = core.LogResponse(ctx)
+
+	// wait until the deletion has completed
+ _, err = wait.DeleteResourcePoolWaitHandler(ctx, r.client, projectId, region, resourcePoolId).WaitWithContext(ctx)
+ if err != nil {
+ core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting resource pool", fmt.Sprintf("resource pool deletion waiting: %v", err))
+ return
+ }
+
+ tflog.Info(ctx, "SFS resource pool deleted")
+}
+
+// ImportState implements resource.ResourceWithImportState.
+func (r *resourcePoolResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
+ idParts := strings.Split(req.ID, core.Separator)
+ if len(idParts) != 3 || idParts[0] == "" || idParts[1] == "" || idParts[2] == "" {
+ core.LogAndAddError(ctx, &resp.Diagnostics,
+ "Error importing resource pool",
+ fmt.Sprintf("Expected import identifier with format [project_id],[region],[resource_pool_id], got %q", req.ID),
+ )
+ return
+ }
+
+ resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("project_id"), idParts[0])...)
+ resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("region"), idParts[1])...)
+ resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("resource_pool_id"), idParts[2])...)
+
+ tflog.Info(ctx, "SFS resource pool imported")
+}
+
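+// mapFields maps an API resource pool response onto the Terraform model.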
+func mapFields(ctx context.Context, region string, resourcePool *sfs.GetResourcePoolResponseResourcePool, model *Model) error {
+ if resourcePool == nil {
+ return fmt.Errorf("resource pool empty in response")
+ }
+ if model == nil {
+ return fmt.Errorf("model input is nil")
+ }
+
+ if resourcePool.Id == nil {
+ return fmt.Errorf("resource pool id not present")
+ }
+ model.Region = types.StringValue(region)
+ model.Id = utils.BuildInternalTerraformId(
+ model.ProjectId.ValueString(),
+ region,
+ utils.Coalesce(model.ResourcePoolId, types.StringPointerValue(resourcePool.Id)).ValueString(),
+ )
+ model.AvailabilityZone = types.StringPointerValue(resourcePool.AvailabilityZone)
+ model.ResourcePoolId = types.StringPointerValue(resourcePool.Id)
+ model.SnapshotsAreVisible = types.BoolPointerValue(resourcePool.SnapshotsAreVisible)
+
+ if resourcePool.IpAcl != nil {
+ var diags diag.Diagnostics
+ model.IpAcl, diags = types.ListValueFrom(ctx, types.StringType, resourcePool.IpAcl)
+ if diags.HasError() {
+ return fmt.Errorf("failed to map ip acls: %w", core.DiagsToError(diags))
+ }
+ } else {
+ model.IpAcl = types.ListNull(types.StringType)
+ }
+
+ model.Name = types.StringPointerValue(resourcePool.Name)
+ if pc := resourcePool.PerformanceClass; pc != nil {
+ model.PerformanceClass = types.StringPointerValue(pc.Name)
+ }
+
+ if resourcePool.Space != nil {
+ model.SizeGigabytes = types.Int64PointerValue(resourcePool.Space.SizeGigabytes)
+ }
+
+ return nil
+}
+
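+// toCreatePayload converts the Terraform model into a resource pool create payload.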
+func toCreatePayload(model *Model) (*sfs.CreateResourcePoolPayload, error) {
+ if model == nil {
+ return nil, fmt.Errorf("nil model")
+ }
+ var (
+ aclList *[]string
+ )
+ if !utils.IsUndefined(model.IpAcl) {
+ tmp, err := utils.ListValuetoStringSlice(model.IpAcl)
+ if err != nil {
+ return nil, fmt.Errorf("cannot get acl ip list from model: %w", err)
+ }
+ aclList = &tmp
+ }
+
+ result := &sfs.CreateResourcePoolPayload{
+ AvailabilityZone: model.AvailabilityZone.ValueStringPointer(),
+ IpAcl: aclList,
+ Name: model.Name.ValueStringPointer(),
+ PerformanceClass: model.PerformanceClass.ValueStringPointer(),
+ SizeGigabytes: model.SizeGigabytes.ValueInt64Pointer(),
+ SnapshotsAreVisible: model.SnapshotsAreVisible.ValueBoolPointer(),
+ }
+ return result, nil
+}
+
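+// toUpdatePayload converts the Terraform model into a resource pool update payload.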
+func toUpdatePayload(model *Model) (*sfs.UpdateResourcePoolPayload, error) {
+ if model == nil {
+ return nil, fmt.Errorf("nil model")
+ }
+ var (
+ aclList *[]string
+ )
+ if !utils.IsUndefined(model.IpAcl) {
+ tmp, err := utils.ListValuetoStringSlice(model.IpAcl)
+ if err != nil {
+ return nil, fmt.Errorf("cannot get acl ip list from model: %w", err)
+ }
+ aclList = &tmp
+ }
+
+ result := &sfs.UpdateResourcePoolPayload{
+ IpAcl: aclList,
+ PerformanceClass: model.PerformanceClass.ValueStringPointer(),
+ SizeGigabytes: model.SizeGigabytes.ValueInt64Pointer(),
+ SnapshotsAreVisible: model.SnapshotsAreVisible.ValueBoolPointer(),
+ }
+ return result, nil
+}
diff --git a/stackit/internal/services/sfs/resourcepool/resource_test.go b/stackit/internal/services/sfs/resourcepool/resource_test.go
new file mode 100644
index 000000000..86eaf3a20
--- /dev/null
+++ b/stackit/internal/services/sfs/resourcepool/resource_test.go
@@ -0,0 +1,263 @@
+package resourcepool
+
+import (
+ "context"
+ _ "embed"
+ "reflect"
+ "testing"
+ "time"
+
+ "github.com/google/go-cmp/cmp"
+ "github.com/google/uuid"
+ "github.com/hashicorp/terraform-plugin-framework/attr"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+ "github.com/stackitcloud/stackit-sdk-go/core/utils"
+ "github.com/stackitcloud/stackit-sdk-go/services/sfs"
+)
+
+var (
+ testProjectId = types.StringValue(uuid.NewString())
+ testResourcePoolId = types.StringValue(uuid.NewString())
+ testRegion = types.StringValue("eu01")
+ testId = types.StringValue(testProjectId.ValueString() + "," + testRegion.ValueString() + "," + testResourcePoolId.ValueString())
+ testAvailabilityZone = types.StringValue("some zone")
+ testIpAcl = types.ListValueMust(types.StringType, []attr.Value{types.StringValue("foo"), types.StringValue("bar"), types.StringValue("baz")})
+)
+
+func TestMapFields(t *testing.T) {
+ testTime := time.Now()
+ tests := []struct {
+ name string
+ state *Model
+ region string
+ input *sfs.GetResourcePoolResponseResourcePool
+ expected *Model
+ isValid bool
+ }{
+ {
+ "default_values",
+ &Model{
+ Id: testId,
+ ProjectId: testProjectId,
+ },
+ testRegion.ValueString(),
+ &sfs.GetResourcePoolResponseResourcePool{
+ Id: testResourcePoolId.ValueStringPointer(),
+ },
+ &Model{
+ Id: testId,
+ ProjectId: testProjectId,
+ ResourcePoolId: testResourcePoolId,
+ AvailabilityZone: types.StringNull(),
+ IpAcl: types.ListNull(types.StringType),
+ Name: types.StringNull(),
+ PerformanceClass: types.StringNull(),
+ SizeGigabytes: types.Int64Null(),
+ Region: testRegion,
+ },
+ true,
+ },
+ {
+ name: "simple_values",
+ state: &Model{
+ Id: testId,
+ ProjectId: testProjectId,
+ },
+ region: testRegion.ValueString(),
+ input: &sfs.GetResourcePoolResponseResourcePool{
+ AvailabilityZone: testAvailabilityZone.ValueStringPointer(),
+ CountShares: utils.Ptr[int64](42),
+ CreatedAt: &testTime,
+ Id: testResourcePoolId.ValueStringPointer(),
+ IpAcl: &[]string{"foo", "bar", "baz"},
+ MountPath: utils.Ptr("mountpoint"),
+ Name: utils.Ptr("testname"),
+ PerformanceClass: &sfs.ResourcePoolPerformanceClass{
+ Name: utils.Ptr("performance"),
+ PeakIops: utils.Ptr[int64](42),
+ Throughput: utils.Ptr[int64](54),
+ },
+ PerformanceClassDowngradableAt: &testTime,
+ SizeReducibleAt: &testTime,
+ Space: &sfs.ResourcePoolSpace{
+ SizeGigabytes: utils.Ptr[int64](42),
+ },
+ State: utils.Ptr("state"),
+ },
+ expected: &Model{
+ Id: testId,
+ ProjectId: testProjectId,
+ ResourcePoolId: testResourcePoolId,
+ AvailabilityZone: testAvailabilityZone,
+ IpAcl: types.ListValueMust(types.StringType, []attr.Value{
+ types.StringValue("foo"),
+ types.StringValue("bar"),
+ types.StringValue("baz"),
+ }),
+ Name: types.StringValue("testname"),
+ PerformanceClass: types.StringValue("performance"),
+ SizeGigabytes: types.Int64Value(42),
+ Region: testRegion,
+ },
+ isValid: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ ctx := context.Background()
+ if err := mapFields(ctx, tt.region, tt.input, tt.state); (err == nil) != tt.isValid {
+ t.Errorf("unexpected error")
+ }
+ if tt.isValid {
+ if diff := cmp.Diff(tt.state, tt.expected); diff != "" {
+ t.Fatalf("Data does not match: %s", diff)
+ }
+ }
+ })
+ }
+}
+
+func TestToCreatePayload(t *testing.T) {
+ tests := []struct {
+ name string
+ model *Model
+ want *sfs.CreateResourcePoolPayload
+ wantErr bool
+ }{
+ {
+ "default",
+ &Model{
+ Id: testId,
+ ProjectId: testProjectId,
+ ResourcePoolId: testResourcePoolId,
+ AvailabilityZone: testAvailabilityZone,
+ IpAcl: testIpAcl,
+ Name: types.StringValue("testname"),
+ PerformanceClass: types.StringValue("performance"),
+ SizeGigabytes: types.Int64Value(42),
+ },
+ &sfs.CreateResourcePoolPayload{
+ AvailabilityZone: testAvailabilityZone.ValueStringPointer(),
+ IpAcl: utils.Ptr([]string{"foo", "bar", "baz"}),
+ Name: utils.Ptr("testname"),
+ PerformanceClass: utils.Ptr("performance"),
+ SizeGigabytes: utils.Ptr[int64](42),
+ },
+ false,
+ },
+ {
+ "undefined ACL",
+ &Model{
+ Id: testId,
+ ProjectId: testProjectId,
+ ResourcePoolId: testResourcePoolId,
+ AvailabilityZone: testAvailabilityZone,
+ IpAcl: types.ListNull(types.StringType),
+ Name: types.StringValue("testname"),
+ PerformanceClass: types.StringValue("performance"),
+ SizeGigabytes: types.Int64Value(42),
+ },
+ &sfs.CreateResourcePoolPayload{
+ AvailabilityZone: testAvailabilityZone.ValueStringPointer(),
+ IpAcl: nil,
+ Name: utils.Ptr("testname"),
+ PerformanceClass: utils.Ptr("performance"),
+ SizeGigabytes: utils.Ptr[int64](42),
+ },
+ false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got, err := toCreatePayload(tt.model)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("toCreatePayload() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ if !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("toCreatePayload() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func TestToUpdatePayload(t *testing.T) {
+ tests := []struct {
+ name string
+ model *Model
+ want *sfs.UpdateResourcePoolPayload
+ wantErr bool
+ }{
+ {
+ "default",
+ &Model{
+ Id: testId,
+ ProjectId: testProjectId,
+ ResourcePoolId: testResourcePoolId,
+ AvailabilityZone: testAvailabilityZone,
+ IpAcl: testIpAcl,
+ Name: types.StringValue("testname"),
+ PerformanceClass: types.StringValue("performance"),
+ SizeGigabytes: types.Int64Value(42),
+ SnapshotsAreVisible: types.BoolValue(true),
+ },
+ &sfs.UpdateResourcePoolPayload{
+ IpAcl: utils.Ptr([]string{"foo", "bar", "baz"}),
+ PerformanceClass: utils.Ptr("performance"),
+ SizeGigabytes: utils.Ptr[int64](42),
+ SnapshotsAreVisible: utils.Ptr[bool](true),
+ },
+ false,
+ },
+ {
+ "undefined ACL",
+ &Model{
+ Id: testId,
+ ProjectId: testProjectId,
+ ResourcePoolId: testResourcePoolId,
+ AvailabilityZone: testAvailabilityZone,
+ IpAcl: types.ListNull(types.StringType),
+ Name: types.StringValue("testname"),
+ PerformanceClass: types.StringValue("performance"),
+ SizeGigabytes: types.Int64Value(42),
+ },
+ &sfs.UpdateResourcePoolPayload{
+ IpAcl: nil,
+ PerformanceClass: utils.Ptr("performance"),
+ SizeGigabytes: utils.Ptr[int64](42),
+ },
+ false,
+ },
+ {
+ "empty ACL",
+ &Model{
+ Id: testId,
+ ProjectId: testProjectId,
+ ResourcePoolId: testResourcePoolId,
+ AvailabilityZone: testAvailabilityZone,
+ IpAcl: types.ListValueMust(types.StringType, []attr.Value{}),
+ Name: types.StringValue("testname"),
+ PerformanceClass: types.StringValue("performance"),
+ SizeGigabytes: types.Int64Value(42),
+ },
+ &sfs.UpdateResourcePoolPayload{
+ IpAcl: utils.Ptr([]string{}),
+ PerformanceClass: utils.Ptr("performance"),
+ SizeGigabytes: utils.Ptr[int64](42),
+ },
+ false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got, err := toUpdatePayload(tt.model)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("toUpdatePayload() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ if !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("toUpdatePayload() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
diff --git a/stackit/internal/services/sfs/sfs_acc_test.go b/stackit/internal/services/sfs/sfs_acc_test.go
new file mode 100644
index 000000000..441cf44c9
--- /dev/null
+++ b/stackit/internal/services/sfs/sfs_acc_test.go
@@ -0,0 +1,536 @@
+package sfs_test
+
+import (
+ "context"
+ "fmt"
+ "strings"
+ "testing"
+ "text/template"
+
+ "github.com/hashicorp/terraform-plugin-testing/helper/acctest"
+ "github.com/hashicorp/terraform-plugin-testing/helper/resource"
+ "github.com/hashicorp/terraform-plugin-testing/terraform"
+ "github.com/stackitcloud/stackit-sdk-go/core/config"
+ "github.com/stackitcloud/stackit-sdk-go/core/utils"
+ "github.com/stackitcloud/stackit-sdk-go/services/sfs"
+ "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/core"
+ "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/testutil"
+)
+
+var exportPolicyResource = map[string]string{
+ "name": fmt.Sprintf("acc-sfs-%s", acctest.RandStringFromCharSet(5, acctest.CharSetAlphaNum)),
+ "project_id": testutil.ProjectId,
+ "region": "eu01",
+ "ip_acl_1": "172.16.0.0/24",
+ "ip_acl_2": "172.16.0.250/32",
+ "ip_acl_1_update": "172.17.0.0/24",
+ "ip_acl_2_update": "172.17.0.250/32",
+}
+
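+// resourceConfigExportPolicy renders a Terraform configuration with a single export policy rule.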
+func resourceConfigExportPolicy() string {
+ return fmt.Sprintf(`
+ %s
+
+ resource "stackit_sfs_export_policy" "exportpolicy" {
+ project_id = "%s"
+ name = "%s"
+ rules = [
+ {
+ ip_acl = [%q, %q]
+ order = 1
+ }
+ ]
+ }
+ `,
+ testutil.SFSProviderConfig(),
+ exportPolicyResource["project_id"],
+ exportPolicyResource["name"],
+ exportPolicyResource["ip_acl_1"],
+ exportPolicyResource["ip_acl_2"],
+ )
+}
+
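+// resourceConfigUpdateExportPolicy renders the updated configuration with a second rule.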
+func resourceConfigUpdateExportPolicy() string {
+ return fmt.Sprintf(`
+ %s
+
+ resource "stackit_sfs_export_policy" "exportpolicy" {
+ project_id = "%s"
+ name = "%s"
+ rules = [
+ {
+ ip_acl = [%q, %q]
+ order = 1
+ },
+ {
+ ip_acl = [%q, %q]
+ order = 2
+ }
+ ]
+ }
+ `,
+ testutil.SFSProviderConfig(),
+ exportPolicyResource["project_id"],
+ exportPolicyResource["name"],
+ exportPolicyResource["ip_acl_1"],
+ exportPolicyResource["ip_acl_2"],
+ exportPolicyResource["ip_acl_1_update"],
+ exportPolicyResource["ip_acl_2_update"],
+ )
+}
+
+var (
+ testCreateResourcePool = map[string]string{
+ "providerConfig": testutil.SFSProviderConfig(),
+ "name": fmt.Sprintf("acc-sfs-resource-pool-%s", acctest.RandStringFromCharSet(5, acctest.CharSetAlphaNum)),
+ "project_id": testutil.ProjectId,
+ "availability_zone": "eu01-m",
+ "performance_class": "Standard",
+ "acl": `["192.168.42.1/32", "192.168.42.2/32"]`,
+ "size_gigabytes": "500",
+ "snapshots_are_visible": "true",
+ }
+
+ testUpdateResourcePool = map[string]string{
+ "providerConfig": testutil.SFSProviderConfig(),
+ "name": fmt.Sprintf("acc-sfs-resource-pool-%s", acctest.RandStringFromCharSet(5, acctest.CharSetAlphaNum)),
+ "project_id": testutil.ProjectId,
+ "availability_zone": "eu01-m",
+ "performance_class": "Premium",
+ "acl": `["192.168.52.1/32", "192.168.52.2/32"]`,
+ "size_gigabytes": "500",
+ "snapshots_are_visible": "false",
+ }
+)
+
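+// resourcePoolConfig renders a Terraform configuration containing a resource pool and a matching data source.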
+func resourcePoolConfig(configParams map[string]string) string {
+ tmpl := template.Must(template.New("config").
+ Parse(`
+ {{.providerConfig}}
+
+ resource "stackit_sfs_resource_pool" "resourcepool" {
+ project_id = "{{.project_id}}"
+ name = "{{.name}}"
+ availability_zone = "{{.availability_zone}}"
+ performance_class = "{{.performance_class}}"
+ size_gigabytes = {{.size_gigabytes}}
+ ip_acl = {{.acl}}
+ snapshots_are_visible = {{.snapshots_are_visible}}
+ }
+
+ data "stackit_sfs_resource_pool" "resource_pool_ds" {
+ project_id = stackit_sfs_resource_pool.resourcepool.project_id
+ resource_pool_id = stackit_sfs_resource_pool.resourcepool.resource_pool_id
+ }
+ `))
+ var buffer strings.Builder
+ if err := tmpl.ExecuteTemplate(&buffer, "config", configParams); err != nil {
+ panic(fmt.Sprintf("cannot render template: %v", err))
+ }
+ return buffer.String()
+}
+
+var (
+ testCreateShare = map[string]string{
+ "providerConfig": testutil.SFSProviderConfig(),
+ "resource_pool_name": fmt.Sprintf("acc-sfs-resource-pool-%s", acctest.RandStringFromCharSet(5, acctest.CharSetAlphaNum)),
+ "name": fmt.Sprintf("acc-sfs-share-%s", acctest.RandStringFromCharSet(5, acctest.CharSetAlphaNum)),
+ "project_id": testutil.ProjectId,
+ "region": "eu01",
+ "space_hard_limit_gigabytes": "42",
+ }
+
+ testUpdateShare = map[string]string{
+ "providerConfig": testutil.SFSProviderConfig(),
+ "resource_pool_name": fmt.Sprintf("acc-sfs-resource-pool-%s", acctest.RandStringFromCharSet(5, acctest.CharSetAlphaNum)),
+ "name": fmt.Sprintf("acc-sfs-share-%s", acctest.RandStringFromCharSet(5, acctest.CharSetAlphaNum)),
+ "project_id": testutil.ProjectId,
+ "region": "eu02",
+ "space_hard_limit_gigabytes": "42",
+ }
+)
+
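+// nsfShareConfig renders a Terraform configuration with a resource pool, an export policy, a share, and a share data source.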
+func nsfShareConfig(configParams map[string]string) string {
+ tmpl := template.Must(template.New("config").
+ Parse(`
+ {{.providerConfig}}
+
+
+ resource "stackit_sfs_resource_pool" "resourcepool" {
+ project_id = "{{.project_id}}"
+ name = "{{.resource_pool_name}}"
+ availability_zone = "eu01-m"
+ performance_class = "Standard"
+ size_gigabytes = 512
+ ip_acl = ["192.168.42.1/32"]
+ region = "eu01"
+ }
+
+ resource "stackit_sfs_export_policy" "exportpolicy" {
+ project_id = "{{.project_id}}"
+ name = "{{.name}}"
+ rules = [
+ {
+ ip_acl = ["192.168.2.0/24"]
+ order = 1
+ }
+ ]
+ }
+
+ resource "stackit_sfs_share" "share" {
+ project_id = "{{.project_id}}"
+ resource_pool_id = stackit_sfs_resource_pool.resourcepool.resource_pool_id
+ name = "{{.name}}"
+ export_policy = stackit_sfs_export_policy.exportpolicy.name
+ space_hard_limit_gigabytes = {{.space_hard_limit_gigabytes}}
+ }
+
+ data "stackit_sfs_share" "share_ds" {
+ project_id = "{{.project_id}}"
+ resource_pool_id = stackit_sfs_resource_pool.resourcepool.resource_pool_id
+ share_id = stackit_sfs_share.share.share_id
+ }
+
+ `))
+ var buffer strings.Builder
+ if err := tmpl.ExecuteTemplate(&buffer, "config", configParams); err != nil {
+ panic(fmt.Sprintf("cannot render template: %v", err))
+ }
+ return buffer.String()
+}
+
+func TestAccExportPolicyResource(t *testing.T) {
+ resource.Test(t, resource.TestCase{
+ ProtoV6ProviderFactories: testutil.TestAccProtoV6ProviderFactories,
+ CheckDestroy: testAccExportPolicyDestroy,
+ Steps: []resource.TestStep{
+ // Creation
+ {
+ Config: resourceConfigExportPolicy(),
+ Check: resource.ComposeAggregateTestCheckFunc(
+ resource.TestCheckResourceAttr("stackit_sfs_export_policy.exportpolicy", "project_id", exportPolicyResource["project_id"]),
+ resource.TestCheckResourceAttrSet("stackit_sfs_export_policy.exportpolicy", "id"),
+ resource.TestCheckResourceAttr("stackit_sfs_export_policy.exportpolicy", "name", exportPolicyResource["name"]),
+ // check rule
+ resource.TestCheckResourceAttr("stackit_sfs_export_policy.exportpolicy", "rules.#", "1"),
+ resource.TestCheckResourceAttr("stackit_sfs_export_policy.exportpolicy", "rules.0.order", "1"),
+ resource.TestCheckResourceAttr("stackit_sfs_export_policy.exportpolicy", "rules.0.ip_acl.#", "2"),
+ resource.TestCheckResourceAttr("stackit_sfs_export_policy.exportpolicy", "rules.0.ip_acl.0", exportPolicyResource["ip_acl_1"]),
+ resource.TestCheckResourceAttr("stackit_sfs_export_policy.exportpolicy", "rules.0.ip_acl.1", exportPolicyResource["ip_acl_2"]),
+
+ resource.TestCheckResourceAttr("stackit_sfs_export_policy.exportpolicy", "rules.0.read_only", "false"),
+ resource.TestCheckResourceAttr("stackit_sfs_export_policy.exportpolicy", "rules.0.set_uuid", "false"),
+ resource.TestCheckResourceAttr("stackit_sfs_export_policy.exportpolicy", "rules.0.super_user", "true"),
+ ),
+ },
+ // data source
+ {
+ Config: fmt.Sprintf(`
+ %s
+
+ data "stackit_sfs_export_policy" "policy_data_test" {
+ project_id = stackit_sfs_export_policy.exportpolicy.project_id
+ policy_id = stackit_sfs_export_policy.exportpolicy.policy_id
+ }
+
+ `,
+ resourceConfigExportPolicy(),
+ ),
+ Check: resource.ComposeAggregateTestCheckFunc(
+ resource.TestCheckResourceAttr("stackit_sfs_export_policy.exportpolicy", "project_id", exportPolicyResource["project_id"]),
+ resource.TestCheckResourceAttrSet("stackit_sfs_export_policy.exportpolicy", "id"),
+ resource.TestCheckResourceAttr("stackit_sfs_export_policy.exportpolicy", "name", exportPolicyResource["name"]),
+ // check rule
+ resource.TestCheckResourceAttr("stackit_sfs_export_policy.exportpolicy", "rules.#", "1"),
+ resource.TestCheckResourceAttr("stackit_sfs_export_policy.exportpolicy", "rules.0.order", "1"),
+ resource.TestCheckResourceAttr("stackit_sfs_export_policy.exportpolicy", "rules.0.ip_acl.#", "2"),
+ resource.TestCheckResourceAttr("stackit_sfs_export_policy.exportpolicy", "rules.0.ip_acl.0", exportPolicyResource["ip_acl_1"]),
+ resource.TestCheckResourceAttr("stackit_sfs_export_policy.exportpolicy", "rules.0.ip_acl.1", exportPolicyResource["ip_acl_2"]),
+
+ resource.TestCheckResourceAttr("stackit_sfs_export_policy.exportpolicy", "rules.0.read_only", "false"),
+ resource.TestCheckResourceAttr("stackit_sfs_export_policy.exportpolicy", "rules.0.set_uuid", "false"),
+ resource.TestCheckResourceAttr("stackit_sfs_export_policy.exportpolicy", "rules.0.super_user", "true"),
+
+ // data source
+ resource.TestCheckResourceAttr("data.stackit_sfs_export_policy.policy_data_test", "project_id", exportPolicyResource["project_id"]),
+ resource.TestCheckResourceAttrSet("data.stackit_sfs_export_policy.policy_data_test", "policy_id"),
+ ),
+ },
+ // Import
+ {
+ ResourceName: "stackit_sfs_export_policy.exportpolicy",
+ ImportStateIdFunc: func(s *terraform.State) (string, error) {
+ r, ok := s.RootModule().Resources["stackit_sfs_export_policy.exportpolicy"]
+ if !ok {
+ return "", fmt.Errorf("couldn't find resource stackit_sfs_export_policy.exportpolicy")
+ }
+ policyId, ok := r.Primary.Attributes["policy_id"]
+ if !ok {
+ return "", fmt.Errorf("couldn't find attribute policy_id")
+ }
+ return fmt.Sprintf("%s,%s,%s", testutil.ProjectId, testutil.Region, policyId), nil
+ },
+ ImportState: true,
+ ImportStateVerify: true,
+ },
+ // Update
+ {
+ Config: resourceConfigUpdateExportPolicy(),
+ Check: resource.ComposeAggregateTestCheckFunc(
+ resource.TestCheckResourceAttr("stackit_sfs_export_policy.exportpolicy", "project_id", exportPolicyResource["project_id"]),
+ resource.TestCheckResourceAttrSet("stackit_sfs_export_policy.exportpolicy", "id"),
+ resource.TestCheckResourceAttr("stackit_sfs_export_policy.exportpolicy", "name", exportPolicyResource["name"]),
+ // check rules
+ resource.TestCheckResourceAttr("stackit_sfs_export_policy.exportpolicy", "rules.#", "2"),
+ resource.TestCheckResourceAttr("stackit_sfs_export_policy.exportpolicy", "rules.0.order", "1"),
+ resource.TestCheckResourceAttr("stackit_sfs_export_policy.exportpolicy", "rules.0.ip_acl.#", "2"),
+ resource.TestCheckResourceAttr("stackit_sfs_export_policy.exportpolicy", "rules.0.ip_acl.0", exportPolicyResource["ip_acl_1"]),
+ resource.TestCheckResourceAttr("stackit_sfs_export_policy.exportpolicy", "rules.0.ip_acl.1", exportPolicyResource["ip_acl_2"]),
+ resource.TestCheckResourceAttr("stackit_sfs_export_policy.exportpolicy", "rules.0.read_only", "false"),
+ resource.TestCheckResourceAttr("stackit_sfs_export_policy.exportpolicy", "rules.0.set_uuid", "false"),
+ resource.TestCheckResourceAttr("stackit_sfs_export_policy.exportpolicy", "rules.0.super_user", "true"),
+
+ resource.TestCheckResourceAttr("stackit_sfs_export_policy.exportpolicy", "rules.1.order", "2"),
+ resource.TestCheckResourceAttr("stackit_sfs_export_policy.exportpolicy", "rules.1.ip_acl.#", "2"),
+ resource.TestCheckResourceAttr("stackit_sfs_export_policy.exportpolicy", "rules.1.ip_acl.0", exportPolicyResource["ip_acl_1_update"]),
+ resource.TestCheckResourceAttr("stackit_sfs_export_policy.exportpolicy", "rules.1.ip_acl.1", exportPolicyResource["ip_acl_2_update"]),
+ resource.TestCheckResourceAttr("stackit_sfs_export_policy.exportpolicy", "rules.1.read_only", "false"),
+ resource.TestCheckResourceAttr("stackit_sfs_export_policy.exportpolicy", "rules.1.set_uuid", "false"),
+ resource.TestCheckResourceAttr("stackit_sfs_export_policy.exportpolicy", "rules.1.super_user", "true"),
+ ),
+ },
+ // Deletion is done by the framework implicitly
+ },
+ })
+}
+
+func TestAccResourcePoolResource(t *testing.T) {
+ resource.Test(t, resource.TestCase{
+ ProtoV6ProviderFactories: testutil.TestAccProtoV6ProviderFactories,
+ CheckDestroy: testAccResourcePoolDestroyed,
+ Steps: []resource.TestStep{
+ // Creation
+ {
+ Config: resourcePoolConfig(testCreateResourcePool),
+ Check: resource.ComposeAggregateTestCheckFunc(
+ resource.TestCheckResourceAttr("stackit_sfs_resource_pool.resourcepool", "project_id", testCreateResourcePool["project_id"]),
+ resource.TestCheckResourceAttrSet("stackit_sfs_resource_pool.resourcepool", "resource_pool_id"),
+ resource.TestCheckResourceAttr("stackit_sfs_resource_pool.resourcepool", "name", testCreateResourcePool["name"]),
+ resource.TestCheckResourceAttr("stackit_sfs_resource_pool.resourcepool", "availability_zone", testCreateResourcePool["availability_zone"]),
+ resource.TestCheckResourceAttr("stackit_sfs_resource_pool.resourcepool", "performance_class", testCreateResourcePool["performance_class"]),
+ resource.TestCheckResourceAttr("stackit_sfs_resource_pool.resourcepool", "size_gigabytes", testCreateResourcePool["size_gigabytes"]),
+ resource.TestCheckResourceAttr("stackit_sfs_resource_pool.resourcepool", "ip_acl.#", "2"),
+ resource.TestCheckResourceAttr("stackit_sfs_resource_pool.resourcepool", "ip_acl.0", "192.168.42.1/32"),
+ resource.TestCheckResourceAttr("stackit_sfs_resource_pool.resourcepool", "ip_acl.1", "192.168.42.2/32"),
+ resource.TestCheckResourceAttr("stackit_sfs_resource_pool.resourcepool", "snapshots_are_visible", testCreateResourcePool["snapshots_are_visible"]),
+
+ // datasource
+ resource.TestCheckResourceAttr("data.stackit_sfs_resource_pool.resource_pool_ds", "project_id", testCreateResourcePool["project_id"]),
+ resource.TestCheckResourceAttrSet("data.stackit_sfs_resource_pool.resource_pool_ds", "resource_pool_id"),
+ ),
+ },
+
+ { // import
+ ResourceName: "stackit_sfs_resource_pool.resourcepool",
+ ImportStateIdFunc: func(s *terraform.State) (string, error) {
+ res, found := s.RootModule().Resources["stackit_sfs_resource_pool.resourcepool"]
+ if !found {
+ return "", fmt.Errorf("could not find resource stackit_sfs_resource_pool.resourcepool")
+ }
+ resourcepoolId, ok := res.Primary.Attributes["resource_pool_id"]
+ if !ok {
+ return "", fmt.Errorf("resource pool id attribute not found")
+ }
+ return testCreateResourcePool["project_id"] + "," + testutil.Region + "," + resourcepoolId, nil
+ },
+ ImportState: true,
+ ImportStateVerify: true,
+ },
+
+ // Update
+ {
+ Config: resourcePoolConfig(testUpdateResourcePool),
+ Check: resource.ComposeAggregateTestCheckFunc(
+ // resource
+ resource.TestCheckResourceAttr("stackit_sfs_resource_pool.resourcepool", "project_id", testUpdateResourcePool["project_id"]),
+ resource.TestCheckResourceAttrSet("stackit_sfs_resource_pool.resourcepool", "resource_pool_id"),
+ resource.TestCheckResourceAttr("stackit_sfs_resource_pool.resourcepool", "name", testUpdateResourcePool["name"]),
+ resource.TestCheckResourceAttr("stackit_sfs_resource_pool.resourcepool", "availability_zone", testUpdateResourcePool["availability_zone"]),
+ resource.TestCheckResourceAttr("stackit_sfs_resource_pool.resourcepool", "performance_class", testUpdateResourcePool["performance_class"]),
+ resource.TestCheckResourceAttr("stackit_sfs_resource_pool.resourcepool", "size_gigabytes", testUpdateResourcePool["size_gigabytes"]),
+ resource.TestCheckResourceAttr("stackit_sfs_resource_pool.resourcepool", "ip_acl.#", "2"),
+ resource.TestCheckResourceAttr("stackit_sfs_resource_pool.resourcepool", "ip_acl.0", "192.168.52.1/32"),
+ resource.TestCheckResourceAttr("stackit_sfs_resource_pool.resourcepool", "ip_acl.1", "192.168.52.2/32"),
+ ),
+ },
+ },
+ })
+}
+
+func TestAccShareResource(t *testing.T) {
+ resource.Test(t, resource.TestCase{
+ ProtoV6ProviderFactories: testutil.TestAccProtoV6ProviderFactories,
+ CheckDestroy: testAccResourcePoolDestroyed,
+ Steps: []resource.TestStep{
+ // Creation
+ {
+ Config: nsfShareConfig(testCreateShare),
+ Check: resource.ComposeAggregateTestCheckFunc(
+ resource.TestCheckResourceAttr("stackit_sfs_share.share", "project_id", testCreateShare["project_id"]),
+ resource.TestCheckResourceAttrSet("stackit_sfs_share.share", "resource_pool_id"),
+ resource.TestCheckResourceAttrSet("stackit_sfs_share.share", "share_id"),
+ resource.TestCheckResourceAttr("stackit_sfs_share.share", "name", testCreateShare["name"]),
+ resource.TestCheckResourceAttr("stackit_sfs_share.share", "space_hard_limit_gigabytes", testCreateShare["space_hard_limit_gigabytes"]),
+ resource.TestCheckResourceAttrPair("stackit_sfs_share.share", "export_policy",
+ "stackit_sfs_export_policy.exportpolicy", "name"),
+
+ // datasource
+ resource.TestCheckResourceAttr("data.stackit_sfs_share.share_ds", "project_id", testCreateResourcePool["project_id"]),
+ resource.TestCheckResourceAttrSet("data.stackit_sfs_share.share_ds", "resource_pool_id"),
+ resource.TestCheckResourceAttrSet("data.stackit_sfs_share.share_ds", "share_id"),
+ ),
+ },
+
+ { // import
+ ResourceName: "stackit_sfs_share.share",
+ ImportStateIdFunc: func(s *terraform.State) (string, error) {
+ res, found := s.RootModule().Resources["stackit_sfs_share.share"]
+ if !found {
+ return "", fmt.Errorf("could not find resource stackit_sfs_share.share")
+ }
+ resourcepoolId, ok := res.Primary.Attributes["resource_pool_id"]
+ if !ok {
+ return "", fmt.Errorf("resource pool id attribute not found")
+ }
+ shareId, ok := res.Primary.Attributes["share_id"]
+ if !ok {
+ return "", fmt.Errorf("share id attribute not found")
+ }
+ return testCreateResourcePool["project_id"] + "," + testutil.Region + "," + resourcepoolId + "," + shareId, nil
+ },
+ ImportState: true,
+ ImportStateVerify: true,
+ },
+
+ // Update
+ {
+ Config: nsfShareConfig(testUpdateShare),
+ Check: resource.ComposeAggregateTestCheckFunc(
+ // resource
+ resource.TestCheckResourceAttr("stackit_sfs_share.share", "project_id", testUpdateShare["project_id"]),
+ resource.TestCheckResourceAttrSet("stackit_sfs_share.share", "resource_pool_id"),
+ resource.TestCheckResourceAttrSet("stackit_sfs_share.share", "share_id"),
+ resource.TestCheckResourceAttr("stackit_sfs_share.share", "name", testUpdateShare["name"]),
+ resource.TestCheckResourceAttr("stackit_sfs_share.share", "space_hard_limit_gigabytes", testUpdateShare["space_hard_limit_gigabytes"]),
+ resource.TestCheckResourceAttrPair("stackit_sfs_share.share", "export_policy",
+ "stackit_sfs_export_policy.exportpolicy", "name"),
+ ),
+ },
+ },
+ })
+}
+
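+// createClient builds an SFS API client for the acceptance tests, pointing it
+// at the custom endpoints from the test environment when they are configured.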
+func createClient() (*sfs.APIClient, error) {
+ var client *sfs.APIClient
+ var err error
+ if testutil.SFSCustomEndpoint == "" {
+ client, err = sfs.NewAPIClient()
+ } else {
+ client, err = sfs.NewAPIClient(
+ config.WithEndpoint(testutil.SFSCustomEndpoint),
+ config.WithTokenEndpoint(testutil.TokenCustomEndpoint),
+ )
+ }
+ if err != nil {
+ return nil, fmt.Errorf("creating client: %w", err)
+ }
+
+ return client, nil
+}
+
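+// testAccExportPolicyDestroy verifies after the test run that the export
+// policies tracked in the Terraform state are gone, deleting any leftovers.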
+func testAccExportPolicyDestroy(s *terraform.State) error {
+ ctx := context.Background()
+ client, err := createClient()
+ if err != nil {
+ return err
+ }
+
+ policyToDestroy := []string{}
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "stackit_sfs_export_policy" {
+ continue
+ }
+ // the export policy's Terraform ID has the format "[projectId],[region],[policyId]"
+ policyId := strings.Split(rs.Primary.ID, core.Separator)[2]
+ policyToDestroy = append(policyToDestroy, policyId)
+ }
+
+ policiesResp, err := client.ListShareExportPoliciesExecute(ctx, testutil.ProjectId, exportPolicyResource["region"])
+ if err != nil {
+ return fmt.Errorf("getting policiesResp: %w", err)
+ }
+
+ // iterate over the returned export policies
+ policies := *policiesResp.ShareExportPolicies
+ for i := range policies {
+ id := *policies[i].Id
+ if utils.Contains(policyToDestroy, id) {
+ _, err := client.DeleteShareExportPolicy(ctx, testutil.ProjectId, exportPolicyResource["region"], id).Execute()
+ if err != nil {
+ return fmt.Errorf("deleting policy %s during CheckDestroy: %w", *policies[i].Id, err)
+ }
+ }
+ }
+ return nil
+}
+
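+// testAccResourcePoolDestroyed verifies after the test run that the resource
+// pools tracked in the Terraform state are gone, deleting leftover shares and pools.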
+func testAccResourcePoolDestroyed(s *terraform.State) error {
+ ctx := context.Background()
+ client, err := createClient()
+ if err != nil {
+ return err
+ }
+
+ resourcePoolsToDestroy := []string{}
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "stackit_sfs_resource_pool" {
+ continue
+ }
+ // the resource pool's Terraform ID has the format "[projectId],[region],[resourcePoolId]"
+ resourcePoolId := strings.Split(rs.Primary.ID, core.Separator)[2]
+ resourcePoolsToDestroy = append(resourcePoolsToDestroy, resourcePoolId)
+ }
+
+ region := testutil.Region
+ resourcePoolsResp, err := client.ListResourcePoolsExecute(ctx, testutil.ProjectId, region)
+ if err != nil {
+ return fmt.Errorf("getting resource pools: %w", err)
+ }
+
+ // iterate over the resource pools in the response
+ for _, pool := range resourcePoolsResp.GetResourcePools() {
+ id := pool.Id
+
+ if utils.Contains(resourcePoolsToDestroy, *id) {
+ shares, err := client.ListSharesExecute(ctx, testutil.ProjectId, region, *id)
+ if err != nil {
+ return fmt.Errorf("cannot list shares: %w", err)
+ }
+ if shares.Shares != nil {
+ for _, share := range *shares.Shares {
+ _, err := client.DeleteShareExecute(ctx, testutil.ProjectId, region, *id, *share.Id)
+ if err != nil {
+ return fmt.Errorf("cannot delete share %q in pool %q: %w", *share.Id, *id, err)
+ }
+ }
+ }
+
+ _, err = client.DeleteResourcePool(ctx, testutil.ProjectId, region, *id).
+ Execute()
+ if err != nil {
+ return fmt.Errorf("deleting resourcepool %s during CheckDestroy: %w", *pool.Id, err)
+ }
+ }
+ }
+ return nil
+}
diff --git a/stackit/internal/services/sfs/share/datasource.go b/stackit/internal/services/sfs/share/datasource.go
new file mode 100644
index 000000000..6f7a246c7
--- /dev/null
+++ b/stackit/internal/services/sfs/share/datasource.go
@@ -0,0 +1,222 @@
+package share
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net/http"
+
+ "github.com/hashicorp/terraform-plugin-framework/datasource"
+ "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
+ "github.com/hashicorp/terraform-plugin-framework/schema/validator"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+ "github.com/hashicorp/terraform-plugin-log/tflog"
+ "github.com/stackitcloud/stackit-sdk-go/core/oapierror"
+ "github.com/stackitcloud/stackit-sdk-go/services/sfs"
+ "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/conversion"
+ "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/core"
+ "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/features"
+ sfsUtils "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/services/sfs/utils"
+ "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/utils"
+ "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/validate"
+)
+
+var (
+ _ datasource.DataSource = (*shareDataSource)(nil)
+ _ datasource.DataSourceWithConfigure = (*shareDataSource)(nil)
+)
+
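+// dataSourceModel maps the stackit_sfs_share datasource schema data.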
+type dataSourceModel struct {
+ Id types.String `tfsdk:"id"` // needed by TF
+ ProjectId types.String `tfsdk:"project_id"`
+ ResourcePoolId types.String `tfsdk:"resource_pool_id"`
+ ShareId types.String `tfsdk:"share_id"`
+ Name types.String `tfsdk:"name"`
+ MountPath types.String `tfsdk:"mount_path"`
+ SpaceHardLimitGigabytes types.Int64 `tfsdk:"space_hard_limit_gigabytes"`
+ ExportPolicyName types.String `tfsdk:"export_policy"`
+ Region types.String `tfsdk:"region"`
+}
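+
+// shareDataSource is the datasource implementation.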
+type shareDataSource struct {
+ client *sfs.APIClient
+ providerData core.ProviderData
+}
+
+func NewShareDataSource() datasource.DataSource {
+ return &shareDataSource{}
+}
+
+// Configure implements datasource.DataSourceWithConfigure.
+func (r *shareDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
+ var ok bool
+ r.providerData, ok = conversion.ParseProviderData(ctx, req.ProviderData, &resp.Diagnostics)
+ if !ok {
+ return
+ }
+
+ features.CheckBetaResourcesEnabled(ctx, &r.providerData, &resp.Diagnostics, "stackit_sfs_share", core.Datasource)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ apiClient := sfsUtils.ConfigureClient(ctx, &r.providerData, &resp.Diagnostics)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+ r.client = apiClient
+ tflog.Info(ctx, "SFS client configured")
+}
+
+// Metadata implements datasource.DataSource.
+func (r *shareDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
+ resp.TypeName = req.ProviderTypeName + "_sfs_share"
+}
+
+// Read implements datasource.DataSource.
+func (r *shareDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { // nolint:gocritic // function signature required by Terraform
+ var model dataSourceModel
+ diags := req.Config.Get(ctx, &model)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+ projectId := model.ProjectId.ValueString()
+ resourcePoolId := model.ResourcePoolId.ValueString()
+ shareId := model.ShareId.ValueString()
+ region := r.providerData.GetRegionWithOverride(model.Region)
+ ctx = tflog.SetField(ctx, "project_id", projectId)
+ ctx = tflog.SetField(ctx, "resource_pool_id", resourcePoolId)
+ ctx = tflog.SetField(ctx, "share_id", shareId)
+ ctx = tflog.SetField(ctx, "region", region)
+
+ ctx = core.InitProviderContext(ctx)
+
+ response, err := r.client.GetShareExecute(ctx, projectId, region, resourcePoolId, shareId)
+ if err != nil {
+ var openapiError *oapierror.GenericOpenAPIError
+ if errors.As(err, &openapiError) {
+ if openapiError.StatusCode == http.StatusNotFound {
+ resp.State.RemoveResource(ctx)
+ return
+ }
+ }
+ core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading share", fmt.Sprintf("Calling API: %v", err))
+ return
+ }
+
+ ctx = core.LogResponse(ctx)
+
+ // Map response body to schema
+ err = mapDataSourceFields(ctx, region, response.Share, &model)
+ if err != nil {
+ core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading share", fmt.Sprintf("Processing API payload: %v", err))
+ return
+ }
+ // Set refreshed state
+ diags = resp.State.Set(ctx, model)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+ tflog.Info(ctx, "SFS share read")
+}
+
+// Schema implements datasource.DataSource.
+func (r *shareDataSource) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
+ description := "SFS Share schema. Must have a `region` specified in the provider configuration."
+ resp.Schema = schema.Schema{
+ MarkdownDescription: features.AddBetaDescription(description, core.Datasource),
+ Description: description,
+ Attributes: map[string]schema.Attribute{
+ "id": schema.StringAttribute{
+ Description: "Terraform's internal resource ID. It is structured as \"`project_id`,`share_id`\".",
+ Computed: true,
+ },
+ "project_id": schema.StringAttribute{
+ Description: "STACKIT project ID to which the share is associated.",
+ Required: true,
+ Validators: []validator.String{
+ validate.UUID(),
+ validate.NoSeparator(),
+ },
+ },
+ "resource_pool_id": schema.StringAttribute{
+ Description: "The ID of the resource pool for the SFS share.",
+ Required: true,
+ Validators: []validator.String{
+ validate.UUID(),
+ validate.NoSeparator(),
+ },
+ },
+ "share_id": schema.StringAttribute{
+ Description: "share ID",
+ Required: true,
+ Validators: []validator.String{
+ validate.UUID(),
+ validate.NoSeparator(),
+ },
+ },
+ "export_policy": schema.StringAttribute{
+ Description: `Name of the Share Export Policy used by the Share.
+Note that if this is not set, the Share can only be mounted read-only by
+clients whose IPs match the IP ACL of the Resource Pool hosting this Share.
+A Share Export Policy can also be assigned after the Share has been created.`,
+ Computed: true,
+ },
+ "space_hard_limit_gigabytes": schema.Int64Attribute{
+ Computed: true,
+ Description: `Space hard limit for the Share.
+ If zero, the Share will have access to the full space of the Resource Pool it lives in.
+ (unit: gigabytes)`,
+ },
+ "mount_path": schema.StringAttribute{
+ Computed: true,
+ Description: "Mount path of the Share, used to mount the Share",
+ },
+ "name": schema.StringAttribute{
+ Computed: true,
+ Description: "Name of the Share",
+ },
+ "region": schema.StringAttribute{
+ // the region cannot be found automatically, so it has to be passed
+ Optional: true,
+ Description: "The resource region. Read-only attribute that reflects the provider region.",
+ },
+ },
+ }
+}
+
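+// mapDataSourceFields maps the API share response into the datasource model and
+// rebuilds the internal composite ID.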
+func mapDataSourceFields(_ context.Context, region string, share *sfs.GetShareResponseShare, model *dataSourceModel) error {
+ if share == nil {
+ return fmt.Errorf("share empty in response")
+ }
+ if model == nil {
+ return fmt.Errorf("model input is nil")
+ }
+
+ if share.Id == nil {
+ return fmt.Errorf("share id not present")
+ }
+ model.ShareId = types.StringPointerValue(share.Id)
+ model.Region = types.StringValue(region)
+
+ model.Id = utils.BuildInternalTerraformId(
+ model.ProjectId.ValueString(),
+ region,
+ model.ResourcePoolId.ValueString(),
+ utils.Coalesce(model.ShareId, types.StringPointerValue(share.Id)).ValueString(),
+ )
+ model.Name = types.StringPointerValue(share.Name)
+ if policy := share.ExportPolicy.Get(); policy != nil {
+ model.ExportPolicyName = types.StringPointerValue(policy.Name)
+ }
+
+ model.SpaceHardLimitGigabytes = types.Int64PointerValue(share.SpaceHardLimitGigabytes)
+
+ model.MountPath = types.StringPointerValue(share.MountPath)
+
+ return nil
+}
diff --git a/stackit/internal/services/sfs/share/datasource_test.go b/stackit/internal/services/sfs/share/datasource_test.go
new file mode 100644
index 000000000..24cdc6ae5
--- /dev/null
+++ b/stackit/internal/services/sfs/share/datasource_test.go
@@ -0,0 +1,67 @@
+package share
+
+import (
+ "context"
+ "testing"
+
+ "github.com/google/go-cmp/cmp"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+ "github.com/stackitcloud/stackit-sdk-go/core/utils"
+ "github.com/stackitcloud/stackit-sdk-go/services/sfs"
+)
+
+func TestMapDatasourceFields(t *testing.T) {
+ tests := []struct {
+ name string
+ state *dataSourceModel
+ region string
+ input *sfs.GetShareResponseShare
+ expected *dataSourceModel
+ isValid bool
+ }{
+ {
+ "default_values",
+ &dataSourceModel{
+ Id: testId,
+ ProjectId: testProjectId,
+ ResourcePoolId: testResourcePoolId,
+ },
+ "eu01",
+ &sfs.GetShareResponseShare{
+ ExportPolicy: sfs.NewNullableShareExportPolicy(&sfs.ShareExportPolicy{
+ Id: testId.ValueStringPointer(),
+ Name: utils.Ptr("test-policy"),
+ }),
+ Id: testShareId.ValueStringPointer(),
+ MountPath: utils.Ptr("/testmount"),
+ Name: utils.Ptr("test-name"),
+ SpaceHardLimitGigabytes: utils.Ptr[int64](42),
+ },
+ &dataSourceModel{
+ Id: testId,
+ ProjectId: testProjectId,
+ ResourcePoolId: testResourcePoolId,
+ ShareId: testShareId,
+ Name: types.StringValue("test-name"),
+ ExportPolicyName: testPolicyName,
+ SpaceHardLimitGigabytes: types.Int64Value(42),
+ MountPath: types.StringValue("/testmount"),
+ Region: testRegion,
+ },
+ true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ ctx := context.Background()
+ if err := mapDataSourceFields(ctx, tt.region, tt.input, tt.state); (err == nil) != tt.isValid {
+ t.Errorf("unexpected error")
+ }
+ if tt.isValid {
+ if diff := cmp.Diff(tt.state, tt.expected); diff != "" {
+ t.Fatalf("Data does not match: %s", diff)
+ }
+ }
+ })
+ }
+}
diff --git a/stackit/internal/services/sfs/share/resource.go b/stackit/internal/services/sfs/share/resource.go
new file mode 100644
index 000000000..56b021bd1
--- /dev/null
+++ b/stackit/internal/services/sfs/share/resource.go
@@ -0,0 +1,531 @@
+package share
+
+import (
+ "context"
+ _ "embed"
+ "errors"
+ "fmt"
+ "net/http"
+ "strings"
+
+ "github.com/hashicorp/terraform-plugin-framework/path"
+ "github.com/hashicorp/terraform-plugin-framework/resource"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
+ "github.com/hashicorp/terraform-plugin-framework/schema/validator"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+ "github.com/hashicorp/terraform-plugin-log/tflog"
+ "github.com/stackitcloud/stackit-sdk-go/core/oapierror"
+ "github.com/stackitcloud/stackit-sdk-go/services/sfs"
+ "github.com/stackitcloud/stackit-sdk-go/services/sfs/wait"
+ "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/conversion"
+ "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/core"
+ "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/features"
+ sfsUtils "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/services/sfs/utils"
+ "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/utils"
+ coreutils "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/utils"
+ "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/validate"
+)
+
+// Ensure the implementation satisfies the expected interfaces.
+var (
+ _ resource.Resource = &shareResource{}
+ _ resource.ResourceWithConfigure = &shareResource{}
+ _ resource.ResourceWithModifyPlan = &shareResource{}
+ _ resource.ResourceWithImportState = &shareResource{}
+)
+
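+// Model maps the stackit_sfs_share resource schema data.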
+type Model struct {
+ Id types.String `tfsdk:"id"` // needed by TF
+ ProjectId types.String `tfsdk:"project_id"`
+ ResourcePoolId types.String `tfsdk:"resource_pool_id"`
+ ShareId types.String `tfsdk:"share_id"`
+ Name types.String `tfsdk:"name"`
+ ExportPolicyName types.String `tfsdk:"export_policy"`
+ SpaceHardLimitGigabytes types.Int64 `tfsdk:"space_hard_limit_gigabytes"`
+ Region types.String `tfsdk:"region"`
+ MountPath types.String `tfsdk:"mount_path"`
+}
+
+// NewShareResource is a helper function to simplify the provider implementation.
+func NewShareResource() resource.Resource {
+ return &shareResource{}
+}
+
+// shareResource is the resource implementation.
+type shareResource struct {
+ client *sfs.APIClient
+ providerData core.ProviderData
+}
+
+// ModifyPlan implements resource.ResourceWithModifyPlan.
+func (r *shareResource) ModifyPlan(ctx context.Context, req resource.ModifyPlanRequest, resp *resource.ModifyPlanResponse) { //nolint:gocritic // defined by terraform api
+ var configModel Model
+ // skip initial empty configuration to avoid follow-up errors
+ if req.Config.Raw.IsNull() {
+ return
+ }
+ resp.Diagnostics.Append(req.Config.Get(ctx, &configModel)...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ var planModel Model
+ resp.Diagnostics.Append(req.Plan.Get(ctx, &planModel)...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ utils.AdaptRegion(ctx, configModel.Region, &planModel.Region, r.providerData.GetRegion(), resp)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ resp.Diagnostics.Append(resp.Plan.Set(ctx, planModel)...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+}
+
+// Metadata returns the resource type name.
+func (r *shareResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
+ resp.TypeName = req.ProviderTypeName + "_sfs_share"
+}
+
+// Configure adds the provider configured client to the resource.
+func (r *shareResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) {
+ var ok bool
+ r.providerData, ok = conversion.ParseProviderData(ctx, req.ProviderData, &resp.Diagnostics)
+ if !ok {
+ return
+ }
+
+ features.CheckBetaResourcesEnabled(ctx, &r.providerData, &resp.Diagnostics, "stackit_sfs_share", core.Resource)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ apiClient := sfsUtils.ConfigureClient(ctx, &r.providerData, &resp.Diagnostics)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+ r.client = apiClient
+ tflog.Info(ctx, "SFS client configured")
+}
+
+// Schema defines the schema for the resource.
+func (r *shareResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) {
+ description := "SFS Share schema. Must have a `region` specified in the provider configuration."
+ resp.Schema = schema.Schema{
+ MarkdownDescription: features.AddBetaDescription(description, core.Resource),
+ Description: description,
+ Attributes: map[string]schema.Attribute{
+ "id": schema.StringAttribute{
+ Description: "Terraform's internal resource ID. It is structured as \"`project_id`,`region`,`resource_pool_id`,`share_id`\".",
+ Computed: true,
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.UseStateForUnknown(),
+ },
+ },
+ "project_id": schema.StringAttribute{
+ Description: "STACKIT project ID to which the share is associated.",
+ Required: true,
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.RequiresReplace(),
+ },
+ Validators: []validator.String{
+ validate.UUID(),
+ validate.NoSeparator(),
+ },
+ },
+ "resource_pool_id": schema.StringAttribute{
+ Description: "The ID of the resource pool for the SFS share.",
+ Required: true,
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.RequiresReplace(),
+ },
+ Validators: []validator.String{
+ validate.UUID(),
+ validate.NoSeparator(),
+ },
+ },
+ "share_id": schema.StringAttribute{
+ Description: "share ID",
+ Computed: true,
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.UseStateForUnknown(),
+ },
+ Validators: []validator.String{
+ validate.UUID(),
+ validate.NoSeparator(),
+ },
+ },
+ "region": schema.StringAttribute{
+ Optional: true,
+ // must be computed to allow for storing the override value from the provider
+ Computed: true,
+ Description: "The resource region. If not defined, the provider region is used.",
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.RequiresReplace(),
+ },
+ },
+ "name": schema.StringAttribute{
+ Description: "Name of the share.",
+ Required: true,
+ PlanModifiers: []planmodifier.String{
+ // api does not support changing the name
+ stringplanmodifier.RequiresReplace(),
+ },
+ },
+ "export_policy": schema.StringAttribute{
+ Description: `Name of the Share Export Policy to use for the Share.
+Note that if this is set to an empty string, the Share can only be mounted read-only by
+clients whose IPs match the IP ACL of the Resource Pool hosting this Share.
+A Share Export Policy can also be assigned after the Share has been created.`,
+ Required: true,
+ },
+ "space_hard_limit_gigabytes": schema.Int64Attribute{
+ Required: true,
+ Description: `Space hard limit for the Share.
+ If zero, the Share will have access to the full space of the Resource Pool it lives in.
+ (unit: gigabytes)`,
+ },
+ "mount_path": schema.StringAttribute{
+ Computed: true,
+ Description: "Mount path of the Share, used to mount the Share",
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.UseStateForUnknown(),
+ },
+ },
+ },
+ }
+}
+
+// Create creates the resource and sets the initial Terraform state.
+func (r *shareResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { // nolint:gocritic // function signature required by Terraform
+ // Retrieve values from plan
+ var model Model
+ diags := req.Plan.Get(ctx, &model)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ projectId := model.ProjectId.ValueString()
+ region := model.Region.ValueString()
+ resourcePoolId := model.ResourcePoolId.ValueString()
+ ctx = tflog.SetField(ctx, "project_id", projectId)
+ ctx = tflog.SetField(ctx, "region", region)
+ ctx = tflog.SetField(ctx, "resource_pool_id", resourcePoolId)
+
+ ctx = core.InitProviderContext(ctx)
+
+ payload, err := toCreatePayload(&model)
+ if err != nil {
+ core.LogAndAddError(ctx, &resp.Diagnostics, "Create Resourcepool", fmt.Sprintf("Cannot create payload: %v", err))
+ return
+ }
+
+ // Create new share
+ share, err := r.client.CreateShare(ctx, projectId, region, resourcePoolId).
+ CreateSharePayload(payload).
+ Execute()
+ if err != nil {
+ core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating share", fmt.Sprintf("Calling API: %v", err))
+ return
+ }
+
+ ctx = core.LogResponse(ctx)
+
+ if share.Share == nil || share.Share.Id == nil {
+ core.LogAndAddError(ctx, &resp.Diagnostics, "error creating share", "Calling API: Incomplete response (id missing)")
+ return
+ }
+ // Write id attributes to state before polling via the wait handler - just in case anything goes wrong during the wait handler
+ utils.SetAndLogStateFields(ctx, &resp.Diagnostics, &resp.State, map[string]any{
+ "project_id": projectId,
+ "region": region,
+ "resource_pool_id": resourcePoolId,
+ "share_id": *share.Share.Id,
+ })
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ response, err := wait.CreateShareWaitHandler(ctx, r.client, projectId, region, resourcePoolId, *share.Share.Id).
+ WaitWithContext(ctx)
+ if err != nil {
+ core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating share", fmt.Sprintf("share creation waiting: %v", err))
+ return
+ }
+ ctx = tflog.SetField(ctx, "share_id", response.Share.Id)
+
+ // the responses of create and update are not compatible, so we can't use a unified
+ // mapFields function. Therefore, we issue a GET request after the create
+ // to get a compatible structure
+ if response.Share == nil || response.Share.Id == nil {
+ core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating share", "response did not contain an ID")
+ return
+ }
+ getResponse, err := r.client.GetShareExecute(ctx, projectId, region, resourcePoolId, *response.Share.Id)
+ if err != nil {
+ core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating share", fmt.Sprintf("share get: %v", err))
+ return
+ }
+
+ // Map response body to schema
+ err = mapFields(ctx, getResponse.Share, region, &model)
+ if err != nil {
+ core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating share", fmt.Sprintf("Processing API payload: %v", err))
+ return
+ }
+
+ // Set state to fully populated data
+ diags = resp.State.Set(ctx, model)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+ tflog.Info(ctx, "SFS Share created")
+}
+
+// Read refreshes the Terraform state with the latest data.
+func (r *shareResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { // nolint:gocritic // function signature required by Terraform
+ var model Model
+ diags := req.State.Get(ctx, &model)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+ projectId := model.ProjectId.ValueString()
+ resourcePoolId := model.ResourcePoolId.ValueString()
+ shareId := model.ShareId.ValueString()
+ region := r.providerData.GetRegionWithOverride(model.Region)
+ ctx = tflog.SetField(ctx, "project_id", projectId)
+ ctx = tflog.SetField(ctx, "resource_pool_id", resourcePoolId)
+ ctx = tflog.SetField(ctx, "share_id", shareId)
+ ctx = tflog.SetField(ctx, "region", region)
+
+ ctx = core.InitProviderContext(ctx)
+
+ response, err := r.client.GetShareExecute(ctx, projectId, region, resourcePoolId, shareId)
+ if err != nil {
+ var openapiError *oapierror.GenericOpenAPIError
+ if errors.As(err, &openapiError) {
+ if openapiError.StatusCode == http.StatusNotFound {
+ resp.State.RemoveResource(ctx)
+ return
+ }
+ }
+ core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading share", fmt.Sprintf("Calling API: %v", err))
+ return
+ }
+
+ ctx = core.LogResponse(ctx)
+
+ // Map response body to schema
+ err = mapFields(ctx, response.Share, region, &model)
+ if err != nil {
+ core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading share", fmt.Sprintf("Processing API payload: %v", err))
+ return
+ }
+ // Set refreshed state
+ diags = resp.State.Set(ctx, model)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+ tflog.Info(ctx, "SFS share read")
+}
+
+// Update updates the resource and sets the updated Terraform state on success.
+func (r *shareResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { // nolint:gocritic // function signature required by Terraform
+ // Retrieve values from plan
+ var model Model
+ diags := req.Plan.Get(ctx, &model)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+ projectId := model.ProjectId.ValueString()
+ shareId := model.ShareId.ValueString()
+ region := model.Region.ValueString()
+ resourcePoolId := model.ResourcePoolId.ValueString()
+ ctx = tflog.SetField(ctx, "project_id", projectId)
+ ctx = tflog.SetField(ctx, "share_id", shareId)
+ ctx = tflog.SetField(ctx, "region", region)
+ ctx = tflog.SetField(ctx, "resource_pool_id", resourcePoolId)
+
+ ctx = core.InitProviderContext(ctx)
+
+ // Retrieve values from state
+ var stateModel Model
+ diags = req.State.Get(ctx, &stateModel)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ payload, err := toUpdatePayload(&model)
+ if err != nil {
+ core.LogAndAddError(ctx, &resp.Diagnostics, "Update share", fmt.Sprintf("cannot create payload: %v", err))
+ return
+ }
+
+ response, err := r.client.UpdateShare(ctx, projectId, region, resourcePoolId, shareId).
+ UpdateSharePayload(*payload).
+ Execute()
+ if err != nil {
+ var openapiError *oapierror.GenericOpenAPIError
+ if errors.As(err, &openapiError) {
+ if openapiError.StatusCode == http.StatusNotFound {
+ resp.State.RemoveResource(ctx)
+ return
+ }
+ }
+ core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating share", fmt.Sprintf("Calling API: %v", err))
+ return
+ }
+
+ ctx = core.LogResponse(ctx)
+
+ // the responses of create and update are not compatible, so we can't use a unified
+ // mapFields function. Therefore, we wait for the update to finish and read the
+ // share again to get a compatible structure
+ if response.Share == nil || response.Share.Id == nil {
+ core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating share", "response did not contain an ID")
+ return
+ }
+
+ getResponse, err := wait.UpdateShareWaitHandler(ctx, r.client, projectId, region, resourcePoolId, shareId).WaitWithContext(ctx)
+ if err != nil {
+ core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating share", fmt.Sprintf("waiting for share update: %v", err))
+ return
+ }
+ err = mapFields(ctx, getResponse.Share, region, &model)
+ if err != nil {
+ core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating share", fmt.Sprintf("Processing API payload: %v", err))
+ return
+ }
+ diags = resp.State.Set(ctx, model)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+ tflog.Info(ctx, "SFS share updated")
+}
+
+// Delete deletes the resource and removes the Terraform state on success.
+func (r *shareResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { // nolint:gocritic // function signature required by Terraform
+ // Retrieve values from state
+ var model Model
+ diags := req.State.Get(ctx, &model)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ projectId := model.ProjectId.ValueString()
+ shareId := model.ShareId.ValueString()
+ region := model.Region.ValueString()
+ resourcePoolId := model.ResourcePoolId.ValueString()
+ ctx = tflog.SetField(ctx, "project_id", projectId)
+ ctx = tflog.SetField(ctx, "share_id", shareId)
+ ctx = tflog.SetField(ctx, "region", region)
+ ctx = tflog.SetField(ctx, "resource_pool_id", resourcePoolId)
+
+ ctx = core.InitProviderContext(ctx)
+
+ // Delete existing share
+ _, err := r.client.DeleteShareExecute(ctx, projectId, region, resourcePoolId, shareId)
+ if err != nil {
+ core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting share", fmt.Sprintf("Calling API: %v", err))
+ return
+ }
+
+ ctx = core.LogResponse(ctx)
+
+ // wait until the share deletion has actually completed
+ _, err = wait.DeleteShareWaitHandler(ctx, r.client, projectId, region, resourcePoolId, shareId).WaitWithContext(ctx)
+ if err != nil {
+ core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting share", fmt.Sprintf("share deletion waiting: %v", err))
+ return
+ }
+
+ tflog.Info(ctx, "SFS share deleted")
+}
+
+// ImportState implements resource.ResourceWithImportState.
+func (r *shareResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
+ idParts := strings.Split(req.ID, core.Separator)
+ if len(idParts) != 4 || idParts[0] == "" || idParts[1] == "" || idParts[2] == "" || idParts[3] == "" {
+ core.LogAndAddError(ctx, &resp.Diagnostics,
+ "Error importing share",
+ fmt.Sprintf("Expected import identifier with format [project_id],[region],[resource_pool_id],[share_id], got %q", req.ID),
+ )
+ return
+ }
+
+ resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("project_id"), idParts[0])...)
+ resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("region"), idParts[1])...)
+ resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("resource_pool_id"), idParts[2])...)
+ resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("share_id"), idParts[3])...)
+
+ tflog.Info(ctx, "SFS share imported")
+}
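+
+// An import of an existing share uses the composite ID described above, e.g.
+// (illustrative command with placeholder UUIDs):
+//
+//	terraform import stackit_sfs_share.share "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx,eu01,xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx,xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"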
+
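+// mapFields maps the API share response into the Terraform resource model and
+// rebuilds the internal composite ID.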
+func mapFields(_ context.Context, share *sfs.GetShareResponseShare, region string, model *Model) error {
+ if share == nil {
+ return fmt.Errorf("share empty in response")
+ }
+ if model == nil {
+ return fmt.Errorf("model input is nil")
+ }
+
+ if share.Id == nil {
+ return fmt.Errorf("share id not present")
+ }
+ model.ShareId = types.StringPointerValue(share.Id)
+
+ model.Region = types.StringValue(region)
+ model.Id = utils.BuildInternalTerraformId(
+ model.ProjectId.ValueString(),
+ region,
+ model.ResourcePoolId.ValueString(),
+ utils.Coalesce(model.ShareId, types.StringPointerValue(share.Id)).ValueString(),
+ )
+ model.Name = types.StringPointerValue(share.Name)
+
+ if policy := share.ExportPolicy.Get(); policy != nil {
+ model.ExportPolicyName = types.StringPointerValue(policy.Name)
+ }
+
+ model.SpaceHardLimitGigabytes = types.Int64PointerValue(share.SpaceHardLimitGigabytes)
+ model.MountPath = types.StringPointerValue(share.MountPath)
+
+ return nil
+}
+
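+// toCreatePayload builds the CreateSharePayload for the API from the plan model.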
+func toCreatePayload(model *Model) (sfs.CreateSharePayload, error) {
+ if model == nil {
+ return sfs.CreateSharePayload{}, fmt.Errorf("nil model")
+ }
+ result := sfs.CreateSharePayload{
+ ExportPolicyName: sfs.NewNullableString(model.ExportPolicyName.ValueStringPointer()),
+ Name: model.Name.ValueStringPointer(),
+ SpaceHardLimitGigabytes: model.SpaceHardLimitGigabytes.ValueInt64Pointer(),
+ }
+ return result, nil
+}
+
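+// toUpdatePayload builds the UpdateSharePayload for the API from the plan model.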
+func toUpdatePayload(model *Model) (*sfs.UpdateSharePayload, error) {
+ if model == nil {
+ return nil, fmt.Errorf("nil model")
+ }
+
+ result := &sfs.UpdateSharePayload{
+ ExportPolicyName: sfs.NewNullableString(model.ExportPolicyName.ValueStringPointer()),
+ SpaceHardLimitGigabytes: model.SpaceHardLimitGigabytes.ValueInt64Pointer(),
+ }
+ return result, nil
+}
diff --git a/stackit/internal/services/sfs/share/resource_test.go b/stackit/internal/services/sfs/share/resource_test.go
new file mode 100644
index 000000000..f9127374d
--- /dev/null
+++ b/stackit/internal/services/sfs/share/resource_test.go
@@ -0,0 +1,194 @@
+package share
+
+import (
+ "context"
+ _ "embed"
+ "testing"
+ "time"
+
+ "github.com/google/go-cmp/cmp"
+ "github.com/google/uuid"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+ "github.com/stackitcloud/stackit-sdk-go/core/utils"
+ "github.com/stackitcloud/stackit-sdk-go/services/sfs"
+)
+
+var (
+ testProjectId = types.StringValue(uuid.NewString())
+ testResourcePoolId = types.StringValue(uuid.NewString())
+ testShareId = types.StringValue(uuid.NewString())
+ testRegion = types.StringValue("eu01")
+ testId = types.StringValue(testProjectId.ValueString() + "," + testRegion.ValueString() + "," + testResourcePoolId.ValueString() + "," + testShareId.ValueString())
+ testPolicyName = types.StringValue("test-policy")
+)
+
+func TestMapFields(t *testing.T) {
+ testTime := time.Now()
+ tests := []struct {
+ name string
+ state *Model
+ region string
+ input *sfs.GetShareResponseShare
+ expected *Model
+ isValid bool
+ }{
+ {
+ "default_values",
+ &Model{
+ Id: testId,
+ ProjectId: testProjectId,
+ ResourcePoolId: testResourcePoolId,
+ },
+ "eu01",
+ &sfs.GetShareResponseShare{
+ Id: testShareId.ValueStringPointer(),
+ Name: utils.Ptr("testname"),
+ ExportPolicy: sfs.NewNullableShareExportPolicy(&sfs.ShareExportPolicy{
+ Name: utils.Ptr("test-policy"),
+ }),
+ SpaceHardLimitGigabytes: utils.Ptr[int64](42),
+ },
+ &Model{
+ Id: testId,
+ ProjectId: testProjectId,
+ ResourcePoolId: testResourcePoolId,
+ ShareId: testShareId,
+ Name: types.StringValue("testname"),
+ ExportPolicyName: testPolicyName,
+ SpaceHardLimitGigabytes: types.Int64Value(42),
+ Region: types.StringValue("eu01"),
+ },
+ true,
+ },
+ {
+ name: "simple_values",
+ state: &Model{
+ Id: testId,
+ ProjectId: testProjectId,
+ ResourcePoolId: testResourcePoolId,
+ },
+ region: "eu01",
+ input: &sfs.GetShareResponseShare{
+ CreatedAt: &testTime,
+ Id: testShareId.ValueStringPointer(),
+ MountPath: utils.Ptr("mountpoint"),
+ Name: utils.Ptr("testname"),
+ SpaceHardLimitGigabytes: sfs.PtrInt64(42),
+ State: utils.Ptr("state"),
+ ExportPolicy: sfs.NewNullableShareExportPolicy(&sfs.ShareExportPolicy{
+ Name: utils.Ptr("test-policy"),
+ }),
+ },
+ expected: &Model{
+ Id: testId,
+ ProjectId: testProjectId,
+ ResourcePoolId: testResourcePoolId,
+ Name: types.StringValue("testname"),
+ ShareId: testShareId,
+ ExportPolicyName: testPolicyName,
+ SpaceHardLimitGigabytes: types.Int64Value(42),
+ Region: testRegion,
+ MountPath: types.StringValue("mountpoint"),
+ },
+ isValid: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ ctx := context.Background()
+ if err := mapFields(ctx, tt.input, tt.region, tt.state); (err == nil) != tt.isValid {
+ t.Errorf("unexpected error")
+ }
+ if tt.isValid {
+ if diff := cmp.Diff(tt.state, tt.expected); diff != "" {
+ t.Fatalf("Data does not match: %s", diff)
+ }
+ }
+ })
+ }
+}
+
+func TestToCreatePayload(t *testing.T) {
+ tests := []struct {
+ name string
+ model *Model
+ want sfs.CreateSharePayload
+ wantErr bool
+ }{
+ {
+ "default",
+ &Model{
+ Id: testId,
+ ProjectId: testProjectId,
+ ResourcePoolId: testResourcePoolId,
+ ShareId: testShareId,
+ Name: types.StringValue("testname"),
+ ExportPolicyName: testPolicyName,
+ SpaceHardLimitGigabytes: types.Int64Value(42),
+ },
+ sfs.CreateSharePayload{
+ ExportPolicyName: sfs.NewNullableString(utils.Ptr("test-policy")),
+ Name: sfs.PtrString("testname"),
+ SpaceHardLimitGigabytes: sfs.PtrInt64(42),
+ },
+ false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got, err := toCreatePayload(tt.model)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("toCreatePayload() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+
+ if !tt.wantErr {
+ if diff := cmp.Diff(got, tt.want, cmp.AllowUnexported(sfs.NullableString{})); diff != "" {
+ t.Errorf("Data does not match: %s", diff)
+ }
+ }
+ })
+ }
+}
+
+func TestToUpdatePayload(t *testing.T) {
+ tests := []struct {
+ name string
+ model *Model
+ want *sfs.UpdateSharePayload
+ wantErr bool
+ }{
+ {
+ "default",
+ &Model{
+ Id: testId,
+ ProjectId: testProjectId,
+ ResourcePoolId: testResourcePoolId,
+ ShareId: testShareId,
+ Name: types.StringValue("testname"),
+ SpaceHardLimitGigabytes: types.Int64Value(42),
+ ExportPolicyName: testPolicyName,
+ },
+ &sfs.UpdateSharePayload{
+ ExportPolicyName: sfs.NewNullableString(testPolicyName.ValueStringPointer()),
+ SpaceHardLimitGigabytes: sfs.PtrInt64(42),
+ },
+ false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got, err := toUpdatePayload(tt.model)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("toCreatePayload() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+
+ if !tt.wantErr {
+ if diff := cmp.Diff(got, tt.want, cmp.AllowUnexported(sfs.NullableString{})); diff != "" {
+ t.Errorf("Data does not match: %s", diff)
+ }
+ }
+ })
+ }
+}
diff --git a/stackit/internal/services/sfs/snapshots/datasource.go b/stackit/internal/services/sfs/snapshots/datasource.go
new file mode 100644
index 000000000..5d576d992
--- /dev/null
+++ b/stackit/internal/services/sfs/snapshots/datasource.go
@@ -0,0 +1,259 @@
+package snapshots
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net/http"
+ "time"
+
+ "github.com/hashicorp/terraform-plugin-framework/attr"
+ "github.com/hashicorp/terraform-plugin-framework/datasource"
+ "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
+ "github.com/hashicorp/terraform-plugin-framework/schema/validator"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+ "github.com/hashicorp/terraform-plugin-log/tflog"
+ "github.com/stackitcloud/stackit-sdk-go/core/oapierror"
+ "github.com/stackitcloud/stackit-sdk-go/services/sfs"
+ "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/conversion"
+ "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/core"
+ "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/features"
+ sfsUtils "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/services/sfs/utils"
+ "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/utils"
+ "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/validate"
+)
+
+var (
+ _ datasource.DataSource = (*resourcePoolSnapshotDataSource)(nil)
+ _ datasource.DataSourceWithConfigure = (*resourcePoolSnapshotDataSource)(nil)
+)
+
+var snapshotModelType = types.ObjectType{
+ AttrTypes: map[string]attr.Type{
+ "comment": types.StringType,
+ "created_at": types.StringType,
+ "resource_pool_id": types.StringType,
+ "snapshot_name": types.StringType,
+ "logical_size_gigabytes": types.Int64Type,
+ "size_gigabytes": types.Int64Type,
+ },
+}
+
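+// snapshotModel maps a single entry of the snapshot list attribute.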
+type snapshotModel struct {
+ Comment types.String `tfsdk:"comment"`
+ CreatedAt types.String `tfsdk:"created_at"`
+ ResourcePoolId types.String `tfsdk:"resource_pool_id"`
+ SnapshotName types.String `tfsdk:"snapshot_name"`
+ SizeGigabytes types.Int64 `tfsdk:"size_gigabytes"`
+ LogicalSizeGigabytes types.Int64 `tfsdk:"logical_size_gigabytes"`
+}
+
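+// dataSourceModel maps the stackit_sfs_resource_pool_snapshot datasource schema data.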
+type dataSourceModel struct {
+ Id types.String `tfsdk:"id"` // needed by TF
+ ProjectId types.String `tfsdk:"project_id"`
+ ResourcePoolId types.String `tfsdk:"resource_pool_id"`
+ Region types.String `tfsdk:"region"`
+ Snapshots types.List `tfsdk:"snapshots"`
+}
+
+type resourcePoolSnapshotDataSource struct {
+ client *sfs.APIClient
+ providerData core.ProviderData
+}
+
+func NewResourcePoolSnapshotDataSource() datasource.DataSource {
+ return &resourcePoolSnapshotDataSource{}
+}
+
+// Configure implements datasource.DataSourceWithConfigure.
+func (r *resourcePoolSnapshotDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
+ var ok bool
+ r.providerData, ok = conversion.ParseProviderData(ctx, req.ProviderData, &resp.Diagnostics)
+ if !ok {
+ return
+ }
+
+ features.CheckBetaResourcesEnabled(ctx, &r.providerData, &resp.Diagnostics, "stackit_sfs_resource_pool_snapshot", core.Datasource)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ apiClient := sfsUtils.ConfigureClient(ctx, &r.providerData, &resp.Diagnostics)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+ r.client = apiClient
+ tflog.Info(ctx, "SFS client configured")
+}
+
+// Metadata implements datasource.DataSource.
+func (r *resourcePoolSnapshotDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
+ resp.TypeName = req.ProviderTypeName + "_sfs_resource_pool_snapshot"
+}
+
+// Read implements datasource.DataSource.
+func (r *resourcePoolSnapshotDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { // nolint:gocritic // function signature required by Terraform
+ var model dataSourceModel
+ diags := req.Config.Get(ctx, &model)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+ projectId := model.ProjectId.ValueString()
+ resourcePoolId := model.ResourcePoolId.ValueString()
+ region := r.providerData.GetRegionWithOverride(model.Region)
+ ctx = tflog.SetField(ctx, "project_id", projectId)
+ ctx = tflog.SetField(ctx, "resource_pool_id", resourcePoolId)
+ ctx = tflog.SetField(ctx, "region", region)
+
+ ctx = core.InitProviderContext(ctx)
+
+ response, err := r.client.ListResourcePoolSnapshotsExecute(ctx, projectId, region, resourcePoolId)
+ if err != nil {
+ var openapiError *oapierror.GenericOpenAPIError
+ if errors.As(err, &openapiError) {
+ if openapiError.StatusCode == http.StatusNotFound {
+ resp.State.RemoveResource(ctx)
+ return
+ }
+ }
+ core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading resource pool snapshot", fmt.Sprintf("Calling API: %v", err))
+ return
+ }
+
+ ctx = core.LogResponse(ctx)
+
+ // Map response body to schema
+ err = mapDataSourceFields(ctx, region, response.ResourcePoolSnapshots, &model)
+ if err != nil {
+ core.LogAndAddError(ctx, &resp.Diagnostics, "error reading resource pool snapshot", fmt.Sprintf("Processing API payload: %v", err))
+ return
+ }
+ // Set refreshed state
+ diags = resp.State.Set(ctx, model)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+ tflog.Info(ctx, "SFS resource pool snapshot read")
+}
+
+// Schema implements datasource.DataSource.
+func (r *resourcePoolSnapshotDataSource) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
+ description := "Resource-pool datasource schema. Must have a `region` specified in the provider configuration."
+ resp.Schema = schema.Schema{
+ MarkdownDescription: features.AddBetaDescription(description, core.Datasource),
+ Description: description,
+ Attributes: map[string]schema.Attribute{
+ "id": schema.StringAttribute{
+ Description: "Terraform's internal resource ID. It is structured as \"`project_id`,`region`,`resource_pool_id`\".",
+ Computed: true,
+ },
+ "project_id": schema.StringAttribute{
+ Description: "STACKIT project ID to which the resource pool snapshot is associated.",
+ Required: true,
+ Validators: []validator.String{
+ validate.UUID(),
+ validate.NoSeparator(),
+ },
+ },
+ "resource_pool_id": schema.StringAttribute{
+ Description: "Resource pool ID",
+ Required: true,
+ Validators: []validator.String{
+ validate.UUID(),
+ validate.NoSeparator(),
+ },
+ },
+ "region": schema.StringAttribute{
+ Optional: true,
+ Description: "The resource region. Read-only attribute that reflects the provider region.",
+ },
+ "snapshots": schema.ListNestedAttribute{
+ NestedObject: schema.NestedAttributeObject{
+ Attributes: map[string]schema.Attribute{
+ "comment": schema.StringAttribute{
+ Computed: true,
+ Description: "(optional) A comment to add more information about a snapshot",
+ },
+ "created_at": schema.StringAttribute{
+ Computed: true,
+ Description: "creation date of the snapshot",
+ },
+ "resource_pool_id": schema.StringAttribute{
+ Computed: true,
+ Description: "ID of the Resource Pool of the Snapshot",
+ },
+ "snapshot_name": schema.StringAttribute{
+ Computed: true,
+ Description: "Name of the Resource Pool Snapshot",
+ },
+ "size_gigabytes": schema.Int64Attribute{
+ Computed: true,
+ Description: "Reflects the actual storage footprint in the backend at snapshot time (e.g. how much storage from the Resource Pool does it use)",
+ },
+ "logical_size_gigabytes": schema.Int64Attribute{
+ Computed: true,
+ Description: "Represents the user-visible data size at the time of the snapshot (e.g. what’s in the snapshot)",
+ },
+ },
+ },
+ Computed: true,
+ Description: "List of snapshots of the resource pool.",
+ },
+ },
+ }
+}
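+
+// A minimal configuration sketch for this datasource (placeholder UUIDs):
+//
+//	data "stackit_sfs_resource_pool_snapshot" "example" {
+//	  project_id       = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
+//	  resource_pool_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
+//	}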
+
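+// mapDataSourceFields converts the snapshot list from the API response into the
+// Terraform list value and derives the composite ID from project, region and
+// resource pool.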
+func mapDataSourceFields(ctx context.Context, region string, snapshots *[]sfs.ResourcePoolSnapshot, model *dataSourceModel) error {
+ if snapshots == nil {
+ return fmt.Errorf("resource pool snapshot empty in response")
+ }
+ if model == nil {
+ return fmt.Errorf("model input is nil")
+ }
+ model.Region = types.StringValue(region)
+ var resourcePoolId types.String
+ if utils.IsUndefined(model.ResourcePoolId) {
+ if len(*snapshots) == 0 {
+ return fmt.Errorf("no resource pool id defined")
+ }
+ resourcePoolId = types.StringPointerValue((*snapshots)[0].ResourcePoolId)
+ } else {
+ resourcePoolId = model.ResourcePoolId
+ }
+ model.Id = utils.BuildInternalTerraformId(
+ model.ProjectId.ValueString(),
+ region,
+ resourcePoolId.ValueString(),
+ )
+ model.Snapshots = types.ListNull(snapshotModelType)
+ var vals []attr.Value
+ for _, snapshot := range *snapshots {
+ elem := snapshotModel{
+ ResourcePoolId: types.StringPointerValue(snapshot.ResourcePoolId),
+ SnapshotName: types.StringPointerValue(snapshot.SnapshotName),
+ SizeGigabytes: types.Int64PointerValue(snapshot.SizeGigabytes),
+ LogicalSizeGigabytes: types.Int64PointerValue(snapshot.LogicalSizeGigabytes),
+ }
+ if val := snapshot.Comment; val != nil {
+ elem.Comment = types.StringPointerValue(val.Get())
+ }
+ if val := snapshot.CreatedAt; val != nil {
+ elem.CreatedAt = types.StringValue(val.Format(time.RFC3339))
+ }
+ val, diags := types.ObjectValueFrom(ctx, snapshotModelType.AttrTypes, elem)
+ if diags.HasError() {
+ return fmt.Errorf("error while converting snapshot list: %v", diags.Errors())
+ }
+ vals = append(vals, val)
+ }
+
+ list, diags := types.ListValueFrom(ctx, snapshotModelType, vals)
+ if diags.HasError() {
+ return fmt.Errorf("cannot convert snapshot list: %v", diags.Errors())
+ }
+ model.Snapshots = list
+
+ return nil
+}
diff --git a/stackit/internal/services/sfs/snapshots/datasource_test.go b/stackit/internal/services/sfs/snapshots/datasource_test.go
new file mode 100644
index 000000000..68e8f5876
--- /dev/null
+++ b/stackit/internal/services/sfs/snapshots/datasource_test.go
@@ -0,0 +1,109 @@
+package snapshots
+
+import (
+ "context"
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/google/go-cmp/cmp"
+ "github.com/google/uuid"
+ "github.com/hashicorp/terraform-plugin-framework/attr"
+ "github.com/hashicorp/terraform-plugin-framework/diag"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+ "github.com/stackitcloud/stackit-sdk-go/core/utils"
+ "github.com/stackitcloud/stackit-sdk-go/services/sfs"
+)
+
+var (
+ testProjectId = types.StringValue(uuid.NewString())
+ testResourcePoolId = types.StringValue(uuid.NewString())
+ testRegion = types.StringValue("eu01")
+ testId = types.StringValue(testProjectId.ValueString() + "," + testRegion.ValueString() + "," + testResourcePoolId.ValueString())
+)
+
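+// must panics when the diagnostics contain an error; it keeps the
+// construction of expected values in the test table below concise.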
+func must[T any](t T, diags diag.Diagnostics) T {
+ if diags.HasError() {
+ panic(fmt.Sprintf("diagnostics contain error: %v", diags.Errors()))
+ }
+ return t
+}
+
+func TestMapDatasourceFields(t *testing.T) {
+ testTime := time.Now()
+ tests := []struct {
+ name string
+ state *dataSourceModel
+ region string
+ input *[]sfs.ResourcePoolSnapshot
+ expected *dataSourceModel
+ isValid bool
+ }{
+ {
+ "default_values",
+ &dataSourceModel{
+ Id: testId,
+ ProjectId: testProjectId,
+ ResourcePoolId: testResourcePoolId,
+ Region: types.StringValue("eu01"),
+ },
+ "eu01",
+ &[]sfs.ResourcePoolSnapshot{
+ {
+ Comment: sfs.NewNullableString(utils.Ptr("comment 1")),
+ CreatedAt: utils.Ptr(testTime),
+ ResourcePoolId: testResourcePoolId.ValueStringPointer(),
+ SnapshotName: utils.Ptr("snapshot-1"),
+ SizeGigabytes: utils.Ptr(int64(50)),
+ LogicalSizeGigabytes: utils.Ptr(int64(50)),
+ },
+ {
+ Comment: sfs.NewNullableString(utils.Ptr("comment 2")),
+ CreatedAt: utils.Ptr(testTime.Add(1 * time.Hour)),
+ ResourcePoolId: testResourcePoolId.ValueStringPointer(),
+ SnapshotName: utils.Ptr("snapshot-2"),
+ SizeGigabytes: utils.Ptr(int64(50)),
+ LogicalSizeGigabytes: utils.Ptr(int64(50)),
+ },
+ },
+ &dataSourceModel{
+ Id: testId,
+ ProjectId: testProjectId,
+ ResourcePoolId: testResourcePoolId,
+ Region: types.StringValue("eu01"),
+ Snapshots: types.ListValueMust(snapshotModelType, []attr.Value{
+ must(types.ObjectValueFrom(context.Background(), snapshotModelType.AttrTypes, snapshotModel{
+ Comment: types.StringValue("comment 1"),
+ CreatedAt: types.StringValue(testTime.Format(time.RFC3339)),
+ ResourcePoolId: testResourcePoolId,
+ SnapshotName: types.StringValue("snapshot-1"),
+ SizeGigabytes: types.Int64Value(50),
+ LogicalSizeGigabytes: types.Int64Value(50),
+ })),
+ must(types.ObjectValueFrom(context.Background(), snapshotModelType.AttrTypes, snapshotModel{
+ Comment: types.StringValue("comment 2"),
+ CreatedAt: types.StringValue(testTime.Add(1 * time.Hour).Format(time.RFC3339)),
+ ResourcePoolId: testResourcePoolId,
+ SnapshotName: types.StringValue("snapshot-2"),
+ SizeGigabytes: types.Int64Value(50),
+ LogicalSizeGigabytes: types.Int64Value(50),
+ })),
+ }),
+ },
+ true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ ctx := context.Background()
+ if err := mapDataSourceFields(ctx, tt.region, tt.input, tt.state); (err == nil) != tt.isValid {
+ t.Errorf("unexpected error")
+ }
+ if tt.isValid {
+ if diff := cmp.Diff(tt.state, tt.expected); diff != "" {
+ t.Fatalf("Data does not match: %s", diff)
+ }
+ }
+ })
+ }
+}
diff --git a/stackit/internal/services/sfs/utils/util.go b/stackit/internal/services/sfs/utils/util.go
new file mode 100644
index 000000000..2f3f8d023
--- /dev/null
+++ b/stackit/internal/services/sfs/utils/util.go
@@ -0,0 +1,30 @@
+package utils
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/stackitcloud/stackit-sdk-go/services/sfs"
+
+ "github.com/hashicorp/terraform-plugin-framework/diag"
+ "github.com/stackitcloud/stackit-sdk-go/core/config"
+ "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/core"
+ "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/utils"
+)
+
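+// ConfigureClient builds an SFS API client from the provider data, using the
+// custom endpoint if one is configured. On failure it logs and appends an
+// error diagnostic and returns nil.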
+func ConfigureClient(ctx context.Context, providerData *core.ProviderData, diags *diag.Diagnostics) *sfs.APIClient {
+ apiClientConfigOptions := []config.ConfigurationOption{
+ config.WithCustomAuth(providerData.RoundTripper),
+ utils.UserAgentConfigOption(providerData.Version),
+ }
+ if providerData.SfsCustomEndpoint != "" {
+ apiClientConfigOptions = append(apiClientConfigOptions, config.WithEndpoint(providerData.SfsCustomEndpoint))
+ }
+ apiClient, err := sfs.NewAPIClient(apiClientConfigOptions...)
+ if err != nil {
+ core.LogAndAddError(ctx, diags, "Error configuring API client", fmt.Sprintf("Configuring client: %v. This is an error related to the provider configuration, not to the resource configuration", err))
+ return nil
+ }
+
+ return apiClient
+}
diff --git a/stackit/internal/services/sfs/utils/util_test.go b/stackit/internal/services/sfs/utils/util_test.go
new file mode 100644
index 000000000..f6f88537c
--- /dev/null
+++ b/stackit/internal/services/sfs/utils/util_test.go
@@ -0,0 +1,93 @@
+package utils
+
+import (
+ "context"
+ "os"
+ "reflect"
+ "testing"
+
+ "github.com/hashicorp/terraform-plugin-framework/diag"
+ sdkClients "github.com/stackitcloud/stackit-sdk-go/core/clients"
+ "github.com/stackitcloud/stackit-sdk-go/core/config"
+ "github.com/stackitcloud/stackit-sdk-go/services/sfs"
+ "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/core"
+ "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/utils"
+)
+
+const (
+ testVersion = "1.2.3"
+ testCustomEndpoint = "https://sfs-custom-endpoint.api.stackit.cloud"
+)
+
+func TestConfigureClient(t *testing.T) {
+ // mock authentication by setting the service account token env variable
+ os.Clearenv()
+ err := os.Setenv(sdkClients.ServiceAccountToken, "mock-val")
+ if err != nil {
+ t.Errorf("error setting env variable: %v", err)
+ }
+
+ type args struct {
+ providerData *core.ProviderData
+ }
+ tests := []struct {
+ name string
+ args args
+ wantErr bool
+ expected *sfs.APIClient
+ }{
+ {
+ name: "default endpoint",
+ args: args{
+ providerData: &core.ProviderData{
+ Version: testVersion,
+ },
+ },
+ expected: func() *sfs.APIClient {
+ apiClient, err := sfs.NewAPIClient(
+ utils.UserAgentConfigOption(testVersion),
+ )
+ if err != nil {
+ t.Errorf("error configuring client: %v", err)
+ }
+ return apiClient
+ }(),
+ wantErr: false,
+ },
+ {
+ name: "custom endpoint",
+ args: args{
+ providerData: &core.ProviderData{
+ Version: testVersion,
+ SfsCustomEndpoint: testCustomEndpoint,
+ },
+ },
+ expected: func() *sfs.APIClient {
+ apiClient, err := sfs.NewAPIClient(
+ utils.UserAgentConfigOption(testVersion),
+ config.WithEndpoint(testCustomEndpoint),
+ )
+ if err != nil {
+ t.Errorf("error configuring client: %v", err)
+ }
+ return apiClient
+ }(),
+ wantErr: false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ ctx := context.Background()
+ diags := diag.Diagnostics{}
+
+ actual := ConfigureClient(ctx, tt.args.providerData, &diags)
+ if diags.HasError() != tt.wantErr {
+ t.Errorf("ConfigureClient() error = %v, want %v", diags.HasError(), tt.wantErr)
+ }
+
+ if !reflect.DeepEqual(actual, tt.expected) {
+ t.Errorf("ConfigureClient() = %v, want %v", actual, tt.expected)
+ }
+ })
+ }
+}
diff --git a/stackit/internal/testutil/testutil.go b/stackit/internal/testutil/testutil.go
index c7e5654aa..d0f4084b0 100644
--- a/stackit/internal/testutil/testutil.go
+++ b/stackit/internal/testutil/testutil.go
@@ -87,8 +87,10 @@ var (
SQLServerFlexCustomEndpoint = os.Getenv("TF_ACC_SQLSERVERFLEX_CUSTOM_ENDPOINT")
ServerBackupCustomEndpoint = os.Getenv("TF_ACC_SERVER_BACKUP_CUSTOM_ENDPOINT")
ServerUpdateCustomEndpoint = os.Getenv("TF_ACC_SERVER_UPDATE_CUSTOM_ENDPOINT")
+ SFSCustomEndpoint = os.Getenv("TF_ACC_SFS_CUSTOM_ENDPOINT")
ServiceAccountCustomEndpoint = os.Getenv("TF_ACC_SERVICE_ACCOUNT_CUSTOM_ENDPOINT")
SKECustomEndpoint = os.Getenv("TF_ACC_SKE_CUSTOM_ENDPOINT")
+ TokenCustomEndpoint = os.Getenv("TF_ACC_TOKEN_CUSTOM_ENDPOINT")
)
// Provider config helper functions
@@ -435,6 +437,26 @@ func ServerUpdateProviderConfig() string {
)
}
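+// SFSProviderConfig returns a provider configuration for SFS acceptance tests.
+// Custom endpoints are only used when both corresponding environment variables are set.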
+func SFSProviderConfig() string {
+ if SFSCustomEndpoint == "" || TokenCustomEndpoint == "" {
+ return `
+ provider "stackit" {
+ region = "eu01"
+ enable_beta_resources = true
+ }`
+ }
+ return fmt.Sprintf(`
+ provider "stackit" {
+ region = "eu01"
+ sfs_custom_endpoint = "%s"
+ token_custom_endpoint = "%s"
+ enable_beta_resources = true
+ }`,
+ SFSCustomEndpoint,
+ TokenCustomEndpoint,
+ )
+}
+
func SKEProviderConfig() string {
if SKECustomEndpoint == "" {
return `
diff --git a/stackit/internal/utils/utils.go b/stackit/internal/utils/utils.go
index b7a479afc..9011a6665 100644
--- a/stackit/internal/utils/utils.go
+++ b/stackit/internal/utils/utils.go
@@ -7,13 +7,13 @@ import (
"regexp"
"strings"
- "github.com/hashicorp/terraform-plugin-framework/tfsdk"
-
"github.com/hashicorp/terraform-plugin-framework/attr"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/path"
"github.com/hashicorp/terraform-plugin-framework/resource"
+ "github.com/hashicorp/terraform-plugin-framework/tfsdk"
"github.com/hashicorp/terraform-plugin-framework/types"
+
"github.com/hashicorp/terraform-plugin-framework/types/basetypes"
"github.com/hashicorp/terraform-plugin-log/tflog"
"github.com/stackitcloud/stackit-sdk-go/core/oapierror"
@@ -183,3 +183,29 @@ func SetAndLogStateFields(ctx context.Context, diags *diag.Diagnostics, state *t
}
return ctx
}
+
+// Coalesce returns the first defined value of a set of [attr.Value] values.
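+// For example, Coalesce(types.StringNull(), types.StringValue("fallback")) yields "fallback";
+// if no value is defined, the zero value of T is returned.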
+func Coalesce[T interface {
+ IsNull() bool
+ IsUnknown() bool
+}](vals ...T) (r T) {
+ for _, t := range vals {
+ if !t.IsNull() && !t.IsUnknown() {
+ return t
+ }
+ }
+ return r
+}
+
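+// Join concatenates the defined parts with the given separator, skipping
+// null and unknown values, e.g. Join(",", foo, null, baz) yields "foo,baz".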
+func Join(separator string, parts ...types.String) types.String {
+ var builder strings.Builder
+ for _, part := range parts {
+ if IsUndefined(part) {
+ continue
+ }
+ // only insert the separator once at least one defined part has been written,
+ // so leading null/unknown parts do not produce a leading separator
+ if builder.Len() > 0 {
+ builder.WriteString(separator)
+ }
+ builder.WriteString(part.ValueString())
+ }
+ return types.StringValue(builder.String())
+}
diff --git a/stackit/internal/utils/utils_test.go b/stackit/internal/utils/utils_test.go
index 0dc5bf5b3..aa1bc8a1a 100644
--- a/stackit/internal/utils/utils_test.go
+++ b/stackit/internal/utils/utils_test.go
@@ -610,3 +610,50 @@ func TestSetAndLogStateFields(t *testing.T) {
})
}
}
+
+func TestJoin(t *testing.T) {
+ tests := []struct {
+ name string
+ parts []types.String
+ want types.String
+ }{
+ {"non-empty list", []types.String{types.StringValue("foo"), types.StringValue("bar"), types.StringValue("baz")}, types.StringValue("foo,bar,baz")},
+ {"empty list", []types.String{}, types.StringValue("")},
+ {"single element list", []types.String{types.StringValue("foo")}, types.StringValue("foo")},
+ {"list with empty elements", []types.String{types.StringValue("foo"), types.StringNull(), types.StringValue("baz")}, types.StringValue("foo,baz")},
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if got := Join(",", tt.parts...); !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("Join() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func TestCoalesce(t *testing.T) {
+ var (
+ foo = types.StringValue("foo")
+ bar = types.StringValue("bar")
+ baz = types.StringValue("baz")
+ null = types.StringNull()
+ )
+ tests := []struct {
+ name string
+ vals []attr.Value
+ want attr.Value
+ }{
+ {"list with all defined elements", []attr.Value{foo, bar, baz}, foo},
+ {"empty list", []attr.Value{}, nil},
+ {"first element undefined", []attr.Value{null, bar, baz}, bar},
+ {"first and second element undefined", []attr.Value{null, null, baz}, baz},
+ {"all elements undefined", []attr.Value{null, null, null}, nil},
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if gotR := Coalesce(tt.vals...); !reflect.DeepEqual(gotR, tt.want) {
+ t.Errorf("Coalesce() = %v, want %v", gotR, tt.want)
+ }
+ })
+ }
+}
diff --git a/stackit/provider.go b/stackit/provider.go
index cebb5be03..d93b2b3e1 100644
--- a/stackit/provider.go
+++ b/stackit/provider.go
@@ -92,6 +92,10 @@ import (
serviceAccount "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/services/serviceaccount/account"
serviceAccountKey "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/services/serviceaccount/key"
serviceAccountToken "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/services/serviceaccount/token"
+ exportpolicy "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/services/sfs/export-policy"
+ "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/services/sfs/resourcepool"
+ "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/services/sfs/share"
+ "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/services/sfs/snapshots"
skeCluster "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/services/ske/cluster"
skeKubeconfig "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/services/ske/kubeconfig"
sqlServerFlexInstance "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/services/sqlserverflex/instance"
@@ -160,6 +164,7 @@ type providerModel struct {
ServerUpdateCustomEndpoint types.String `tfsdk:"server_update_custom_endpoint"`
ServiceAccountCustomEndpoint types.String `tfsdk:"service_account_custom_endpoint"`
ServiceEnablementCustomEndpoint types.String `tfsdk:"service_enablement_custom_endpoint"`
+ SfsCustomEndpoint types.String `tfsdk:"sfs_custom_endpoint"`
SkeCustomEndpoint types.String `tfsdk:"ske_custom_endpoint"`
SqlServerFlexCustomEndpoint types.String `tfsdk:"sqlserverflex_custom_endpoint"`
TokenCustomEndpoint types.String `tfsdk:"token_custom_endpoint"`
@@ -206,6 +211,7 @@ func (p *Provider) Schema(_ context.Context, _ provider.SchemaRequest, resp *pro
"sqlserverflex_custom_endpoint": "Custom endpoint for the SQL Server Flex service",
"ske_custom_endpoint": "Custom endpoint for the Kubernetes Engine (SKE) service",
"service_enablement_custom_endpoint": "Custom endpoint for the Service Enablement API",
+ "sfs_custom_endpoint": "Custom endpoint for the Stackit Filestorage API",
"token_custom_endpoint": "Custom endpoint for the token API, which is used to request access tokens when using the key flow",
"enable_beta_resources": "Enable beta resources. Default is false.",
"experiments": fmt.Sprintf("Enables experiments. These are unstable features without official support. More information can be found in the README. Available Experiments: %v", strings.Join(features.AvailableExperiments, ", ")),
@@ -374,6 +380,10 @@ func (p *Provider) Schema(_ context.Context, _ provider.SchemaRequest, resp *pro
Optional: true,
Description: descriptions["service_enablement_custom_endpoint"],
},
+ "sfs_custom_endpoint": schema.StringAttribute{
+ Optional: true,
+ Description: descriptions["sfs_custom_endpoint"],
+ },
"token_custom_endpoint": schema.StringAttribute{
Optional: true,
Description: descriptions["token_custom_endpoint"],
@@ -451,6 +461,7 @@ func (p *Provider) Configure(ctx context.Context, req provider.ConfigureRequest,
setStringField(providerConfig.ServerUpdateCustomEndpoint, func(v string) { providerData.ServerUpdateCustomEndpoint = v })
setStringField(providerConfig.ServiceAccountCustomEndpoint, func(v string) { providerData.ServiceAccountCustomEndpoint = v })
setStringField(providerConfig.ServiceEnablementCustomEndpoint, func(v string) { providerData.ServiceEnablementCustomEndpoint = v })
+ setStringField(providerConfig.SfsCustomEndpoint, func(v string) { providerData.SfsCustomEndpoint = v })
setStringField(providerConfig.SkeCustomEndpoint, func(v string) { providerData.SKECustomEndpoint = v })
setStringField(providerConfig.SqlServerFlexCustomEndpoint, func(v string) { providerData.SQLServerFlexCustomEndpoint = v })
@@ -558,6 +569,10 @@ func (p *Provider) DataSources(_ context.Context) []func() datasource.DataSource
serverUpdateSchedule.NewSchedulesDataSource,
serviceAccount.NewServiceAccountDataSource,
skeCluster.NewClusterDataSource,
+ resourcepool.NewResourcePoolDataSource,
+ share.NewShareDataSource,
+ exportpolicy.NewExportPolicyDataSource,
+ snapshots.NewResourcePoolSnapshotDataSource,
}
}
@@ -632,6 +647,9 @@ func (p *Provider) Resources(_ context.Context) []func() resource.Resource {
serviceAccountKey.NewServiceAccountKeyResource,
skeCluster.NewClusterResource,
skeKubeconfig.NewKubeconfigResource,
+ resourcepool.NewResourcePoolResource,
+ share.NewShareResource,
+ exportpolicy.NewExportPolicyResource,
}
resources = append(resources, roleAssignements.NewRoleAssignmentResources()...)