From 65b2a91c6f553a9bbdd8b410d9f4e1063ad22978 Mon Sep 17 00:00:00 2001 From: Jeffrey Aven Date: Tue, 22 Jul 2025 21:58:26 +1000 Subject: [PATCH 1/5] interop demo done --- CHANGELOG.md | 4 + .../aws/DELETE_kms/metastore_key.iql | 50 -- .../resources/aws/DELETE_vpc/elastic_ip.iql | 56 -- .../DELETE_vpc/get_main_route_table_id.iql | 6 - .../resources/aws/DELETE_vpc/inet_gateway.iql | 54 -- .../aws/DELETE_vpc/inet_gw_attachment.iql | 39 -- .../resources/aws/DELETE_vpc/inet_route.iql | 41 -- .../resources/aws/DELETE_vpc/nat_gateway.iql | 53 -- .../aws/DELETE_vpc/nat_inet_route.iql | 41 -- .../resources/aws/DELETE_vpc/route_table.iql | 54 -- .../aws/DELETE_vpc/security_group.iql | 41 -- .../aws/DELETE_vpc/security_group_rules.iql | 27 - .../resources/aws/DELETE_vpc/subnet.iql | 43 -- .../aws/DELETE_vpc/subnet_rt_assn.iql | 34 - .../DELETE_vpc/tag_main_vpc_route_table.iql | 7 - .../resources/aws/DELETE_vpc/vpc.iql | 60 -- .../resources/aws/DELETE_vpc/vpc_endpoint.iql | 60 -- .../serverless/resources/aws/iam/iam_role.iql | 2 +- .../databricks_account/workspace.iql | 4 +- .../external_location.iql | 46 ++ .../serverless/stackql_manifest.yml | 68 +- .../databricks_account/credentials.iql | 41 -- .../databricks_account/get_users.iql | 6 - .../resources/databricks_account/network.iql | 46 -- .../storage_configuration.iql | 35 - .../update_group_membership.iql | 6 - .../databricks_account/workspace.iql | 44 -- .../databricks_account/workspace_group.iql | 32 - .../workspace_permission_assignments.iql | 32 - .../databricks_workspace/catalog.iql | 32 + .../resources/databricks_workspace/schema.iql | 32 + .../service_principal.iql | 31 + .../storage_credential.iql | 43 -- .../resources/snowflake/statement.iql | 19 + .../stackql_manifest.yml | 656 ++++++------------ setup.py | 8 +- stackql_deploy/__init__.py | 2 +- stackql_deploy/cmd/build.py | 51 +- stackql_deploy/cmd/teardown.py | 29 +- stackql_deploy/cmd/test.py | 56 +- website/docs/manifest-file.md | 12 + website/docs/manifest_fields/index.js | 1 + website/docs/manifest_fields/resources.mdx | 2 + .../resources/skipvalidation.mdx | 45 ++ .../docs/manifest_fields/resources/sql.mdx | 4 +- 45 files changed, 605 insertions(+), 1450 deletions(-) delete mode 100644 examples/databricks/serverless/resources/aws/DELETE_kms/metastore_key.iql delete mode 100644 examples/databricks/serverless/resources/aws/DELETE_vpc/elastic_ip.iql delete mode 100644 examples/databricks/serverless/resources/aws/DELETE_vpc/get_main_route_table_id.iql delete mode 100644 examples/databricks/serverless/resources/aws/DELETE_vpc/inet_gateway.iql delete mode 100644 examples/databricks/serverless/resources/aws/DELETE_vpc/inet_gw_attachment.iql delete mode 100644 examples/databricks/serverless/resources/aws/DELETE_vpc/inet_route.iql delete mode 100644 examples/databricks/serverless/resources/aws/DELETE_vpc/nat_gateway.iql delete mode 100644 examples/databricks/serverless/resources/aws/DELETE_vpc/nat_inet_route.iql delete mode 100644 examples/databricks/serverless/resources/aws/DELETE_vpc/route_table.iql delete mode 100644 examples/databricks/serverless/resources/aws/DELETE_vpc/security_group.iql delete mode 100644 examples/databricks/serverless/resources/aws/DELETE_vpc/security_group_rules.iql delete mode 100644 examples/databricks/serverless/resources/aws/DELETE_vpc/subnet.iql delete mode 100644 examples/databricks/serverless/resources/aws/DELETE_vpc/subnet_rt_assn.iql delete mode 100644 examples/databricks/serverless/resources/aws/DELETE_vpc/tag_main_vpc_route_table.iql delete mode 100644 
examples/databricks/serverless/resources/aws/DELETE_vpc/vpc.iql delete mode 100644 examples/databricks/serverless/resources/aws/DELETE_vpc/vpc_endpoint.iql create mode 100644 examples/databricks/serverless/resources/databricks_workspace/external_location.iql delete mode 100644 examples/databricks/snowflake-interoperability/resources/databricks_account/credentials.iql delete mode 100644 examples/databricks/snowflake-interoperability/resources/databricks_account/get_users.iql delete mode 100644 examples/databricks/snowflake-interoperability/resources/databricks_account/network.iql delete mode 100644 examples/databricks/snowflake-interoperability/resources/databricks_account/storage_configuration.iql delete mode 100644 examples/databricks/snowflake-interoperability/resources/databricks_account/update_group_membership.iql delete mode 100644 examples/databricks/snowflake-interoperability/resources/databricks_account/workspace.iql delete mode 100644 examples/databricks/snowflake-interoperability/resources/databricks_account/workspace_group.iql delete mode 100644 examples/databricks/snowflake-interoperability/resources/databricks_account/workspace_permission_assignments.iql create mode 100644 examples/databricks/snowflake-interoperability/resources/databricks_workspace/catalog.iql create mode 100644 examples/databricks/snowflake-interoperability/resources/databricks_workspace/schema.iql create mode 100644 examples/databricks/snowflake-interoperability/resources/databricks_workspace/service_principal.iql delete mode 100644 examples/databricks/snowflake-interoperability/resources/databricks_workspace/storage_credential.iql create mode 100644 examples/databricks/snowflake-interoperability/resources/snowflake/statement.iql create mode 100644 website/docs/manifest_fields/resources/skipvalidation.mdx diff --git a/CHANGELOG.md b/CHANGELOG.md index 5febd97..5bca085 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,9 @@ # Changelog +## 1.8.6 (2025-07-22) + +- Added support for inline `sql` for `command` and `query` resource types + ## 1.8.5 (2025-06-30) - Added support for resource scoped variables diff --git a/examples/databricks/serverless/resources/aws/DELETE_kms/metastore_key.iql b/examples/databricks/serverless/resources/aws/DELETE_kms/metastore_key.iql deleted file mode 100644 index 2669e91..0000000 --- a/examples/databricks/serverless/resources/aws/DELETE_kms/metastore_key.iql +++ /dev/null @@ -1,50 +0,0 @@ -/*+ exists */ -SELECT COUNT(*) as count FROM -( - SELECT key_id, - json_group_object(tag_key, tag_value) as tags - FROM aws.kms.key_tags - GROUP BY key_id - HAVING json_extract(tags, '$.Provisioner') = 'stackql' - AND json_extract(tags, '$.StackName') = '{{ stack_name }}' - AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}' - AND json_extract(tags, '$.Purpose') = 'UC Metastore Encryption' -) t - -/*+ create */ -INSERT INTO aws.kms.keys ( - Description, - KeyPolicy, - Tags, - region -) -SELECT -'{{ description }}', -'{{ key_policy }}', -'{{ tags }}', -'{{ region }}'; - -/*+ statecheck, retries=3, retry_delay=5 */ -SELECT COUNT(*) as count FROM -( - SELECT key_id, - json_group_object(tag_key, tag_value) as tags - FROM aws.kms.key_tags - WHERE description = '{{ description }}' - GROUP BY key_id - HAVING json_extract(tags, '$.Provisioner') = 'stackql' - AND json_extract(tags, '$.StackName') = '{{ stack_name }}' - AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}' - AND json_extract(tags, '$.Purpose') = 'UC Metastore Encryption' -) t - -/*+ exports, retries=3, retry_delay=5 */ 
-SELECT key_id, arn as key_arn, -json_group_object(tag_key, tag_value) as tags -FROM aws.kms.key_tags -WHERE description = '{{ description }}' -GROUP BY key_id -HAVING json_extract(tags, '$.Provisioner') = 'stackql' -AND json_extract(tags, '$.StackName') = '{{ stack_name }}' -AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}' -AND json_extract(tags, '$.Purpose') = 'UC Metastore Encryption' diff --git a/examples/databricks/serverless/resources/aws/DELETE_vpc/elastic_ip.iql b/examples/databricks/serverless/resources/aws/DELETE_vpc/elastic_ip.iql deleted file mode 100644 index d4dd982..0000000 --- a/examples/databricks/serverless/resources/aws/DELETE_vpc/elastic_ip.iql +++ /dev/null @@ -1,56 +0,0 @@ -/*+ exists */ -SELECT COUNT(*) as count FROM -( -SELECT allocation_id, -json_group_object(tag_key, tag_value) as tags -FROM aws.ec2.eip_tags -WHERE region = '{{ region }}' -GROUP BY allocation_id -HAVING json_extract(tags, '$.Provisioner') = 'stackql' -AND json_extract(tags, '$.StackName') = '{{ stack_name }}' -AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}' -) t - -/*+ create */ -INSERT INTO aws.ec2.eips ( - NetworkBorderGroup, - Tags, - ClientToken, - region -) -SELECT -'{{ region }}', -'{{ tags }}', -'{{ idempotency_token }}', -'{{ region }}' - -/*+ statecheck, retries=3, retry_delay=5 */ -SELECT COUNT(*) as count FROM -( -SELECT allocation_id, -json_group_object(tag_key, tag_value) as tags -FROM aws.ec2.eip_tags -WHERE region = '{{ region }}' -GROUP BY allocation_id -HAVING json_extract(tags, '$.Provisioner') = 'stackql' -AND json_extract(tags, '$.StackName') = '{{ stack_name }}' -AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}' -) t - -/*+ exports, retries=3, retry_delay=5 */ -SELECT allocation_id as eip_allocation_id, public_ip as eip_public_id FROM -( -SELECT allocation_id, public_ip, -json_group_object(tag_key, tag_value) as tags -FROM aws.ec2.eip_tags -WHERE region = '{{ region }}' -GROUP BY allocation_id -HAVING json_extract(tags, '$.Provisioner') = 'stackql' -AND json_extract(tags, '$.StackName') = '{{ stack_name }}' -AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}' -) t - -/*+ delete */ -DELETE FROM aws.ec2.eips -WHERE data__Identifier = '{{ eip_public_id }}|{{ eip_allocation_id}}' -AND region = '{{ region }}' diff --git a/examples/databricks/serverless/resources/aws/DELETE_vpc/get_main_route_table_id.iql b/examples/databricks/serverless/resources/aws/DELETE_vpc/get_main_route_table_id.iql deleted file mode 100644 index 7679dd2..0000000 --- a/examples/databricks/serverless/resources/aws/DELETE_vpc/get_main_route_table_id.iql +++ /dev/null @@ -1,6 +0,0 @@ -/*+ exports, retries=3, retry_delay=5 */ -SELECT -route_table_id as main_route_table_id -FROM aws.ec2.route_tables -WHERE region = '{{ region }}' -AND vpc_id = '{{ vpc_id }}'; \ No newline at end of file diff --git a/examples/databricks/serverless/resources/aws/DELETE_vpc/inet_gateway.iql b/examples/databricks/serverless/resources/aws/DELETE_vpc/inet_gateway.iql deleted file mode 100644 index dc42032..0000000 --- a/examples/databricks/serverless/resources/aws/DELETE_vpc/inet_gateway.iql +++ /dev/null @@ -1,54 +0,0 @@ -/*+ exists */ -SELECT COUNT(*) as count FROM -( -SELECT internet_gateway_id, -json_group_object(tag_key, tag_value) as tags -FROM aws.ec2.internet_gateway_tags -WHERE region = '{{ region }}' -GROUP BY internet_gateway_id -HAVING json_extract(tags, '$.Provisioner') = 'stackql' -AND json_extract(tags, '$.StackName') = '{{ stack_name }}' -AND json_extract(tags, '$.StackEnv') = '{{ stack_env 
}}' -) t - -/*+ create */ -INSERT INTO aws.ec2.internet_gateways ( - Tags, - ClientToken, - region -) -SELECT -'{{ tags }}', -'{{ idempotency_token }}', -'{{ region }}'; - -/*+ statecheck, retries=3, retry_delay=5 */ -SELECT COUNT(*) as count FROM -( -SELECT internet_gateway_id, -json_group_object(tag_key, tag_value) as tags -FROM aws.ec2.internet_gateway_tags -WHERE region = '{{ region }}' -GROUP BY internet_gateway_id -HAVING json_extract(tags, '$.Provisioner') = 'stackql' -AND json_extract(tags, '$.StackName') = '{{ stack_name }}' -AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}' -) t - -/*+ exports, retries=3, retry_delay=5 */ -SELECT internet_gateway_id FROM -( -SELECT internet_gateway_id, -json_group_object(tag_key, tag_value) as tags -FROM aws.ec2.internet_gateway_tags -WHERE region = '{{ region }}' -GROUP BY internet_gateway_id -HAVING json_extract(tags, '$.Provisioner') = 'stackql' -AND json_extract(tags, '$.StackName') = '{{ stack_name }}' -AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}' -) t; - -/*+ delete */ -DELETE FROM aws.ec2.internet_gateways -WHERE data__Identifier = '{{ internet_gateway_id }}' -AND region = '{{ region }}'; diff --git a/examples/databricks/serverless/resources/aws/DELETE_vpc/inet_gw_attachment.iql b/examples/databricks/serverless/resources/aws/DELETE_vpc/inet_gw_attachment.iql deleted file mode 100644 index 31b9d25..0000000 --- a/examples/databricks/serverless/resources/aws/DELETE_vpc/inet_gw_attachment.iql +++ /dev/null @@ -1,39 +0,0 @@ -/*+ exists */ -SELECT COUNT(*) as count FROM -( -SELECT -attachment_type, -vpc_id -FROM aws.ec2.vpc_gateway_attachments -WHERE region = '{{ region }}' -AND internet_gateway_id = '{{ internet_gateway_id }}' -AND vpc_id = '{{ vpc_id }}' -) t - -/*+ create */ -INSERT INTO aws.ec2.vpc_gateway_attachments ( - InternetGatewayId, - VpcId, - region -) -SELECT - '{{ internet_gateway_id }}', - '{{ vpc_id }}', - '{{ region }}'; - -/*+ statecheck, retries=3, retry_delay=5 */ -SELECT COUNT(*) as count FROM -( -SELECT -attachment_type, -vpc_id -FROM aws.ec2.vpc_gateway_attachments -WHERE region = '{{ region }}' -AND internet_gateway_id = '{{ internet_gateway_id }}' -AND vpc_id = '{{ vpc_id }}' -) t - -/*+ delete */ -DELETE FROM aws.ec2.vpc_gateway_attachments -WHERE data__Identifier = 'IGW|{{ vpc_id }}' -AND region = '{{ region }}'; \ No newline at end of file diff --git a/examples/databricks/serverless/resources/aws/DELETE_vpc/inet_route.iql b/examples/databricks/serverless/resources/aws/DELETE_vpc/inet_route.iql deleted file mode 100644 index b46cc0f..0000000 --- a/examples/databricks/serverless/resources/aws/DELETE_vpc/inet_route.iql +++ /dev/null @@ -1,41 +0,0 @@ -/*+ exists */ -SELECT COUNT(*) as count FROM -( -SELECT data__Identifier -FROM aws.ec2.routes -WHERE region = '{{ region }}' -AND data__Identifier = '{{ route_table_id }}|0.0.0.0/0' -) t - -/*+ create */ -INSERT INTO aws.ec2.routes ( - DestinationCidrBlock, - GatewayId, - RouteTableId, - region -) -SELECT - '0.0.0.0/0', - '{{ internet_gateway_id }}', - '{{ route_table_id }}', - '{{ region }}'; - -/*+ statecheck, retries=5, retry_delay=5 */ -SELECT COUNT(*) as count FROM -( -SELECT data__Identifier -FROM aws.ec2.routes -WHERE region = '{{ region }}' -AND data__Identifier = '{{ route_table_id }}|0.0.0.0/0' -) t - -/*+ exports, retries=3, retry_delay=5 */ -SELECT data__Identifier as inet_route_indentifer -FROM aws.ec2.routes -WHERE region = '{{ region }}' -AND data__Identifier = '{{ route_table_id }}|0.0.0.0/0'; - -/*+ delete */ -DELETE FROM aws.ec2.routes 
-WHERE data__Identifier = '{{ inet_route_indentifer }}' -AND region = '{{ region }}'; \ No newline at end of file diff --git a/examples/databricks/serverless/resources/aws/DELETE_vpc/nat_gateway.iql b/examples/databricks/serverless/resources/aws/DELETE_vpc/nat_gateway.iql deleted file mode 100644 index 081fbd2..0000000 --- a/examples/databricks/serverless/resources/aws/DELETE_vpc/nat_gateway.iql +++ /dev/null @@ -1,53 +0,0 @@ -/*+ exists */ -SELECT COUNT(*) as count FROM -( -SELECT nat_gateway_id, -json_group_object(tag_key, tag_value) as tags -FROM aws.ec2.nat_gateway_tags -WHERE region = '{{ region }}' -GROUP BY allocation_id -HAVING json_extract(tags, '$.Provisioner') = 'stackql' -AND json_extract(tags, '$.StackName') = '{{ stack_name }}' -AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}' -) t - -/*+ create */ -INSERT INTO aws.ec2.nat_gateways ( - AllocationId, - SubnetId, - Tags, - region -) -SELECT - '{{ eip_allocation_id }}', - '{{ nat_subnet_id }}', - '{{ tags }}', - '{{ region }}'; - -/*+ statecheck, retries=3, retry_delay=5 */ -SELECT COUNT(*) as count FROM -( -SELECT nat_gateway_id, -json_group_object(tag_key, tag_value) as tags -FROM aws.ec2.nat_gateway_tags -WHERE region = '{{ region }}' -GROUP BY allocation_id -HAVING json_extract(tags, '$.Provisioner') = 'stackql' -AND json_extract(tags, '$.StackName') = '{{ stack_name }}' -AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}' -) t - -/*+ exports, retries=3, retry_delay=5 */ -SELECT nat_gateway_id, -json_group_object(tag_key, tag_value) as tags -FROM aws.ec2.nat_gateway_tags -WHERE region = '{{ region }}' -GROUP BY allocation_id -HAVING json_extract(tags, '$.Provisioner') = 'stackql' -AND json_extract(tags, '$.StackName') = '{{ stack_name }}' -AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}' - -/*+ delete */ -DELETE FROM aws.ec2.nat_gateways -WHERE data__Identifier = '{{ nat_gateway_id }}' -AND region = '{{ region }}'; \ No newline at end of file diff --git a/examples/databricks/serverless/resources/aws/DELETE_vpc/nat_inet_route.iql b/examples/databricks/serverless/resources/aws/DELETE_vpc/nat_inet_route.iql deleted file mode 100644 index 9e750f6..0000000 --- a/examples/databricks/serverless/resources/aws/DELETE_vpc/nat_inet_route.iql +++ /dev/null @@ -1,41 +0,0 @@ -/*+ exists */ -SELECT COUNT(*) as count FROM -( -SELECT data__Identifier -FROM aws.ec2.routes -WHERE region = '{{ region }}' -AND data__Identifier = '{{ route_table_id }}|0.0.0.0/0' -) t - -/*+ create */ -INSERT INTO aws.ec2.routes ( - DestinationCidrBlock, - NatGatewayId, - RouteTableId, - region -) -SELECT - '0.0.0.0/0', - '{{ nat_gateway_id }}', - '{{ route_table_id }}', - '{{ region }}'; - -/*+ statecheck, retries=5, retry_delay=5 */ -SELECT COUNT(*) as count FROM -( -SELECT data__Identifier -FROM aws.ec2.routes -WHERE region = '{{ region }}' -AND data__Identifier = '{{ route_table_id }}|0.0.0.0/0' -) t - -/*+ exports, retries=3, retry_delay=5 */ -SELECT data__Identifier as nat_inet_route_indentifer -FROM aws.ec2.routes -WHERE region = '{{ region }}' -AND data__Identifier = '{{ route_table_id }}|0.0.0.0/0'; - -/*+ delete */ -DELETE FROM aws.ec2.routes -WHERE data__Identifier = '{{ inet_route_indentifer }}' -AND region = '{{ region }}'; \ No newline at end of file diff --git a/examples/databricks/serverless/resources/aws/DELETE_vpc/route_table.iql b/examples/databricks/serverless/resources/aws/DELETE_vpc/route_table.iql deleted file mode 100644 index 7b0aa76..0000000 --- a/examples/databricks/serverless/resources/aws/DELETE_vpc/route_table.iql 
+++ /dev/null @@ -1,54 +0,0 @@ -/*+ exists */ -SELECT COUNT(*) as count FROM -( -SELECT route_table_id, -json_group_object(tag_key, tag_value) as tags -FROM aws.ec2.route_table_tags -WHERE region = '{{ region }}' -GROUP BY route_table_id -HAVING json_extract(tags, '$.Provisioner') = 'stackql' -AND json_extract(tags, '$.StackName') = '{{ stack_name }}' -AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}' -AND json_extract(tags, '$.Name') = '{{ route_table_name }}' -) t - -/*+ create */ -INSERT INTO aws.ec2.route_tables ( - VpcId, - Tags, - region -) -SELECT - '{{ vpc_id }}', - '{{ tags }}', - '{{ region }}'; - -/*+ statecheck, retries=3, retry_delay=5 */ -SELECT COUNT(*) as count FROM -( -SELECT route_table_id, -json_group_object(tag_key, tag_value) as tags -FROM aws.ec2.route_table_tags -WHERE region = '{{ region }}' -GROUP BY route_table_id -HAVING json_extract(tags, '$.Provisioner') = 'stackql' -AND json_extract(tags, '$.StackName') = '{{ stack_name }}' -AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}' -AND json_extract(tags, '$.Name') = '{{ route_table_name }}' -) t - -/*+ exports, retries=3, retry_delay=5 */ -SELECT route_table_id, -json_group_object(tag_key, tag_value) as tags -FROM aws.ec2.route_table_tags -WHERE region = '{{ region }}' -GROUP BY route_table_id -HAVING json_extract(tags, '$.Provisioner') = 'stackql' -AND json_extract(tags, '$.StackName') = '{{ stack_name }}' -AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}' -AND json_extract(tags, '$.Name') = '{{ route_table_name }}' - -/*+ delete */ -DELETE FROM aws.ec2.route_tables -WHERE data__Identifier = '{{ route_table_id }}' -AND region = '{{ region }}'; \ No newline at end of file diff --git a/examples/databricks/serverless/resources/aws/DELETE_vpc/security_group.iql b/examples/databricks/serverless/resources/aws/DELETE_vpc/security_group.iql deleted file mode 100644 index 15e9061..0000000 --- a/examples/databricks/serverless/resources/aws/DELETE_vpc/security_group.iql +++ /dev/null @@ -1,41 +0,0 @@ -/*+ exists */ -SELECT COUNT(*) as count -FROM aws.ec2.security_groups -WHERE region = '{{ region }}' -AND vpc_id = '{{ vpc_id }}' -AND group_name = '{{ group_name }}' - -/*+ create */ -INSERT INTO aws.ec2.security_groups ( - GroupName, - GroupDescription, - VpcId, - Tags, - region -) -SELECT - '{{ group_name }}', - '{{ group_description }}', - '{{ vpc_id }}', - '{{ tags }}', - '{{ region }}'; - -/*+ statecheck, retries=3, retry_delay=5 */ -SELECT COUNT(*) as count -FROM aws.ec2.security_groups -WHERE region = '{{ region }}' -AND vpc_id = '{{ vpc_id }}' -AND group_name = '{{ group_name }}' -AND group_description = '{{ group_description }}' - -/*+ exports, retries=3, retry_delay=5 */ -SELECT group_id as security_group_id -FROM aws.ec2.security_groups -WHERE region = '{{ region }}' -AND vpc_id = '{{ vpc_id }}' -AND group_name = '{{ group_name }}' - -/*+ delete */ -DELETE FROM aws.ec2.security_groups -WHERE data__Identifier = '{{ security_group_id }}' -AND region = '{{ region }}'; \ No newline at end of file diff --git a/examples/databricks/serverless/resources/aws/DELETE_vpc/security_group_rules.iql b/examples/databricks/serverless/resources/aws/DELETE_vpc/security_group_rules.iql deleted file mode 100644 index 62f79eb..0000000 --- a/examples/databricks/serverless/resources/aws/DELETE_vpc/security_group_rules.iql +++ /dev/null @@ -1,27 +0,0 @@ -/*+ exists */ -SELECT COUNT(*) as count -FROM aws.ec2.security_groups -WHERE region = '{{ region }}' -AND data__Identifier = '{{ security_group_id }}' - -/*+ 
createorupdate */ -update aws.ec2.security_groups -set data__PatchDocument = string('{{ { - "SecurityGroupIngress": security_group_ingress, - "SecurityGroupEgress": security_group_egress - } | generate_patch_document }}') -WHERE region = '{{ region }}' -AND data__Identifier = '{{ security_group_id }}' - -/*+ statecheck, retries=5, retry_delay=5 */ -SELECT COUNT(*) as count FROM -( - SELECT - JSON_EQUAL(security_group_ingress, '{{ security_group_ingress }}') as ingress_test, - JSON_EQUAL(security_group_egress, '{{ security_group_egress }}') as egress_test - FROM aws.ec2.security_groups - WHERE region = '{{ region }}' - AND data__Identifier = '{{ security_group_id }}' - AND ingress_test = 1 - AND egress_test = 1 -) t; diff --git a/examples/databricks/serverless/resources/aws/DELETE_vpc/subnet.iql b/examples/databricks/serverless/resources/aws/DELETE_vpc/subnet.iql deleted file mode 100644 index 83667f5..0000000 --- a/examples/databricks/serverless/resources/aws/DELETE_vpc/subnet.iql +++ /dev/null @@ -1,43 +0,0 @@ -/*+ exists */ -SELECT COUNT(*) as count -FROM aws.ec2.subnets -WHERE region = '{{ region }}' -AND vpc_id = '{{ vpc_id }}' -AND cidr_block = '{{ cidr_block }}' - -/*+ create */ -INSERT INTO aws.ec2.subnets ( - VpcId, - CidrBlock, - AvailabilityZone, - MapPublicIpOnLaunch, - Tags, - region -) -SELECT - '{{ vpc_id }}', - '{{ cidr_block }}', - '{{ availability_zone }}', - false, - '{{ tags }}', - '{{ region }}'; - -/*+ statecheck, retries=3, retry_delay=5 */ -SELECT COUNT(*) as count -FROM aws.ec2.subnets -WHERE region = '{{ region }}' -AND vpc_id = '{{ vpc_id }}' -AND cidr_block = '{{ cidr_block }}' -AND availability_zone = '{{ availability_zone }}'; - -/*+ exports, retries=3, retry_delay=5 */ -SELECT subnet_id -FROM aws.ec2.subnets -WHERE region = '{{ region }}' -AND vpc_id = '{{ vpc_id }}' -AND cidr_block = '{{ cidr_block }}'; - -/*+ delete */ -DELETE FROM aws.ec2.subnets -WHERE data__Identifier = '{{ subnet_id }}' -AND region = '{{ region }}'; \ No newline at end of file diff --git a/examples/databricks/serverless/resources/aws/DELETE_vpc/subnet_rt_assn.iql b/examples/databricks/serverless/resources/aws/DELETE_vpc/subnet_rt_assn.iql deleted file mode 100644 index d0c8b33..0000000 --- a/examples/databricks/serverless/resources/aws/DELETE_vpc/subnet_rt_assn.iql +++ /dev/null @@ -1,34 +0,0 @@ -/*+ exists */ -select regexp_like(associationSet, '.*{{ subnet_id }}.*') as count from -aws.ec2_native.route_tables where region = '{{ region }}' -and routeTableId = '{{ route_table_id }}' - -/*+ create */ -INSERT INTO aws.ec2.subnet_route_table_associations ( - RouteTableId, - SubnetId, - ClientToken, - region -) -SELECT - '{{ route_table_id }}', - '{{ subnet_id }}', - '{{ idempotency_token }}', - '{{ region }}'; - -/*+ statecheck, retries=5, retry_delay=5 */ -select regexp_like(associationSet, '.*{{ subnet_id }}.*') as count from -aws.ec2_native.route_tables where region = '{{ region }}' -and routeTableId = '{{ route_table_id }}' - -/*+ exports, retries=5, retry_delay=5 */ -SELECT id as route_table_assn_id -FROM aws.ec2.subnet_route_table_associations -WHERE region = '{{ region }}' -AND route_table_id = '{{ route_table_id }}' -AND subnet_id = '{{ subnet_id }}'; - -/*+ delete */ -DELETE FROM aws.ec2.subnet_route_table_associations -WHERE data__Identifier = '{{ route_table_assn_id }}' -AND region = '{{ region }}'; \ No newline at end of file diff --git a/examples/databricks/serverless/resources/aws/DELETE_vpc/tag_main_vpc_route_table.iql 
b/examples/databricks/serverless/resources/aws/DELETE_vpc/tag_main_vpc_route_table.iql deleted file mode 100644 index cc03c2a..0000000 --- a/examples/databricks/serverless/resources/aws/DELETE_vpc/tag_main_vpc_route_table.iql +++ /dev/null @@ -1,7 +0,0 @@ -/*+ command */ -update aws.ec2.route_tables -set data__PatchDocument = string('{{ { - "Tags": tags - } | generate_patch_document }}') -WHERE region = '{{ region }}' -AND data__Identifier = '{{ main_route_table_id }}'; \ No newline at end of file diff --git a/examples/databricks/serverless/resources/aws/DELETE_vpc/vpc.iql b/examples/databricks/serverless/resources/aws/DELETE_vpc/vpc.iql deleted file mode 100644 index 56e1c54..0000000 --- a/examples/databricks/serverless/resources/aws/DELETE_vpc/vpc.iql +++ /dev/null @@ -1,60 +0,0 @@ -/*+ exists */ -SELECT COUNT(*) as count FROM -( - SELECT vpc_id, - json_group_object(tag_key, tag_value) as tags - FROM aws.ec2.vpc_tags - WHERE region = '{{ region }}' - AND cidr_block = '{{ cidr_block }}' - GROUP BY vpc_id - HAVING json_extract(tags, '$.Provisioner') = 'stackql' - AND json_extract(tags, '$.StackName') = '{{ stack_name }}' - AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}' -) t - -/*+ create */ -INSERT INTO aws.ec2.vpcs ( - CidrBlock, - Tags, - EnableDnsSupport, - EnableDnsHostnames, - ClientToken, - region -) -SELECT - '{{ cidr_block }}', - '{{ tags }}', - true, - true, - '{{ idempotency_token }}', - '{{ region }}' - -/*+ statecheck, retries=3, retry_delay=5 */ -SELECT COUNT(*) as count FROM -( - SELECT vpc_id, - json_group_object(tag_key, tag_value) as tags - FROM aws.ec2.vpc_tags - WHERE region = '{{ region }}' - AND cidr_block = '{{ cidr_block }}' - GROUP BY vpc_id - HAVING json_extract(tags, '$.Provisioner') = 'stackql' - AND json_extract(tags, '$.StackName') = '{{ stack_name }}' - AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}' -) t - -/*+ exports, retries=3, retry_delay=5 */ -SELECT vpc_id, -json_group_object(tag_key, tag_value) as tags -FROM aws.ec2.vpc_tags -WHERE region = '{{ region }}' -AND cidr_block = '{{ cidr_block }}' -GROUP BY vpc_id -HAVING json_extract(tags, '$.Provisioner') = 'stackql' -AND json_extract(tags, '$.StackName') = '{{ stack_name }}' -AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}' - -/*+ delete */ -DELETE FROM aws.ec2.vpcs -WHERE data__Identifier = '{{ vpc_id}}' -AND region = '{{ region }}'; \ No newline at end of file diff --git a/examples/databricks/serverless/resources/aws/DELETE_vpc/vpc_endpoint.iql b/examples/databricks/serverless/resources/aws/DELETE_vpc/vpc_endpoint.iql deleted file mode 100644 index d40f522..0000000 --- a/examples/databricks/serverless/resources/aws/DELETE_vpc/vpc_endpoint.iql +++ /dev/null @@ -1,60 +0,0 @@ -/*+ exists */ -SELECT COUNT(*) as count FROM -( - SELECT id, - json_group_object(tag_key, tag_value) as tags - FROM aws.ec2.vpc_endpoint_tags - WHERE region = '{{ region }}' - AND service_name = '{{ service_name }}' - GROUP BY id - HAVING json_extract(tags, '$.Provisioner') = 'stackql' - AND json_extract(tags, '$.StackName') = '{{ stack_name }}' - AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}' -) t; - -/*+ create */ -INSERT INTO aws.ec2.vpc_endpoints ( - ServiceName, - VpcEndpointType, - VpcId, - RouteTableIds, - Tags, - region -) -SELECT - '{{ service_name }}', - '{{ vpc_endpoint_type }}', - '{{ vpc_id }}', - '{{ route_table_ids }}', - '{{ tags }}', - '{{ region }}'; - -/*+ statecheck, retries=5, retry_delay=5 */ -SELECT COUNT(*) as count FROM -( - SELECT id, - json_group_object(tag_key, 
tag_value) as tags - FROM aws.ec2.vpc_endpoint_tags - WHERE region = '{{ region }}' - AND service_name = '{{ service_name }}' - GROUP BY id - HAVING json_extract(tags, '$.Provisioner') = 'stackql' - AND json_extract(tags, '$.StackName') = '{{ stack_name }}' - AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}' -) t; - -/*+ exports, retries=3, retry_delay=5 */ -SELECT id as s3_gateway_endpoint_id, -json_group_object(tag_key, tag_value) as tags -FROM aws.ec2.vpc_endpoint_tags -WHERE region = '{{ region }}' -AND service_name = '{{ service_name }}' -GROUP BY id -HAVING json_extract(tags, '$.Provisioner') = 'stackql' -AND json_extract(tags, '$.StackName') = '{{ stack_name }}' -AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}'; - -/*+ delete */ -DELETE FROM aws.ec2.vpc_endpoints -WHERE data__Identifier = 's3_gateway_endpoint_id' -AND region = 'us-east-1'; \ No newline at end of file diff --git a/examples/databricks/serverless/resources/aws/iam/iam_role.iql b/examples/databricks/serverless/resources/aws/iam/iam_role.iql index ee628c4..4e4f6fa 100644 --- a/examples/databricks/serverless/resources/aws/iam/iam_role.iql +++ b/examples/databricks/serverless/resources/aws/iam/iam_role.iql @@ -34,7 +34,7 @@ set data__PatchDocument = string('{{ { WHERE data__Identifier = '{{ role_name }}' AND region = 'us-east-1'; -/*+ statecheck, retries=3, retry_delay=5 */ +/*+ statecheck, retries=5, retry_delay=5 */ SELECT COUNT(*) as count FROM ( SELECT max_session_duration, diff --git a/examples/databricks/serverless/resources/databricks_account/workspace.iql b/examples/databricks/serverless/resources/databricks_account/workspace.iql index 9da2dea..9be8c78 100644 --- a/examples/databricks/serverless/resources/databricks_account/workspace.iql +++ b/examples/databricks/serverless/resources/databricks_account/workspace.iql @@ -32,7 +32,9 @@ AND storage_configuration_id = '{{ storage_configuration_id }}' AND pricing_tier = '{{ pricing_tier }}' /*+ exports */ -SELECT workspace_id AS databricks_workspace_id, +SELECT +'{{ workspace_name }}' AS databricks_workspace_name, +workspace_id AS databricks_workspace_id, deployment_name AS databricks_deployment_name FROM databricks_account.provisioning.workspaces WHERE account_id = '{{ databricks_account_id }}' diff --git a/examples/databricks/serverless/resources/databricks_workspace/external_location.iql b/examples/databricks/serverless/resources/databricks_workspace/external_location.iql new file mode 100644 index 0000000..971ecdf --- /dev/null +++ b/examples/databricks/serverless/resources/databricks_workspace/external_location.iql @@ -0,0 +1,46 @@ +/*+ exists */ +SELECT COUNT(*) as count +FROM databricks_workspace.unitycatalog.external_locations +WHERE name = '{{ name | replace('-', '_') }}' AND +deployment_name = '{{ databricks_deployment_name }}'; + +/*+ create */ +INSERT INTO databricks_workspace.unitycatalog.external_locations ( +deployment_name, +data__name, +data__url, +data__credential_name, +data__read_only, +data__comment, +data__skip_validation +) +SELECT +'{{ databricks_deployment_name }}', +'{{ name | replace('-', '_') }}', +'{{ url }}', +'{{ credential_name | replace('-', '_') }}', +{{ read_only }}, +'{{ comment }}', +{{ skip_validation }} +; + +/*+ statecheck, retries=3, retry_delay=5 */ +SELECT COUNT(*) as count +FROM databricks_workspace.unitycatalog.external_locations +WHERE name = '{{ name | replace('-', '_') }}' AND +deployment_name = '{{ databricks_deployment_name }}' +AND url = '{{ url }}' AND +credential_name = '{{ credential_name | 
replace('-', '_') }}' AND +read_only = {{ read_only }} AND +comment = '{{ comment }}'; + +/*+ exports */ +SELECT name as external_location_name +FROM databricks_workspace.unitycatalog.external_locations +WHERE name = '{{ name | replace('-', '_') }}' AND +deployment_name = '{{ databricks_deployment_name }}' + +/*+ delete */ +DELETE FROM databricks_workspace.unitycatalog.external_locations +WHERE name = '{{ name | replace('-', '_') }}' AND +deployment_name = '{{ databricks_deployment_name }}'; \ No newline at end of file diff --git a/examples/databricks/serverless/stackql_manifest.yml b/examples/databricks/serverless/stackql_manifest.yml index 02118dd..c20030c 100644 --- a/examples/databricks/serverless/stackql_manifest.yml +++ b/examples/databricks/serverless/stackql_manifest.yml @@ -18,9 +18,6 @@ globals: - name: region description: aws region value: "{{ AWS_REGION }}" - - name: region - description: aws region - value: "{{ AWS_REGION }}" - name: global_tags value: - Key: Provisioner @@ -361,6 +358,7 @@ resources: Value: "Unity Catalog Storage Credential" merge: - global_tags + skip_validation: true exports: - aws_iam_role_arn: metastore_access_role_arn @@ -381,6 +379,7 @@ resources: - name: pricing_tier value: PREMIUM exports: + - databricks_workspace_name - databricks_workspace_id - databricks_deployment_name @@ -412,7 +411,7 @@ resources: - name: databricks_workspace/storage_credential props: - name: name - value: "{{ stack_name }}-{{ stack_env }}-storage-credential" + value: "{{ stack_name }}_{{ stack_env }}_storage_credential" - name: comment value: "Storage credential for {{ stack_name }} {{ stack_env }} metastore S3 access" - name: read_only @@ -426,20 +425,6 @@ resources: - storage_credential_name - storage_credential_external_id - - name: databricks_workspace/unitycatalog/grants - type: command - props: - - name: privileges - value: - - "ALL_PRIVILEGES" - - "MANAGE" - sql: | - UPDATE databricks_workspace.unitycatalog.grants - SET data__changes = '[{"add": {{ privileges }},"principal": "{{ databricks_group_name }}"}]' - WHERE full_name = '{{ storage_credential_name }}' AND - securable_type = 'storage_credential' AND - deployment_name = '{{ databricks_deployment_name }}'; - - name: aws/iam/update_metastore_access_role type: command props: @@ -458,3 +443,50 @@ resources: Condition: StringEquals: sts:ExternalId: "{{ storage_credential_external_id }}" + + - name: databricks_workspace/unitycatalog/credential_grants + type: command + props: + - name: privileges + value: + - "ALL_PRIVILEGES" + - "MANAGE" + sql: | + UPDATE databricks_workspace.unitycatalog.grants + SET data__changes = '[{"add": {{ privileges }},"principal": "{{ databricks_group_name }}"}]' + WHERE full_name = '{{ storage_credential_name }}' AND + securable_type = 'storage_credential' AND + deployment_name = '{{ databricks_deployment_name }}'; + + - name: databricks_workspace/external_location + props: + - name: name + value: "{{ stack_name }}_{{ stack_env }}_external_location" + - name: comment + value: "External location for {{ stack_name }} {{ stack_env }} metastore S3 access" + - name: url + value: "s3://{{ aws_s3_metastore_bucket_name }}/unitycatalog/demo" + - name: credential_name + value: "{{ storage_credential_name }}" + - name: read_only + value: false + - name: skip_validation + value: false + exports: + - external_location_name + + - name: databricks_workspace/unitycatalog/location_grants + type: command + props: + - name: privileges + value: + - "ALL_PRIVILEGES" + - "MANAGE" + sql: | + UPDATE 
databricks_workspace.unitycatalog.grants + SET data__changes = '[{"add": {{ privileges }},"principal": "{{ databricks_group_name }}"}]' + WHERE full_name = '{{ external_location_name }}' AND + securable_type = 'external_location' AND + deployment_name = '{{ databricks_deployment_name }}'; + + diff --git a/examples/databricks/snowflake-interoperability/resources/databricks_account/credentials.iql b/examples/databricks/snowflake-interoperability/resources/databricks_account/credentials.iql deleted file mode 100644 index c0d8327..0000000 --- a/examples/databricks/snowflake-interoperability/resources/databricks_account/credentials.iql +++ /dev/null @@ -1,41 +0,0 @@ -/*+ exists */ -SELECT COUNT(*) as count -FROM databricks_account.provisioning.credentials -WHERE account_id = '{{ databricks_account_id }}' -AND credentials_name = '{{ credentials_name }}' - -/*+ create */ -INSERT INTO databricks_account.provisioning.credentials ( -account_id, -data__credentials_name, -data__aws_credentials -) -SELECT -'{{ databricks_account_id }}', -'{{ credentials_name }}', -'{{ aws_credentials }}' - -/*+ statecheck, retries=3, retry_delay=5 */ -SELECT COUNT(*) as count FROM -( -SELECT -credentials_id -FROM databricks_account.provisioning.credentials -WHERE account_id = '{{ databricks_account_id }}' -AND credentials_name = '{{ credentials_name }}' -AND JSON_EXTRACT(aws_credentials, '$.sts_role.role_arn') = '{{ aws_iam_cross_account_role_arn }}' -) t - -/*+ exports */ -SELECT -'{{ credentials_name }}' as databricks_credentials_name, -credentials_id as databricks_credentials_id, -JSON_EXTRACT(aws_credentials, '$.sts_role.external_id') as databricks_role_external_id -FROM databricks_account.provisioning.credentials -WHERE account_id = '{{ databricks_account_id }}' -AND credentials_name = '{{ credentials_name }}' - -/*+ delete */ -DELETE FROM databricks_account.provisioning.credentials -WHERE account_id = '{{ databricks_account_id }}' AND -credentials_id = '{{ databricks_credentials_id }}'; \ No newline at end of file diff --git a/examples/databricks/snowflake-interoperability/resources/databricks_account/get_users.iql b/examples/databricks/snowflake-interoperability/resources/databricks_account/get_users.iql deleted file mode 100644 index e94c2d7..0000000 --- a/examples/databricks/snowflake-interoperability/resources/databricks_account/get_users.iql +++ /dev/null @@ -1,6 +0,0 @@ -/*+ exports, retries=3, retry_delay=5 */ -SELECT -JSON_GROUP_ARRAY(JSON_OBJECT('value', id)) as databricks_workspace_group_members -FROM databricks_account.iam.users -WHERE account_id = '{{ databricks_account_id }}' -AND userName in {{ users | sql_list }}; \ No newline at end of file diff --git a/examples/databricks/snowflake-interoperability/resources/databricks_account/network.iql b/examples/databricks/snowflake-interoperability/resources/databricks_account/network.iql deleted file mode 100644 index 45e0b0a..0000000 --- a/examples/databricks/snowflake-interoperability/resources/databricks_account/network.iql +++ /dev/null @@ -1,46 +0,0 @@ -/*+ exists */ -SELECT COUNT(*) as count -FROM databricks_account.provisioning.networks -WHERE account_id = '{{ databricks_account_id }}' -AND network_name = '{{ databricks_network_name }}' - -/*+ create */ -INSERT INTO databricks_account.provisioning.networks ( -account_id, -data__network_name, -data__vpc_id, -data__subnet_ids, -data__security_group_ids -) -SELECT -'{{ databricks_account_id }}', -'{{ databricks_network_name }}', -'{{ vpc_id }}', -'{{ subnet_ids }}', -'{{ security_group_ids }}' - -/*+ 
statecheck, retries=3, retry_delay=5 */ -SELECT COUNT(*) as count FROM -( -SELECT -JSON_EQUAL(subnet_ids, '{{ subnet_ids }}') as subnet_test, -JSON_EQUAL(security_group_ids, '{{ security_group_ids }}') as sg_test -FROM databricks_account.provisioning.networks -WHERE account_id = '{{ databricks_account_id }}' -AND network_name = '{{ databricks_network_name }}' -AND vpc_id = '{{ vpc_id }}' -AND subnet_test = 1 -AND sg_test = 1 -)t - -/*+ exports */ -SELECT -network_id as databricks_network_id -FROM databricks_account.provisioning.networks -WHERE account_id = '{{ databricks_account_id }}' AND -network_name = '{{ databricks_network_name }}' - -/*+ delete */ -DELETE FROM databricks_account.provisioning.networks -WHERE account_id = '{{ databricks_account_id }}' AND -network_id = '{{ databricks_network_id }}' \ No newline at end of file diff --git a/examples/databricks/snowflake-interoperability/resources/databricks_account/storage_configuration.iql b/examples/databricks/snowflake-interoperability/resources/databricks_account/storage_configuration.iql deleted file mode 100644 index 4e60cfc..0000000 --- a/examples/databricks/snowflake-interoperability/resources/databricks_account/storage_configuration.iql +++ /dev/null @@ -1,35 +0,0 @@ -/*+ exists */ -SELECT COUNT(*) as count -FROM databricks_account.provisioning.storage -WHERE account_id = '{{ databricks_account_id }}' -AND storage_configuration_name = '{{ storage_configuration_name }}' - -/*+ create */ -INSERT INTO databricks_account.provisioning.storage ( -account_id, -data__storage_configuration_name, -data__root_bucket_info -) -SELECT -'{{ databricks_account_id }}', -'{{ storage_configuration_name }}', -'{{ root_bucket_info }}' - -/*+ statecheck, retries=3, retry_delay=5 */ -SELECT COUNT(*) as count -FROM databricks_account.provisioning.storage -WHERE account_id = '{{ databricks_account_id }}' -AND storage_configuration_name = '{{ storage_configuration_name }}' -AND JSON_EXTRACT(root_bucket_info, '$.bucket_name') = '{{ aws_s3_workspace_bucket_name }}' - -/*+ exports */ -SELECT -storage_configuration_id as databricks_storage_configuration_id -FROM databricks_account.provisioning.storage -WHERE account_id = '{{ databricks_account_id }}' -AND storage_configuration_name = '{{ storage_configuration_name }}' - -/*+ delete */ -DELETE FROM databricks_account.provisioning.storage -WHERE account_id = '{{ databricks_account_id }}' AND -storage_configuration_id = '{{ databricks_storage_configuration_id }}' \ No newline at end of file diff --git a/examples/databricks/snowflake-interoperability/resources/databricks_account/update_group_membership.iql b/examples/databricks/snowflake-interoperability/resources/databricks_account/update_group_membership.iql deleted file mode 100644 index 375d926..0000000 --- a/examples/databricks/snowflake-interoperability/resources/databricks_account/update_group_membership.iql +++ /dev/null @@ -1,6 +0,0 @@ -/*+ command */ -update databricks_account.iam.groups -set data__schemas = '["urn:ietf:params:scim:api:messages:2.0:PatchOp"]', -data__Operations = '[{"op": "replace", "path": "members", "value": {{ databricks_workspace_group_members }} }]' -WHERE account_id = '{{ databricks_account_id }}' -AND id = '{{ databricks_group_id }}'; diff --git a/examples/databricks/snowflake-interoperability/resources/databricks_account/workspace.iql b/examples/databricks/snowflake-interoperability/resources/databricks_account/workspace.iql deleted file mode 100644 index 9da2dea..0000000 --- 
a/examples/databricks/snowflake-interoperability/resources/databricks_account/workspace.iql +++ /dev/null @@ -1,44 +0,0 @@ -/*+ exists */ -SELECT COUNT(*) as count -FROM databricks_account.provisioning.workspaces -WHERE account_id = '{{ databricks_account_id }}' -AND workspace_name = '{{ workspace_name }}' - -/*+ create */ -INSERT INTO databricks_account.provisioning.workspaces ( -account_id, -data__workspace_name, -data__aws_region, -data__credentials_id, -data__storage_configuration_id, -data__pricing_tier -) -SELECT -'{{ databricks_account_id }}', -'{{ workspace_name }}', -'{{ aws_region }}', -'{{ credentials_id }}', -'{{ storage_configuration_id }}', -'{{ pricing_tier }}' - -/*+ statecheck, retries=3, retry_delay=5 */ -SELECT COUNT(*) as count -FROM databricks_account.provisioning.workspaces -WHERE account_id = '{{ databricks_account_id }}' -AND workspace_name = '{{ workspace_name }}' -AND aws_region = '{{ aws_region }}' -AND credentials_id = '{{ credentials_id }}' -AND storage_configuration_id = '{{ storage_configuration_id }}' -AND pricing_tier = '{{ pricing_tier }}' - -/*+ exports */ -SELECT workspace_id AS databricks_workspace_id, -deployment_name AS databricks_deployment_name -FROM databricks_account.provisioning.workspaces -WHERE account_id = '{{ databricks_account_id }}' -AND workspace_name = '{{ workspace_name }}' - -/*+ delete */ -DELETE FROM databricks_account.provisioning.workspaces -WHERE account_id = '{{ databricks_account_id }}' AND -workspace_id = '{{ databricks_workspace_id }}' \ No newline at end of file diff --git a/examples/databricks/snowflake-interoperability/resources/databricks_account/workspace_group.iql b/examples/databricks/snowflake-interoperability/resources/databricks_account/workspace_group.iql deleted file mode 100644 index 733b6f4..0000000 --- a/examples/databricks/snowflake-interoperability/resources/databricks_account/workspace_group.iql +++ /dev/null @@ -1,32 +0,0 @@ -/*+ exists */ -SELECT COUNT(*) as count -FROM databricks_account.iam.groups -WHERE account_id = '{{ databricks_account_id }}' -AND displayName = '{{ display_name }}' - -/*+ create */ -INSERT INTO databricks_account.iam.groups ( -account_id, -data__displayName -) -SELECT -'{{ databricks_account_id }}', -'{{ display_name }}' - -/*+ statecheck, retries=3, retry_delay=5 */ -SELECT COUNT(*) as count -FROM databricks_account.iam.groups -WHERE account_id = '{{ databricks_account_id }}' -AND displayName = '{{ display_name }}' - -/*+ exports */ -SELECT id AS databricks_group_id, -displayName AS databricks_group_name -FROM databricks_account.iam.groups -WHERE account_id = '{{ databricks_account_id }}' -AND displayName = '{{ display_name }}' - -/*+ delete */ -DELETE FROM databricks_account.iam.groups -WHERE account_id = '{{ databricks_account_id }}' AND -id = '{{ databricks_group_id }}'; \ No newline at end of file diff --git a/examples/databricks/snowflake-interoperability/resources/databricks_account/workspace_permission_assignments.iql b/examples/databricks/snowflake-interoperability/resources/databricks_account/workspace_permission_assignments.iql deleted file mode 100644 index 00387e3..0000000 --- a/examples/databricks/snowflake-interoperability/resources/databricks_account/workspace_permission_assignments.iql +++ /dev/null @@ -1,32 +0,0 @@ -/*+ exists */ -SELECT COUNT(*) as count -FROM databricks_account.iam.workspace_permission_assignments -WHERE account_id = '{{ databricks_account_id }}' AND -workspace_id = '{{ databricks_workspace_id }}' -AND JSON_EXTRACT(principal, '$.principal_id') = {{ 
databricks_group_id }}
-
-/*+ createorupdate */
-INSERT INTO databricks_account.iam.workspace_permission_assignments (
-account_id,
-principal_id,
-workspace_id,
-data__permissions
-)
-SELECT
-'{{ databricks_account_id }}',
-'{{ databricks_group_id }}',
-'{{ databricks_workspace_id }}',
-'["ADMIN"]'
-
-/*+ statecheck, retries=3, retry_delay=5 */
-SELECT COUNT(*) as count
-FROM databricks_account.iam.workspace_permission_assignments
-WHERE account_id = '{{ databricks_account_id }}' AND
-workspace_id = '{{ databricks_workspace_id }}'
-AND JSON_EXTRACT(principal, '$.principal_id') = {{ databricks_group_id }}
-
-/*+ delete */
-DELETE FROM databricks_account.iam.workspace_permission_assignments
-WHERE account_id = '{{ databricks_account_id }}' AND
-principal_id = '{{ databricks_group_id }}' AND
-workspace_id = '{{ databricks_workspace_id }}'
\ No newline at end of file
diff --git a/examples/databricks/snowflake-interoperability/resources/databricks_workspace/catalog.iql b/examples/databricks/snowflake-interoperability/resources/databricks_workspace/catalog.iql
new file mode 100644
index 0000000..79424d2
--- /dev/null
+++ b/examples/databricks/snowflake-interoperability/resources/databricks_workspace/catalog.iql
@@ -0,0 +1,32 @@
+/*+ create */
+INSERT INTO databricks_workspace.unitycatalog.catalogs (
+deployment_name,
+data__name,
+data__comment,
+data__storage_root
+)
+SELECT
+'{{ databricks_deployment_name }}',
+'{{ name }}',
+'{{ comment }}',
+'{{ storage_root }}'
+;
+
+/*+ statecheck */
+SELECT COUNT(*) as count
+FROM databricks_workspace.unitycatalog.catalogs
+WHERE name = '{{ name }}' AND
+deployment_name = '{{ databricks_deployment_name }}'
+AND storage_root = '{{ storage_root }}' AND
+comment = '{{ comment }}';
+
+/*+ exports */
+SELECT name as catalog_name
+FROM databricks_workspace.unitycatalog.catalogs
+WHERE name = '{{ name }}' AND
+deployment_name = '{{ databricks_deployment_name }}';
+
+/*+ delete */
+DELETE FROM databricks_workspace.unitycatalog.catalogs
+WHERE name = '{{ name }}' AND
+deployment_name = '{{ databricks_deployment_name }}';
\ No newline at end of file
diff --git a/examples/databricks/snowflake-interoperability/resources/databricks_workspace/schema.iql b/examples/databricks/snowflake-interoperability/resources/databricks_workspace/schema.iql
new file mode 100644
index 0000000..bb5f401
--- /dev/null
+++ b/examples/databricks/snowflake-interoperability/resources/databricks_workspace/schema.iql
@@ -0,0 +1,32 @@
+/*+ create */
+INSERT INTO databricks_workspace.unitycatalog.schemas (
+deployment_name,
+data__name,
+data__catalog_name,
+data__comment
+)
+SELECT
+'{{ databricks_deployment_name }}',
+'{{ name }}',
+'{{ catalog_name }}',
+'{{ comment }}'
+;
+
+/*+ statecheck */
+SELECT COUNT(*) as count
+FROM databricks_workspace.unitycatalog.schemas
+WHERE deployment_name = '{{ databricks_deployment_name }}'
+AND catalog_name = '{{ catalog_name }}'
+AND name = '{{ name }}';
+
+/*+ exports */
+SELECT name as schema_name
+FROM databricks_workspace.unitycatalog.schemas
+WHERE deployment_name = '{{ databricks_deployment_name }}'
+AND catalog_name = '{{ catalog_name }}'
+AND name = '{{ name }}';
+
+/*+ delete */
+DELETE FROM databricks_workspace.unitycatalog.schemas
+WHERE full_name = '{{ catalog_name }}.{{ name }}' AND
+deployment_name = '{{ databricks_deployment_name }}';
\ No newline at end of file
diff --git a/examples/databricks/snowflake-interoperability/resources/databricks_workspace/service_principal.iql
b/examples/databricks/snowflake-interoperability/resources/databricks_workspace/service_principal.iql new file mode 100644 index 0000000..355adee --- /dev/null +++ b/examples/databricks/snowflake-interoperability/resources/databricks_workspace/service_principal.iql @@ -0,0 +1,31 @@ +/*+ create */ +INSERT INTO databricks_workspace.iam.service_principals ( +deployment_name, +data__displayName, +data__active +) +SELECT +'{{ databricks_deployment_name }}', +'{{ name }}', +true +; + +/*+ statecheck */ +SELECT COUNT(*) as count +FROM databricks_workspace.iam.service_principals +WHERE deployment_name = '{{ databricks_deployment_name }}' +AND displayName = '{{ name }}' +AND active = true; + +/*+ exports */ +SELECT id as service_principal_id, +applicationId as service_principal_application_id, +displayName as service_principal_name +FROM databricks_workspace.iam.service_principals +WHERE deployment_name = '{{ databricks_deployment_name }}' +AND displayName = '{{ name }}'; + +/*+ delete */ +DELETE FROM databricks_workspace.iam.service_principals +WHERE id = '{{ service_principal_id }}' AND +deployment_name = '{{ databricks_deployment_name }}'; \ No newline at end of file diff --git a/examples/databricks/snowflake-interoperability/resources/databricks_workspace/storage_credential.iql b/examples/databricks/snowflake-interoperability/resources/databricks_workspace/storage_credential.iql deleted file mode 100644 index b63f288..0000000 --- a/examples/databricks/snowflake-interoperability/resources/databricks_workspace/storage_credential.iql +++ /dev/null @@ -1,43 +0,0 @@ -/*+ exists */ -SELECT COUNT(*) as count -FROM databricks_workspace.unitycatalog.storage_credentials -WHERE name = '{{ name | replace('-', '_') | upper }}' AND -deployment_name = '{{ databricks_deployment_name }}'; - -/*+ create */ -INSERT INTO databricks_workspace.unitycatalog.storage_credentials ( -deployment_name, -data__name, -data__comment, -data__read_only, -data__aws_iam_role, -data__skip_validation -) -SELECT -'{{ databricks_deployment_name }}', -'{{ name | replace('-', '_') | upper }}', -'{{ comment }}', -'{{ read_only }}', -'{{ aws_iam_role }}', -'{{ skip_validation }}' -; - -/*+ statecheck, retries=3, retry_delay=5 */ -SELECT COUNT(*) as count -FROM databricks_workspace.unitycatalog.storage_credentials -WHERE name = '{{ name | replace('-', '_') | upper }}' AND -deployment_name = '{{ databricks_deployment_name }}' AND -JSON_EXTRACT(aws_iam_role, '$.role_arn') = '{{ metastore_access_role_arn }}'; - -/*+ exports */ -SELECT -name as storage_credential_name, -JSON_EXTRACT(aws_iam_role, '$.external_id') as storage_credential_external_id -FROM databricks_workspace.unitycatalog.storage_credentials -WHERE name = '{{ name | replace('-', '_') | upper }}' AND -deployment_name = '{{ databricks_deployment_name }}'; - -/*+ delete */ -DELETE FROM databricks_workspace.unitycatalog.storage_credentials -WHERE name = '{{ name | replace('-', '_') | upper }}' AND -deployment_name = '{{ databricks_deployment_name }}'; \ No newline at end of file diff --git a/examples/databricks/snowflake-interoperability/resources/snowflake/statement.iql b/examples/databricks/snowflake-interoperability/resources/snowflake/statement.iql new file mode 100644 index 0000000..dfbe1ab --- /dev/null +++ b/examples/databricks/snowflake-interoperability/resources/snowflake/statement.iql @@ -0,0 +1,19 @@ +/*+ command */ +INSERT INTO snowflake.sqlapi.statements ( +data__statement, +data__timeout, +data__database, +data__schema, +data__warehouse, +"User-Agent", +endpoint +) 
+SELECT
+'{{ statement }}',
+{{ timeout }},
+'{{ database }}',
+'{{ schema }}',
+'{{ warehouse }}',
+'{{ "User-Agent" }}',
+'{{ snowflake_endpoint }}'
+;
\ No newline at end of file
diff --git a/examples/databricks/snowflake-interoperability/stackql_manifest.yml b/examples/databricks/snowflake-interoperability/stackql_manifest.yml
index 791062b..e5cbe1f 100644
--- a/examples/databricks/snowflake-interoperability/stackql_manifest.yml
+++ b/examples/databricks/snowflake-interoperability/stackql_manifest.yml
@@ -6,15 +6,33 @@ providers:
   - databricks_account
   - databricks_workspace
 globals:
+  - name: databricks_workspace_name
+    description: databricks workspace name
+    value: "{{ DATABRICKS_WORKSPACE_NAME }}"
+  - name: databricks_metastore_name
+    description: databricks metastore name
+    value: "{{ DATABRICKS_METASTORE_NAME }}"
+  - name: databricks_external_location
+    description: databricks external location
+    value: "{{ DATABRICKS_EXTERNAL_LOCATION }}"
+  - name: databricks_admin_group
+    description: databricks admin group
+    value: "{{ DATABRICKS_ADMIN_GROUP }}"
   - name: databricks_account_id
     description: databricks account id
     value: "{{ DATABRICKS_ACCOUNT_ID }}"
-  - name: snowflake_org
-    description: snowflake org
-    value: "{{ SNOWFLAKE_ORG }}"
-  - name: snowflake_account
-    description: snowflake account
-    value: "{{ SNOWFLAKE_ACCOUNT }}"
+  - name: snowflake_endpoint
+    description: snowflake endpoint (org-account)
+    value: "{{ SNOWFLAKE_ORG }}-{{ SNOWFLAKE_ACCOUNT }}"
+  - name: snowflake_db
+    description: snowflake database
+    value: "{{ SNOWFLAKE_DB }}"
+  - name: snowflake_schema
+    description: snowflake schema
+    value: "{{ SNOWFLAKE_SCHEMA }}"
+  - name: snowflake_whse
+    description: snowflake warehouse
+    value: "{{ SNOWFLAKE_WAREHOUSE }}"
   - name: global_tags
     value:
       - Key: Provisioner
@@ -26,472 +44,244 @@ globals:
 resources:
 
 # ====================================================================================
-# IAM and Cloud Credentials
+# Source Required Variables
 # ====================================================================================
-
-  - name: aws/iam/cross_account_role
-    file: aws/iam/iam_role.iql
-    props:
-      - name: role_name
-        value: "{{ stack_name }}-{{ stack_env }}-role"
-      - name: assume_role_policy_document
-        value:
-          Version: "2012-10-17"
-          Statement:
-            - Sid: ""
-              Effect: "Allow"
-              Principal:
-                AWS: "arn:aws:iam::{{ databricks_aws_account_id }}:root"
-              Action: "sts:AssumeRole"
-              Condition:
-                StringEquals:
-                  sts:ExternalId: "{{ databricks_account_id }}"
-      - name: description
-        value: 'allows Databricks to access resources in ({{ stack_name }}-{{ stack_env }})'
-      - name: path
-        value: '/'
-      - name: policies
-        value:
-          - PolicyDocument:
-              Statement:
-                - Sid: Stmt1403287045000
-                  Effect: Allow
-                  Action:
-                    - "ec2:AllocateAddress"
-                    - "ec2:AssociateDhcpOptions"
-                    - "ec2:AssociateIamInstanceProfile"
-                    - "ec2:AssociateRouteTable"
-                    - "ec2:AttachInternetGateway"
-                    - "ec2:AttachVolume"
-                    - "ec2:AuthorizeSecurityGroupEgress"
-                    - "ec2:AuthorizeSecurityGroupIngress"
-                    - "ec2:CancelSpotInstanceRequests"
-                    - "ec2:CreateDhcpOptions"
-                    - "ec2:CreateInternetGateway"
-                    - "ec2:CreateKeyPair"
-                    - "ec2:CreateNatGateway"
-                    - "ec2:CreatePlacementGroup"
-                    - "ec2:CreateRoute"
-                    - "ec2:CreateRouteTable"
-                    - "ec2:CreateSecurityGroup"
-                    - "ec2:CreateSubnet"
-                    - "ec2:CreateTags"
-                    - "ec2:CreateVolume"
-                    - "ec2:CreateVpc"
-                    - "ec2:CreateVpcEndpoint"
-                    - "ec2:DeleteDhcpOptions"
-                    - "ec2:DeleteInternetGateway"
-                    - "ec2:DeleteKeyPair"
-                    - "ec2:DeleteNatGateway"
-                    - "ec2:DeletePlacementGroup"
-                    - "ec2:DeleteRoute"
-                    - "ec2:DeleteRouteTable"
-                    - "ec2:DeleteSecurityGroup"
-                    - "ec2:DeleteSubnet"
-                    - "ec2:DeleteTags"
-                    - "ec2:DeleteVolume"
-                    - "ec2:DeleteVpc"
-                    - "ec2:DeleteVpcEndpoints"
-                    - "ec2:DescribeAvailabilityZones"
-                    - "ec2:DescribeIamInstanceProfileAssociations"
-                    - "ec2:DescribeInstanceStatus"
-                    - "ec2:DescribeInstances"
-                    - "ec2:DescribeInternetGateways"
-                    - "ec2:DescribeNatGateways"
-                    - "ec2:DescribePlacementGroups"
-                    - "ec2:DescribePrefixLists"
-                    - "ec2:DescribeReservedInstancesOfferings"
-                    - "ec2:DescribeRouteTables"
-                    - "ec2:DescribeSecurityGroups"
-                    - "ec2:DescribeSpotInstanceRequests"
-                    - "ec2:DescribeSpotPriceHistory"
-                    - "ec2:DescribeSubnets"
-                    - "ec2:DescribeVolumes"
-                    - "ec2:DescribeVpcs"
-                    - "ec2:DescribeVpcAttribute"
-                    - "ec2:DescribeNetworkAcls"
-                    - "ec2:DetachInternetGateway"
-                    - "ec2:DisassociateIamInstanceProfile"
-                    - "ec2:DisassociateRouteTable"
-                    - "ec2:ModifyVpcAttribute"
-                    - "ec2:ReleaseAddress"
-                    - "ec2:ReplaceIamInstanceProfileAssociation"
-                    - "ec2:ReplaceRoute"
-                    - "ec2:RequestSpotInstances"
-                    - "ec2:RevokeSecurityGroupEgress"
-                    - "ec2:RevokeSecurityGroupIngress"
-                    - "ec2:RunInstances"
-                    - "ec2:TerminateInstances"
-                  Resource:
-                    - "*"
-                - Effect: Allow
-                  Action:
-                    - "iam:CreateServiceLinkedRole"
-                    - "iam:PutRolePolicy"
-                  Resource:
-                    - arn:aws:iam::*:role/aws-service-role/spot.amazonaws.com/AWSServiceRoleForEC2Spot
-                  Condition:
-                    StringLike:
-                      "iam:AWSServiceName": spot.amazonaws.com
-              Version: '2012-10-17'
-            PolicyName: "{{ stack_name }}-{{ stack_env }}-policy"
+
+  - name: get_workspace_deployment_name
+    type: query
+    props: []
+    sql: |
+      SELECT
+      deployment_name as databricks_deployment_name,
+      workspace_status,
+      workspace_status_message
+      FROM databricks_account.provisioning.workspaces
+      WHERE account_id = '{{ databricks_account_id }}' AND workspace_name = '{{ databricks_workspace_name }}';
     exports:
-      - aws_iam_role_name: aws_iam_cross_account_role_name
-      - aws_iam_role_arn: aws_iam_cross_account_role_arn
+      - databricks_deployment_name
+      - workspace_status
+      - workspace_status_message
 
-  - name: databricks_account/credentials
-    props:
-      - name: credentials_name
-        value: "{{ stack_name }}-{{ stack_env }}-credentials"
-      - name: aws_credentials
-        value:
-          sts_role:
-            role_arn: "{{ aws_iam_cross_account_role_arn }}"
+  - name: get_metastore_id
+    type: query
+    props: []
+    sql: |
+      SELECT
+      metastore_id
+      FROM databricks_workspace.unitycatalog.metastores
+      WHERE deployment_name = '{{ databricks_deployment_name }}'
+      AND name = '{{ databricks_metastore_name }}';
     exports:
-      - databricks_credentials_name
-      - databricks_credentials_id
-      - databricks_role_external_id
+      - metastore_id
 
 # ====================================================================================
-# Storage
+# Enable External Access
 # ====================================================================================
 
-  - name: aws/s3/workspace_bucket
-    file: aws/s3/s3_bucket.iql
-    props:
-      - name: bucket_name
-        value: "{{ stack_name }}-{{ stack_env }}-root-bucket"
-      - name: ownership_controls
-        value:
-          Rules:
-            - ObjectOwnership: "BucketOwnerPreferred"
-      - name: bucket_encryption
-        value:
-          ServerSideEncryptionConfiguration:
-            - BucketKeyEnabled: true
-              ServerSideEncryptionByDefault:
-                SSEAlgorithm: "AES256"
-      - name: public_access_block_configuration
-        value:
-          BlockPublicAcls: true
-          IgnorePublicAcls: true
-          BlockPublicPolicy: true
-          RestrictPublicBuckets: true
-      - name: versioning_configuration
-        value:
-          Status: "Suspended"
-    exports:
-      - arn: aws_s3_workspace_bucket_arn
-      - bucket_name:
aws_s3_workspace_bucket_name - - - name: aws/s3/workspace_bucket_policy - file: aws/s3/s3_bucket_policy.iql - props: - - name: policy_document - value: - Version: "2012-10-17" - Statement: - - Sid: Grant Databricks Access - Effect: Allow - Principal: - AWS: "arn:aws:iam::{{ databricks_aws_account_id }}:root" - Action: - - "s3:GetObject" - - "s3:GetObjectVersion" - - "s3:PutObject" - - "s3:DeleteObject" - - "s3:ListBucket" - - "s3:GetBucketLocation" - Resource: - - "{{ aws_s3_workspace_bucket_arn }}/*" - - "{{ aws_s3_workspace_bucket_arn }}" - - - name: databricks_account/storage_configuration - props: - - name: storage_configuration_name - value: "{{ stack_name }}-{{ stack_env }}-storage" - - name: root_bucket_info - value: - bucket_name: "{{ aws_s3_workspace_bucket_name }}" - exports: - - databricks_storage_configuration_id + - name: enable_external_access + type: command + props: [] + sql: | + UPDATE databricks_workspace.unitycatalog.metastores + SET data__external_access_enabled = 'true' + WHERE id = '{{ metastore_id }}' AND + deployment_name = '{{ databricks_deployment_name }}'; # ==================================================================================== -# UC Storage Credential and Metastore Catalog Bucket +# DBX UC Catalog and Schema # ==================================================================================== - - name: aws/s3/metastore_bucket - file: aws/s3/s3_bucket.iql + - name: interoperability_catalog + file: databricks_workspace/catalog.iql props: - - name: bucket_name - value: "{{ stack_name }}-{{ stack_env }}-metastore" - - name: ownership_controls - value: - Rules: - - ObjectOwnership: "BucketOwnerPreferred" - - name: bucket_encryption - value: - ServerSideEncryptionConfiguration: - - BucketKeyEnabled: true - ServerSideEncryptionByDefault: - SSEAlgorithm: "AES256" - - name: public_access_block_configuration - value: - BlockPublicAcls: true - IgnorePublicAcls: true - BlockPublicPolicy: true - RestrictPublicBuckets: true - - name: versioning_configuration - value: - Status: "Suspended" + - name: name + value: uc_interoperability + - name: comment + value: "Interoperability demonstration catalog for Databricks and Snowflake" + - name: storage_root + value: "{{ databricks_external_location }}" exports: - - arn: aws_s3_metastore_bucket_arn - - bucket_name: aws_s3_metastore_bucket_name + - catalog_name - - name: aws/iam/metastore_access_role - file: aws/iam/iam_role.iql - props: - - name: role_name - value: "{{ stack_name }}-{{ stack_env }}-metastore-role" - - name: assume_role_policy_document - value: - Version: "2012-10-17" - Statement: - - Effect: "Allow" - Principal: - AWS: - - "arn:aws:iam::414351767826:role/unity-catalog-prod-UCMasterRole-14S5ZJVKOTYTL" - Action: "sts:AssumeRole" - Condition: - StringEquals: - sts:ExternalId: "0000" # Placeholder - - name: description - value: 'Unity Catalog metastore access role for ({{ stack_name }}-{{ stack_env }})' - - name: path - value: '/' - - name: policies - value: - - PolicyName: "MetastoreS3Policy" - PolicyDocument: - Version: "2012-10-17" - Statement: - - Effect: "Allow" - Action: - - "s3:GetObject" - - "s3:PutObject" - - "s3:DeleteObject" - - "s3:ListBucket" - - "s3:GetBucketLocation" - - "s3:ListBucketMultipartUploads" - - "s3:ListMultipartUploadParts" - - "s3:AbortMultipartUpload" - Resource: - - "{{ aws_s3_metastore_bucket_arn }}/*" - - "{{ aws_s3_metastore_bucket_arn }}" - - # - Effect: "Allow" - # Action: - # - "kms:Decrypt" - # - "kms:Encrypt" - # - "kms:GenerateDataKey*" - # Resource: - # - 
"arn:aws:kms:" - - - Effect: "Allow" - Action: - - "sts:AssumeRole" - Resource: - - "arn:aws:iam::{{ databricks_aws_account_id }}:role/{{ stack_name }}-{{ stack_env }}-metastore-role" - - - Sid: "ManagedFileEventsSetupStatement" - Effect: "Allow" - Action: - - "s3:GetBucketNotification" - - "s3:PutBucketNotification" - - "sns:ListSubscriptionsByTopic" - - "sns:GetTopicAttributes" - - "sns:SetTopicAttributes" - - "sns:CreateTopic" - - "sns:TagResource" - - "sns:Publish" - - "sns:Subscribe" - - "sqs:CreateQueue" - - "sqs:DeleteMessage" - - "sqs:ReceiveMessage" - - "sqs:SendMessage" - - "sqs:GetQueueUrl" - - "sqs:GetQueueAttributes" - - "sqs:SetQueueAttributes" - - "sqs:TagQueue" - - "sqs:ChangeMessageVisibility" - - "sqs:PurgeQueue" - Resource: - - "{{ aws_s3_metastore_bucket_arn }}" - - "arn:aws:sqs:*:*:csms-*" - - "arn:aws:sns:*:*:csms-*" - - - Sid: "ManagedFileEventsListStatement" - Effect: "Allow" - Action: - - "sqs:ListQueues" - - "sqs:ListQueueTags" - - "sns:ListTopics" - Resource: - - "arn:aws:sqs:*:*:csms-*" - - "arn:aws:sns:*:*:csms-*" - - - Sid: "ManagedFileEventsTeardownStatement" - Effect: "Allow" - Action: - - "sns:Unsubscribe" - - "sns:DeleteTopic" - - "sqs:DeleteQueue" - Resource: - - "arn:aws:sqs:*:*:csms-*" - - "arn:aws:sns:*:*:csms-*" - - name: tags + - name: catalog_grants + type: command + props: + - name: privileges value: - - Key: Purpose - Value: "Unity Catalog Storage Credential" - merge: - - global_tags - exports: - - aws_iam_role_arn: metastore_access_role_arn - -# ==================================================================================== -# DBX Workspace -# ==================================================================================== - - - name: databricks_account/workspace - props: - - name: workspace_name - value: "{{ stack_name }}-{{ stack_env }}-workspace" - - name: aws_region - value: "{{ region }}" - - name: credentials_id - value: "{{ databricks_credentials_id }}" - - name: storage_configuration_id - value: "{{ databricks_storage_configuration_id }}" - - name: pricing_tier - value: PREMIUM - exports: - - databricks_workspace_id - - databricks_deployment_name - - - name: databricks_account/workspace_group - props: - - name: display_name - value: "{{ stack_name }}-{{ stack_env }}-workspace-admins" - exports: - - databricks_group_id - - databricks_group_name + - "ALL_PRIVILEGES" + - "MANAGE" + sql: | + UPDATE databricks_workspace.unitycatalog.grants + SET data__changes = '[{"add": {{ privileges }},"principal": "{{ databricks_admin_group }}"}]' + WHERE full_name = '{{ catalog_name }}' AND + securable_type = 'catalog' AND + deployment_name = '{{ databricks_deployment_name }}'; - - name: databricks_account/get_users - type: query + - name: interoperability_schema + file: databricks_workspace/schema.iql props: - - name: users - value: - - "javen@stackql.io" - - "krimmer@stackql.io" + - name: name + value: demo_schema + - name: catalog_name + value: "{{ catalog_name}}" + - name: comment + value: "Demo schema for interoperability" exports: - - databricks_workspace_group_members + - schema_name - - name: databricks_account/update_group_membership - type: command - props: [] +# ==================================================================================== +# Create Bronze Iceberg Table, Silver and Gold Delta Tables - Do this in the Workspace +# =================================================================================== - - name: databricks_account/workspace_permission_assignments - props: [] +# 
==================================================================================== +# Create Service Principal and Secret for Catalog Integration +# ==================================================================================== - - name: databricks_workspace/storage_credential + - name: service_principal + file: databricks_workspace/service_principal.iql props: - name: name - value: "{{ stack_name }}-{{ stack_env }}-storage-credential" - - name: comment - value: "Storage credential for {{ stack_name }} {{ stack_env }} metastore S3 access" - - name: read_only - value: false - - name: aws_iam_role - value: - role_arn: "{{ metastore_access_role_arn }}" - - name: skip_validation - value: false - exports: - - storage_credential_name - - storage_credential_external_id + value: interoperability_service_principal + exports: + - service_principal_name + - service_principal_application_id + - service_principal_id - - name: databricks_workspace/unitycatalog/grants + - name: service_principal_grant_external_use type: command props: - name: privileges value: - - "ALL_PRIVILEGES" - - "MANAGE" + - "EXTERNAL_USE_SCHEMA" + - "SELECT" + - "USE_CATALOG" + - "USE_SCHEMA" sql: | UPDATE databricks_workspace.unitycatalog.grants - SET data__changes = '[{"add": {{ privileges }},"principal": "{{ databricks_group_name }}"}]' - WHERE full_name = '{{ storage_credential_name }}' AND - securable_type = 'storage_credential' AND + SET data__changes = '[{"add": {{ privileges }},"principal": "{{ service_principal_application_id }}"}]' + WHERE full_name = '{{ catalog_name }}' AND + securable_type = 'catalog' AND deployment_name = '{{ databricks_deployment_name }}'; - - name: aws/iam/update_metastore_access_role - type: command + - name: service_principal_secret + type: query props: - - name: role_name - value: "{{ stack_name }}-{{ stack_env }}-metastore-role" - - name: assume_role_policy_document - value: - Version: "2012-10-17" - Statement: - - Effect: "Allow" - Principal: - AWS: - - "arn:aws:iam::414351767826:role/unity-catalog-prod-UCMasterRole-14S5ZJVKOTYTL" - - "{{ metastore_access_role_arn }}" - Action: "sts:AssumeRole" - Condition: - StringEquals: - sts:ExternalId: "{{ storage_credential_external_id }}" + - name: name + value: interoperability_service_principal + sql: | + INSERT INTO databricks_account.oauth.service_principal_secrets ( + account_id, + service_principal_id + ) + SELECT + '{{ databricks_account_id }}', + '{{ service_principal_id }}' + RETURNING secret + ; + exports: + - secret + # protected: + # - secret # ==================================================================================== -# DBX UC Catalog, Schema, and Table +# (Snowflake) Create Catalog Integration and Related Objects # ==================================================================================== + + - name: snowflake_catalog_integration + type: command + file: snowflake/statement.iql + props: + - name: '"User-Agent"' + value: stackql + - name: statement + value: | + CREATE OR REPLACE CATALOG INTEGRATION unity_catalog_demo_int + CATALOG_SOURCE = ICEBERG_REST + TABLE_FORMAT = ICEBERG + CATALOG_NAMESPACE = ''{{ schema_name }}'' + REST_CONFIG = ( + CATALOG_URI = ''https://{{ databricks_deployment_name }}.cloud.databricks.com/api/2.1/unity-catalog/iceberg-rest'' + WAREHOUSE = ''{{ catalog_name }}'' + ACCESS_DELEGATION_MODE = VENDED_CREDENTIALS + ) + REST_AUTHENTICATION = ( + TYPE = OAUTH + OAUTH_TOKEN_URI = ''https://{{ databricks_deployment_name }}.cloud.databricks.com/oidc/v1/token'' + OAUTH_CLIENT_ID = ''{{ 
service_principal_application_id }}'' + OAUTH_CLIENT_SECRET = ''{{ secret }}'' + OAUTH_ALLOWED_SCOPES = (''all-apis'', ''sql'') + ) + ENABLED = TRUE + REFRESH_INTERVAL_SECONDS = 30 + - name: timeout + value: 10 + - name: database + value: '{{ snowflake_db }}' + - name: schema + value: '{{ snowflake_schema }}' + - name: warehouse + value: '{{ snowflake_whse }}' - - name: interoperability_catalog + - name: snowflake_iceberg_bronze_table + file: snowflake/statement.iql + type: command props: - - name: name - value: string - - name: comment - value: string - - name: properties - value: - property1: string - property2: string - - name: storage_root - value: string - - name: provider_name - value: string - - name: share_name - value: string - - name: connection_name - value: string - - name: options - value: - property1: string - property2: string + - name: '"User-Agent"' + value: stackql + - name: statement + value: | + CREATE OR REPLACE ICEBERG TABLE retail_sales_bronze + CATALOG = ''unity_catalog_demo_int'' + CATALOG_TABLE_NAME = ''retail_sales_bronze'' + AUTO_REFRESH = TRUE + - name: timeout + value: 30 + - name: database + value: '{{ snowflake_db }}' + - name: schema + value: '{{ snowflake_schema }}' + - name: warehouse + value: '{{ snowflake_whse }}' - - name: interoperability_schema + - name: snowflake_delta_silver_table + file: snowflake/statement.iql + type: command props: - - name: name - value: string - - name: catalog_name - value: string - - name: comment - value: string - - name: properties - value: - property1: string - property2: string - - name: storage_root - value: string + - name: '"User-Agent"' + value: stackql + - name: statement + value: | + CREATE OR REPLACE ICEBERG TABLE retail_sales_silver + CATALOG = ''unity_catalog_demo_int'' + CATALOG_TABLE_NAME = ''retail_sales_silver'' + AUTO_REFRESH = TRUE + - name: timeout + value: 30 + - name: database + value: '{{ snowflake_db }}' + - name: schema + value: '{{ snowflake_schema }}' + - name: warehouse + value: '{{ snowflake_whse }}' + + - name: snowflake_delta_gold_table + file: snowflake/statement.iql + type: command + props: + - name: '"User-Agent"' + value: stackql + - name: statement + value: | + CREATE OR REPLACE ICEBERG TABLE retail_sales_gold + CATALOG = ''unity_catalog_demo_int'' + CATALOG_TABLE_NAME = ''retail_sales_gold'' + AUTO_REFRESH = TRUE + - name: timeout + value: 30 + - name: database + value: '{{ snowflake_db }}' + - name: schema + value: '{{ snowflake_schema }}' + - name: warehouse + value: '{{ snowflake_whse }}' diff --git a/setup.py b/setup.py index 2860f2d..c6388fa 100644 --- a/setup.py +++ b/setup.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -from setuptools import setup, find_packages +from setuptools import setup, find_namespace_packages with open('README.rst', encoding='utf-8') as f: readme = f.read() @@ -10,7 +10,7 @@ setup( name='stackql-deploy', - version='1.8.5', + version='1.8.6', description='Model driven resource provisioning and deployment framework using StackQL.', long_description=readme, long_description_content_type='text/x-rst', @@ -18,7 +18,7 @@ author_email='javen@stackql.io', url='https://github.com/stackql/stackql-deploy', license='MIT', - packages=find_packages(), + packages=find_namespace_packages(include=['stackql_deploy*']), package_data={ 'stackql_deploy': [ 'templates/**/*.template', # Include template files recursively @@ -49,6 +49,6 @@ 'Programming Language :: Python :: 3.10', 'Programming Language :: Python :: 3.11', 'Programming Language :: Python :: 3.12', - 'License :: OSI 
Approved :: MIT License', + 'Programming Language :: Python :: 3.13', ] ) diff --git a/stackql_deploy/__init__.py b/stackql_deploy/__init__.py index 31e8d61..7bb2219 100644 --- a/stackql_deploy/__init__.py +++ b/stackql_deploy/__init__.py @@ -1 +1 @@ -__version__ = '1.8.5' +__version__ = '1.8.6' diff --git a/stackql_deploy/cmd/build.py b/stackql_deploy/cmd/build.py index 606d049..ee35fa2 100644 --- a/stackql_deploy/cmd/build.py +++ b/stackql_deploy/cmd/build.py @@ -74,9 +74,14 @@ def run(self, dry_run, show_queries, on_failure): # # get resource queries # - if type == 'command' and 'sql' in resource: - # command type resource with inline SQL + if (type == 'command' or type == 'query') and 'sql' in resource: + # inline SQL specified in the resource resource_queries = {} + inline_query = render_inline_template(self.env, + resource["name"], + resource["sql"], + full_context, + self.logger) else: resource_queries = get_queries(self.env, self.stack_dir, @@ -129,7 +134,16 @@ def run(self, dry_run, show_queries, on_failure): exports_retry_delay = resource_queries.get('exports', {}).get('options', {}).get('retry_delay', 0) if type == 'query' and not exports_query: - catch_error_and_exit("iql file must include 'exports' anchor for query type resources.", self.logger) + if 'sql' in resource: + exports_query = inline_query + exports_retries = 1 + exports_retry_delay = 0 + else: + catch_error_and_exit( + "inline sql must be supplied or an iql file must be present with an " + "'exports' anchor for query type resources.", + self.logger + ) if type in ('resource', 'multi'): @@ -177,16 +191,21 @@ def run(self, dry_run, show_queries, on_failure): # state check # if resource_exists and not is_correct_state: - is_correct_state = self.check_if_resource_is_correct_state( - is_correct_state, - resource, - full_context, - statecheck_query, - statecheck_retries, - statecheck_retry_delay, - dry_run, - show_queries - ) + # bypass state check if skip_validation is set to true + if resource.get('skip_validation', False): + self.logger.info(f"skipping validation for [{resource['name']}] as skip_validation is set to true.") + is_correct_state = True + elif statecheck_query: + is_correct_state = self.check_if_resource_is_correct_state( + is_correct_state, + resource, + full_context, + statecheck_query, + statecheck_retries, + statecheck_retry_delay, + dry_run, + show_queries + ) # # resource does not exist @@ -249,11 +268,7 @@ def run(self, dry_run, show_queries, on_failure): if type == 'command': # command queries if 'sql' in resource: - command_query = render_inline_template(self.env, - resource["name"], - resource["sql"], - full_context, - self.logger) + command_query = inline_query command_retries = 1 command_retry_delay = 0 else: diff --git a/stackql_deploy/cmd/teardown.py b/stackql_deploy/cmd/teardown.py index db3d022..6b3e4ac 100644 --- a/stackql_deploy/cmd/teardown.py +++ b/stackql_deploy/cmd/teardown.py @@ -5,7 +5,7 @@ get_type ) from ..lib.config import get_full_context, render_value -from ..lib.templating import get_queries +from ..lib.templating import get_queries, render_inline_template from .base import StackQLBase class StackQLDeProvisioner(StackQLBase): @@ -15,17 +15,34 @@ def collect_exports(self, show_queries, dry_run): for resource in self.manifest.get('resources', []): + type = get_type(resource, self.logger) + self.logger.info(f"getting exports for resource [{resource['name']}]") # get full context full_context = get_full_context(self.env, self.global_context, resource, self.logger) # get resource 
queries
-        test_queries = get_queries(self.env, self.stack_dir, 'resources', resource, full_context, self.logger)
-
-        exports_query = test_queries.get('exports', {}).get('rendered')
-        exports_retries = test_queries.get('exports', {}).get('options', {}).get('retries', 1)
-        exports_retry_delay = test_queries.get('exports', {}).get('options', {}).get('retry_delay', 0)
+        if type == 'query' and 'sql' in resource:
+            # inline SQL specified in the resource
+            test_queries = {}
+            exports_query = render_inline_template(self.env,
+                                                   resource["name"],
+                                                   resource["sql"],
+                                                   full_context,
+                                                   self.logger)
+            exports_retries = 1
+            exports_retry_delay = 0
+        else:
+            test_queries = get_queries(self.env,
+                                       self.stack_dir,
+                                       'resources',
+                                       resource,
+                                       full_context,
+                                       self.logger)
+            exports_query = test_queries.get('exports', {}).get('rendered')
+            exports_retries = test_queries.get('exports', {}).get('options', {}).get('retries', 1)
+            exports_retry_delay = test_queries.get('exports', {}).get('options', {}).get('retry_delay', 0)
 
         if exports_query:
             self.process_exports(
diff --git a/stackql_deploy/cmd/test.py b/stackql_deploy/cmd/test.py
index dbd19b0..03226ae 100644
--- a/stackql_deploy/cmd/test.py
+++ b/stackql_deploy/cmd/test.py
@@ -5,7 +5,7 @@
     get_type
 )
 from ..lib.config import get_full_context
-from ..lib.templating import get_queries
+from ..lib.templating import get_queries, render_inline_template
 from .base import StackQLBase
 
 class StackQLTestRunner(StackQLBase):
@@ -36,7 +36,23 @@ def run(self, dry_run, show_queries, on_failure):
         #
         # get test queries
         #
-        test_queries = get_queries(self.env, self.stack_dir, 'resources', resource, full_context, self.logger)
+        if type == 'query' and 'sql' in resource:
+            # inline SQL specified in the resource
+            test_queries = {}
+            inline_query = render_inline_template(self.env,
+                                                  resource["name"],
+                                                  resource["sql"],
+                                                  full_context,
+                                                  self.logger)
+        else:
+            test_queries = get_queries(self.env,
+                                       self.stack_dir,
+                                       'resources',
+                                       resource,
+                                       full_context,
+                                       self.logger)
+
+
         statecheck_query = test_queries.get('statecheck', {}).get('rendered')
         statecheck_retries = test_queries.get('statecheck', {}).get('options', {}).get('retries', 1)
@@ -47,23 +63,34 @@
         exports_retry_delay = test_queries.get('exports', {}).get('options', {}).get('retry_delay', 0)
 
         if type == 'query' and not exports_query:
-            catch_error_and_exit("iql file must include 'exports' anchor for query type resources.", self.logger)
-
+            if 'sql' in resource:
+                exports_query = inline_query
+                exports_retries = 1
+                exports_retry_delay = 0
+            else:
+                catch_error_and_exit(
+                    "inline sql must be supplied or an iql file must be present with an "
+                    "'exports' anchor for query type resources.",
+                    self.logger
+                )
         #
         # statecheck check
         #
         if type in ('resource', 'multi'):
-
-            is_correct_state = self.check_if_resource_is_correct_state(
-                False,
-                resource,
-                full_context,
-                statecheck_query,
-                statecheck_retries,
-                statecheck_retry_delay,
-                dry_run,
-                show_queries
-            )
+            if resource.get('skip_validation', False):
+                self.logger.info(f"Skipping statecheck for {resource['name']}")
+                is_correct_state = True
+            else:
+                is_correct_state = self.check_if_resource_is_correct_state(
+                    False,
+                    resource,
+                    full_context,
+                    statecheck_query,
+                    statecheck_retries,
+                    statecheck_retry_delay,
+                    dry_run,
+                    show_queries
+                )
 
         if not is_correct_state and not dry_run:
             catch_error_and_exit(f"❌ test failed for {resource['name']}.", self.logger)
diff --git a/website/docs/manifest-file.md b/website/docs/manifest-file.md
index 52d2946..89256e0 100644
---
a/website/docs/manifest-file.md +++ b/website/docs/manifest-file.md @@ -123,6 +123,18 @@ the fields within the __`stackql_manifest.yml`__ file are described in further d *** +### `resource.sql` + + + +*** + +### `resource.skip_validation` + + + +*** + ### `resource.props` diff --git a/website/docs/manifest_fields/index.js b/website/docs/manifest_fields/index.js index 9609727..d00069d 100644 --- a/website/docs/manifest_fields/index.js +++ b/website/docs/manifest_fields/index.js @@ -16,6 +16,7 @@ export { default as ResourceProtected } from "./resources/protected.mdx"; export { default as ResourceAuth } from "./resources/auth.mdx"; export { default as ResourceIf } from "./resources/if.mdx"; export { default as ResourceSql } from "./resources/sql.mdx"; +export { default as ResourceSkipValidation } from "./resources/skipvalidation.mdx"; export { default as ResourcePropName } from "./resources/props/name.mdx"; export { default as ResourcePropDescription } from "./resources/props/description.mdx"; export { default as ResourcePropValue } from "./resources/props/value.mdx"; diff --git a/website/docs/manifest_fields/resources.mdx b/website/docs/manifest_fields/resources.mdx index 9b498d5..24f5281 100644 --- a/website/docs/manifest_fields/resources.mdx +++ b/website/docs/manifest_fields/resources.mdx @@ -13,7 +13,9 @@ import LeftAlignedTable from '@site/src/components/LeftAlignedTable'; { name: 'resource.protected', anchor: 'resourceprotected' }, { name: 'resource.description', anchor: 'resourcedescription' }, { name: 'resource.if', anchor: 'resourceif' }, + { name: 'resource.auth', anchor: 'resourceauth' }, { name: 'resource.sql', anchor: 'resourcesql' }, + { name: 'resource.skipvalidation', anchor: 'resourceskipvalidation' }, ]} /> diff --git a/website/docs/manifest_fields/resources/skipvalidation.mdx b/website/docs/manifest_fields/resources/skipvalidation.mdx new file mode 100644 index 0000000..51313c8 --- /dev/null +++ b/website/docs/manifest_fields/resources/skipvalidation.mdx @@ -0,0 +1,45 @@ +import File from '@site/src/components/File'; +import LeftAlignedTable from '@site/src/components/LeftAlignedTable'; + + + +When set to `true`, the `test` and `build` commands will bypass validation checks for this specific resource. This is particularly useful for resources that are initially created with placeholder values and later updated within the same stack. 
+ + + +```yaml {19} +resources: + - name: aws/iam/metastore_access_role + file: aws/iam/iam_role.iql + props: + - name: role_name + value: "{{ stack_name }}-{{ stack_env }}-metastore-role" + - name: assume_role_policy_document + value: + Version: "2012-10-17" + Statement: + - Effect: "Allow" + Principal: + AWS: + - "arn:aws:iam::414351767826:role/unity-catalog-prod-UCMasterRole-14S5ZJVKOTYTL" + Action: "sts:AssumeRole" + Condition: + StringEquals: + sts:ExternalId: "0000" # Placeholder + skip_validation: true + exports: + - aws_iam_role_arn: metastore_access_role_arn +``` + + + +:::info + +- Use `skip_validation: true` when you need to create a resource with temporary configuration that will be updated later in the stack execution +- Common use cases include: + - Creating IAM roles with placeholder external IDs that will be updated once another dependent resource is created + - Setting up initial placeholder credentials that will be modified in a subsequent step + - Creating resources with circular dependencies where initial validation would fail +- This flag only affects the `test` and `build` commands + +::: \ No newline at end of file diff --git a/website/docs/manifest_fields/resources/sql.mdx b/website/docs/manifest_fields/resources/sql.mdx index 3c2ee32..5e650f6 100644 --- a/website/docs/manifest_fields/resources/sql.mdx +++ b/website/docs/manifest_fields/resources/sql.mdx @@ -3,7 +3,7 @@ import LeftAlignedTable from '@site/src/components/LeftAlignedTable'; -For `command` type resources, you can include SQL statements directly in your resource manifest using the `sql` key. This allows you to write custom SQL commands without needing a separate IQL file. +For `command` and `query` type resources, you can include SQL statements directly in your resource manifest using the `sql` key. This allows you to write custom SQL commands without needing a separate IQL file. 
 ```yaml {5-11}
@@ -23,7 +23,7 @@ resources:
 
 :::info
 
-- The `sql` key is only supported to `command` type resources
+- The `sql` key is only supported for `command` and `query` type resources
 - For command resources, either `sql` or a corresponding IQL file with a `command` anchor must be provided, if `sql` is supplied in the manifest this will be used
 - The `sql` key accepts a string containing the SQL statement to execute
 - You can use multi-line strings with the YAML pipe (`|`) character for better readability

From 03c231c69328c44cf1441a6c8ba5d5f2b8a5f7f1 Mon Sep 17 00:00:00 2001
From: Jeffrey Aven
Date: Thu, 24 Jul 2025 15:32:43 +1000
Subject: [PATCH 2/5] added sql_escape filter

---
 .../databricks_workspace/catalog.iql          |   5 -
 .../resources/databricks_workspace/schema.iql |   5 -
 .../resources/snowflake/statement.iql         |   2 +-
 .../stackql_manifest.yml                      |  28 +--
 stackql_deploy/cmd/teardown.py                |  41 ++--
 stackql_deploy/lib/filters.py                 |  24 ++-
 stackql_deploy/lib/templating.py              |   4 +-
 website/docs/resource-query-files.md          | 112 +----------
 website/docs/template-filters.md              | 183 ++++++++++++++++++
 9 files changed, 251 insertions(+), 153 deletions(-)
 create mode 100644 website/docs/template-filters.md

diff --git a/examples/databricks/snowflake-interoperability/resources/databricks_workspace/catalog.iql b/examples/databricks/snowflake-interoperability/resources/databricks_workspace/catalog.iql
index 79424d2..5d7df7e 100644
--- a/examples/databricks/snowflake-interoperability/resources/databricks_workspace/catalog.iql
+++ b/examples/databricks/snowflake-interoperability/resources/databricks_workspace/catalog.iql
@@ -25,8 +25,3 @@ SELECT name as catalog_name
 FROM databricks_workspace.unitycatalog.catalogs
 WHERE name = '{{ name }}' AND
 deployment_name = '{{ databricks_deployment_name }}';
-
-/*+ delete */
-DELETE FROM databricks_workspace.unitycatalog.catalogs
-WHERE name = '{{ name }}' AND
-deployment_name = '{{ deployment_name }}';
\ No newline at end of file
diff --git a/examples/databricks/snowflake-interoperability/resources/databricks_workspace/schema.iql b/examples/databricks/snowflake-interoperability/resources/databricks_workspace/schema.iql
index bb5f401..b662259 100644
--- a/examples/databricks/snowflake-interoperability/resources/databricks_workspace/schema.iql
+++ b/examples/databricks/snowflake-interoperability/resources/databricks_workspace/schema.iql
@@ -25,8 +25,3 @@
 FROM databricks_workspace.unitycatalog.schemas
 WHERE deployment_name = '{{ databricks_deployment_name }}' AND
 catalog_name = '{{ catalog_name }}' AND
 name = '{{ name }}';
-
-/*+ delete */
-DELETE FROM databricks_workspace.unitycatalog.schemas
-WHERE full_name = '{{ name }}' AND
-deployment_name = '{{ databricks_deployment_name }}';
\ No newline at end of file
diff --git a/examples/databricks/snowflake-interoperability/resources/snowflake/statement.iql b/examples/databricks/snowflake-interoperability/resources/snowflake/statement.iql
index dfbe1ab..decce8c 100644
--- a/examples/databricks/snowflake-interoperability/resources/snowflake/statement.iql
+++ b/examples/databricks/snowflake-interoperability/resources/snowflake/statement.iql
@@ -9,7 +9,7 @@ data__warehouse,
 endpoint
 )
 SELECT
-'{{ statement }}',
+'{{ statement | sql_escape }}',
 {{ timeout }},
 '{{ database }}',
 '{{ schema }}',
diff --git a/examples/databricks/snowflake-interoperability/stackql_manifest.yml b/examples/databricks/snowflake-interoperability/stackql_manifest.yml
index e5cbe1f..c5d0e2c 100644
---
a/examples/databricks/snowflake-interoperability/stackql_manifest.yml +++ b/examples/databricks/snowflake-interoperability/stackql_manifest.yml @@ -195,21 +195,21 @@ resources: value: stackql - name: statement value: | - CREATE OR REPLACE CATALOG INTEGRATION unity_catalog_demo_int + CREATE CATALOG INTEGRATION IF NOT EXISTS unity_catalog_demo_int CATALOG_SOURCE = ICEBERG_REST TABLE_FORMAT = ICEBERG - CATALOG_NAMESPACE = ''{{ schema_name }}'' + CATALOG_NAMESPACE = '{{ schema_name }}' REST_CONFIG = ( - CATALOG_URI = ''https://{{ databricks_deployment_name }}.cloud.databricks.com/api/2.1/unity-catalog/iceberg-rest'' - WAREHOUSE = ''{{ catalog_name }}'' + CATALOG_URI = 'https://{{ databricks_deployment_name }}.cloud.databricks.com/api/2.1/unity-catalog/iceberg-rest' + WAREHOUSE = '{{ catalog_name }}' ACCESS_DELEGATION_MODE = VENDED_CREDENTIALS ) REST_AUTHENTICATION = ( TYPE = OAUTH - OAUTH_TOKEN_URI = ''https://{{ databricks_deployment_name }}.cloud.databricks.com/oidc/v1/token'' - OAUTH_CLIENT_ID = ''{{ service_principal_application_id }}'' - OAUTH_CLIENT_SECRET = ''{{ secret }}'' - OAUTH_ALLOWED_SCOPES = (''all-apis'', ''sql'') + OAUTH_TOKEN_URI = 'https://{{ databricks_deployment_name }}.cloud.databricks.com/oidc/v1/token' + OAUTH_CLIENT_ID = '{{ service_principal_application_id }}' + OAUTH_CLIENT_SECRET = '{{ secret }}' + OAUTH_ALLOWED_SCOPES = ('all-apis', 'sql') ) ENABLED = TRUE REFRESH_INTERVAL_SECONDS = 30 @@ -231,8 +231,8 @@ resources: - name: statement value: | CREATE OR REPLACE ICEBERG TABLE retail_sales_bronze - CATALOG = ''unity_catalog_demo_int'' - CATALOG_TABLE_NAME = ''retail_sales_bronze'' + CATALOG = 'unity_catalog_demo_int' + CATALOG_TABLE_NAME = 'retail_sales_bronze' AUTO_REFRESH = TRUE - name: timeout value: 30 @@ -252,8 +252,8 @@ resources: - name: statement value: | CREATE OR REPLACE ICEBERG TABLE retail_sales_silver - CATALOG = ''unity_catalog_demo_int'' - CATALOG_TABLE_NAME = ''retail_sales_silver'' + CATALOG = 'unity_catalog_demo_int' + CATALOG_TABLE_NAME = 'retail_sales_silver' AUTO_REFRESH = TRUE - name: timeout value: 30 @@ -273,8 +273,8 @@ resources: - name: statement value: | CREATE OR REPLACE ICEBERG TABLE retail_sales_gold - CATALOG = ''unity_catalog_demo_int'' - CATALOG_TABLE_NAME = ''retail_sales_gold'' + CATALOG = 'unity_catalog_demo_int' + CATALOG_TABLE_NAME = 'retail_sales_gold' AUTO_REFRESH = TRUE - name: timeout value: 30 diff --git a/stackql_deploy/cmd/teardown.py b/stackql_deploy/cmd/teardown.py index 6b3e4ac..c2045d0 100644 --- a/stackql_deploy/cmd/teardown.py +++ b/stackql_deploy/cmd/teardown.py @@ -23,26 +23,27 @@ def collect_exports(self, show_queries, dry_run): full_context = get_full_context(self.env, self.global_context, resource, self.logger) # get resource queries - if type == 'query' and 'sql' in resource: - # inline SQL specified in the resource - test_queries = {} - exports_query = render_inline_template(self.env, - resource["name"], - resource["sql"], - full_context, - self.logger) - exports_retries = 1 - exports_retry_delay = 0 - else: - test_queries = get_queries(self.env, - self.stack_dir, - 'resources', - resource, - full_context, - self.logger) - exports_query = test_queries.get('exports', {}).get('rendered') - exports_retries = test_queries.get('exports', {}).get('options', {}).get('retries', 1) - exports_retry_delay = test_queries.get('exports', {}).get('options', {}).get('retry_delay', 0) + if type != 'command': + if type == 'query' and 'sql' in resource: + # inline SQL specified in the resource + test_queries = {} + exports_query = 
render_inline_template(self.env, + resource["name"], + resource["sql"], + full_context, + self.logger) + exports_retries = 1 + exports_retry_delay = 0 + else: + test_queries = get_queries(self.env, + self.stack_dir, + 'resources', + resource, + full_context, + self.logger) + exports_query = test_queries.get('exports', {}).get('rendered') + exports_retries = test_queries.get('exports', {}).get('options', {}).get('retries', 1) + exports_retry_delay = test_queries.get('exports', {}).get('options', {}).get('retry_delay', 0) if exports_query: self.process_exports( diff --git a/stackql_deploy/lib/filters.py b/stackql_deploy/lib/filters.py index dad5a8c..629347a 100644 --- a/stackql_deploy/lib/filters.py +++ b/stackql_deploy/lib/filters.py @@ -109,6 +109,25 @@ def sql_list(input_data): quoted_items = [f"'{str(item)}'" for item in python_list] return f"({','.join(quoted_items)})" +def sql_escape(value): + """ + Escapes a string for use as a SQL string literal by doubling any single quotes. + This is useful for nested SQL statements where single quotes need to be escaped. + + Args: + value: The string to escape + + Returns: + The escaped string with single quotes doubled + """ + if value is None: + return None + + if not isinstance(value, str): + value = str(value) + + return value.replace("'", "''") + # # exported functions # @@ -121,11 +140,12 @@ def setup_environment(stack_dir, logger): loader=FileSystemLoader(os.getcwd()), autoescape=False ) - env.filters['merge_lists'] = merge_lists + env.filters['from_json'] = from_json env.filters['base64_encode'] = base64_encode + env.filters['merge_lists'] = merge_lists env.filters['generate_patch_document'] = generate_patch_document - env.filters['from_json'] = from_json env.filters['sql_list'] = sql_list + env.filters['sql_escape'] = sql_escape env.globals['uuid'] = lambda: str(uuid.uuid4()) logger.debug("custom Jinja filters registered: %s", env.filters.keys()) return env diff --git a/stackql_deploy/lib/templating.py b/stackql_deploy/lib/templating.py index a08ea81..92fd3ad 100644 --- a/stackql_deploy/lib/templating.py +++ b/stackql_deploy/lib/templating.py @@ -38,7 +38,7 @@ def render_queries(res_name, env, queries, context, logger): properties, ensure_ascii=False, separators=(',', ':') ).replace('True', 'true').replace('False', 'false') # Correctly format JSON to use double quotes and pass directly since template handles quoting - json_str = json_str.replace("'", "\\'") # escape single quotes if any within strings + # json_str = json_str.replace("'", "\\'") # escape single quotes if any within strings temp_context[ctx_key] = json_str # No need to alter non-JSON strings, assume the template handles them correctly @@ -147,7 +147,7 @@ def render_inline_template(env, resource_name, template_string, full_context, lo properties, ensure_ascii=False, separators=(',', ':') ).replace('True', 'true').replace('False', 'false') # Correctly format JSON to use double quotes and pass directly since template handles quoting - json_str = json_str.replace("'", "\\'") # escape single quotes if any within strings + # json_str = json_str.replace("'", "\\'") # escape single quotes if any within strings temp_context[ctx_key] = json_str # Render the template diff --git a/website/docs/resource-query-files.md b/website/docs/resource-query-files.md index ba119d1..b798b30 100644 --- a/website/docs/resource-query-files.md +++ b/website/docs/resource-query-files.md @@ -206,113 +206,17 @@ AND project = '{{ project }}' AND zone = '{{ zone }}' ``` -## Template filters +## Template 
Filters -### `from_json` -`from_json` is a custom `stackql-deploy` filter which is used to convert a `json` string to a python dictionary or list. The common use case is to take advantage of Jinja2 templating within `stackql-deploy` to dynamically generate SQL statements for infrastructure provisioning by converting JSON strings into Python dictionaries or lists, allowing for iteration and more flexible configuration management. +StackQL Deploy leverages Jinja2 templating capabilities and extends them with custom filters for infrastructure provisioning. For a complete reference of all available filters, see the [__Template Filters__](template-filters) documentation. -```sql {2} -/*+ create */ -{% for network_interface in network_interfaces | from_json %} -INSERT INTO google.compute.instances - ( - zone, - project, - data__name, - data__machineType, - data__canIpForward, - data__deletionProtection, - data__scheduling, - data__networkInterfaces, - data__disks, - data__serviceAccounts, - data__tags - ) - SELECT -'{{ default_zone }}', -'{{ project }}', -'{{ instance_name_prefix }}-{{ loop.index }}', -'{{ machine_type }}', -true, -false, -'{{ scheduling }}', -'[ {{ network_interface | tojson }} ]', -'{{ disks }}', -'{{ service_accounts }}', -'{{ tags }}'; -{% endfor %} -``` - -### `tojson` -`tojson` is a built-in Jinja2 filter to convert a Python dictionary or list into a `json` string. This may be required if you have used the `from_json` filter as shown here: - -```sql {25} -/*+ create */ -{% for network_interface in network_interfaces | from_json %} -INSERT INTO google.compute.instances - ( - zone, - project, - data__name, - data__machineType, - data__canIpForward, - data__deletionProtection, - data__scheduling, - data__networkInterfaces, - data__disks, - data__serviceAccounts, - data__tags - ) - SELECT -'{{ default_zone }}', -'{{ project }}', -'{{ instance_name_prefix }}-{{ loop.index }}', -'{{ machine_type }}', -true, -false, -'{{ scheduling }}', -'[ {{ network_interface | tojson }} ]', -'{{ disks }}', -'{{ service_accounts }}', -'{{ tags }}'; -{% endfor %} -``` - -### `generate_patch_document` - -`generate_patch_document` is a custom `stackql-deploy` filter which generates a patch document for the given resource according to https://datatracker.ietf.org/doc/html/rfc6902, this is designed for the AWS Cloud Control API, which requires a patch document to update resources. An example of this filter used to update the `NotificationConfiguration` for an existing AWS bucket is shown here: - -```sql {3-5} -/*+ createorupdate */ -update aws.s3.buckets -set data__PatchDocument = string('{{ { - "NotificationConfiguration": transfer_notification_config - } | generate_patch_document }}') -WHERE -region = '{{ region }}' -AND data__Identifier = '{{ transfer_bucket_name }}'; -``` - -### `base64_encode` +Here are a few commonly used filters: -`base64_encode` is a custom `stackql-deploy` filter used to generate a `base64` encoded value for a given input string, this is often specified as an input requirement for a free text field in a resource. 
This example shows how to `base64` encode the `UserData` field for an AWS EC2 instance: - -```sql {13} -/*+ create */ -INSERT INTO aws.ec2.instances ( - ImageId, - InstanceType, - SubnetId, - UserData, - region -) -SELECT - '{{ ami_id }}', - '{{ instance_type }}', - '{{ instance_subnet_id }}', - '{{ user_data | base64_encode }}', - '{{ region }}'; -``` +- `from_json` - Converts JSON strings to Python objects for iteration and manipulation +- `tojson` - Converts Python objects back to JSON strings +- `sql_escape` - Properly escapes SQL string literals for nested SQL statements +- `generate_patch_document` - Creates RFC6902-compliant patch documents for AWS resources +- `base64_encode` - Encodes strings as base64 for API fields requiring binary data ## Examples diff --git a/website/docs/template-filters.md b/website/docs/template-filters.md new file mode 100644 index 0000000..2967586 --- /dev/null +++ b/website/docs/template-filters.md @@ -0,0 +1,183 @@ +--- +id: template-filters +title: Template Filters +hide_title: false +hide_table_of_contents: false +description: Custom and built-in Jinja2 filters available in StackQL Deploy for template processing +tags: [] +draft: false +unlisted: false +--- + +import File from '/src/components/File'; + +# Template Filters + +StackQL Deploy leverages Jinja2 templating capabilities and extends them with custom filters specifically designed for infrastructure provisioning use cases. These filters help transform data between formats, encode values, generate specialized document formats, and perform other common operations required in IaC configurations. + +## Available Filters + +### `from_json` + +Converts a JSON string to a Python dictionary or list. This is commonly used to enable iteration over complex data structures in templates. + +**Example usage:** + +```sql +{% for network_interface in network_interfaces | from_json %} +INSERT INTO google.compute.instances + ( + /* fields... */ + ) + SELECT +'{{ instance_name_prefix }}-{{ loop.index }}', +/* other values... */ +'[ {{ network_interface | tojson }} ]'; +{% endfor %} +``` + +### `tojson` + +A built-in Jinja2 filter that converts a Python dictionary or list into a JSON string. Often used in conjunction with `from_json` when working with complex data structures. + +**Example usage:** + +```sql +'[ {{ network_interface | tojson }} ]' +``` + +### `generate_patch_document` + +Generates a patch document according to [RFC6902](https://datatracker.ietf.org/doc/html/rfc6902), primarily designed for the AWS Cloud Control API which requires patch documents for resource updates. + +**Example usage:** + +```sql +update aws.s3.buckets +set data__PatchDocument = string('{{ { + "NotificationConfiguration": transfer_notification_config + } | generate_patch_document }}') +WHERE +region = '{{ region }}' +AND data__Identifier = '{{ bucket_name }}'; +``` + +### `base64_encode` + +Encodes a string as base64, which is commonly required for certain API fields that accept binary data. + +**Example usage:** + +```sql +INSERT INTO aws.ec2.instances ( + /* fields... */ + UserData, + region +) +SELECT + /* values... */ + '{{ user_data | base64_encode }}', + '{{ region }}'; +``` + +### `sql_list` + +Converts a Python list or a JSON array string into a SQL-compatible list format with proper quoting, suitable for use in SQL IN clauses. 
+ +**Example usage:** + +```sql +SELECT * FROM aws.ec2.instances +WHERE region = '{{ region }}' +AND InstanceId IN {{ instance_ids | sql_list }} +``` + +### `sql_escape` + +Escapes a string for use as a SQL string literal by doubling any single quotes. This is particularly useful for nested SQL statements where quotes need special handling. + +**Example usage:** + +```sql +INSERT INTO snowflake.sqlapi.statements ( +data__statement, +/* other fields... */ +) +SELECT +'{{ statement | sql_escape }}', +/* other values... */ +; +``` + +### `merge_lists` + +Merges two lists (or JSON-encoded list strings) into a single list with unique items. + +**Example usage:** + +```sql +{% set combined_policies = default_policies | merge_lists(custom_policies) %} +INSERT INTO aws.iam.policies ( + /* fields... */ + PolicyDocument, + /* other fields... */ +) +SELECT + /* values... */ + '{{ combined_policies | tojson }}', + /* other values... */ +; +``` + +### `merge_objects` + +Merges two dictionaries (or JSON-encoded object strings) into a single dictionary. In case of duplicate keys, values from the second dictionary take precedence. + +**Example usage:** + +```sql +{% set complete_config = base_config | merge_objects(environment_specific_config) %} +INSERT INTO aws.lambda.functions ( + /* fields... */ + Environment, + /* other fields... */ +) +SELECT + /* values... */ + '{{ complete_config | tojson }}', + /* other values... */ +; +``` + +## Global Functions + +### `uuid` + +Generates a random UUID (version 4). Useful for creating unique identifiers. + +**Example usage:** + +```sql +INSERT INTO aws.s3.buckets ( + /* fields... */ + data__BucketName, + /* other fields... */ +) +SELECT + /* values... */ + '{{ stack_name }}-{{ uuid() }}', + /* other values... */ +; +``` + +## Filter Chaining + +Filters can be chained together to perform multiple transformations in sequence: + +```sql +'{{ user_config | from_json | merge_objects(default_config) | tojson | base64_encode }}' +``` + +## Custom Filter Development + +The StackQL Deploy filtering system is extensible. If you need additional filters for your specific use case, you can contribute to the project by adding new filters to the `lib/filters.py` file. 
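+
+As a minimal, purely illustrative sketch, a custom filter is just a Python function registered on the Jinja2 environment in `setup_environment` (the `reverse_string` filter below is hypothetical, shown only to demonstrate the registration pattern):
+
+```python
+def reverse_string(value):
+    """Hypothetical example filter: returns the input string reversed."""
+    if value is None:
+        return None
+    return str(value)[::-1]
+
+# registered inside setup_environment(), alongside the existing filters:
+# env.filters['reverse_string'] = reverse_string
+```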
\ No newline at end of file From 5d45479742938e1f011a59f18b0f3ce0604bb28a Mon Sep 17 00:00:00 2001 From: Jeffrey Aven Date: Thu, 24 Jul 2025 15:33:14 +1000 Subject: [PATCH 3/5] added sql_escape filter --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5bca085..bcd40c4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,7 @@ ## 1.8.6 (2025-07-22) - Added support for inline `sql` for `command` and `query` resource types +- Added `sql_escape` filter ## 1.8.5 (2025-06-30) From 8fc90eaebf18f756a2ea27d4b3608a955af003b2 Mon Sep 17 00:00:00 2001 From: Jeffrey Aven Date: Wed, 13 Aug 2025 12:11:09 +1000 Subject: [PATCH 4/5] website updates --- website/docs/cli-reference/init.md | 2 +- .../docs/manifest_fields/resources/auth.mdx | 2 +- .../aws/vpc-and-ec2-instance.md | 2 +- .../azure/simple-vnet-and-vm.md | 2 +- .../google/k8s-the-hard-way.md | 2 +- website/docusaurus.config.js | 179 ++++++++++-------- website/src/pages/{home.js => install.js} | 4 +- .../src/pages/{registry => providers}/aws.js | 2 +- .../pages/{registry => providers}/azure.js | 2 +- website/src/pages/providers/confluent.js | 10 + website/src/pages/providers/databricks.js | 10 + .../pages/{registry => providers}/github.js | 2 +- .../pages/{registry => providers}/google.js | 2 +- .../pages/{features.js => providers/index.js} | 18 +- .../src/pages/{registry => providers}/okta.js | 2 +- website/src/pages/providers/openai.js | 10 + website/src/pages/providers/snowflake.js | 10 + website/src/pages/registry/awscc.js | 10 - website/src/pages/registry/digitalocean.js | 10 - website/src/pages/registry/index.js | 10 - website/src/pages/registry/k8s.js | 10 - website/src/pages/registry/linode.js | 10 - website/src/pages/stackql-deploy.js | 10 + .../src/pages/{downloads.js => tutorials.js} | 4 +- 24 files changed, 175 insertions(+), 150 deletions(-) rename website/src/pages/{home.js => install.js} (76%) rename website/src/pages/{registry => providers}/aws.js (60%) rename website/src/pages/{registry => providers}/azure.js (59%) create mode 100644 website/src/pages/providers/confluent.js create mode 100644 website/src/pages/providers/databricks.js rename website/src/pages/{registry => providers}/github.js (59%) rename website/src/pages/{registry => providers}/google.js (59%) rename website/src/pages/{features.js => providers/index.js} (72%) rename website/src/pages/{registry => providers}/okta.js (60%) create mode 100644 website/src/pages/providers/openai.js create mode 100644 website/src/pages/providers/snowflake.js delete mode 100644 website/src/pages/registry/awscc.js delete mode 100644 website/src/pages/registry/digitalocean.js delete mode 100644 website/src/pages/registry/index.js delete mode 100644 website/src/pages/registry/k8s.js delete mode 100644 website/src/pages/registry/linode.js create mode 100644 website/src/pages/stackql-deploy.js rename website/src/pages/{downloads.js => tutorials.js} (75%) diff --git a/website/docs/cli-reference/init.md b/website/docs/cli-reference/init.md index 249d72d..b3b58d7 100644 --- a/website/docs/cli-reference/init.md +++ b/website/docs/cli-reference/init.md @@ -69,7 +69,7 @@ stackql-deploy init my-stack ``` :::tip -`init` will create your project structure including the stack directory including the `stackql_manifest.yml` and `README.md` files, and a `resources` directory with a sample StackQL resource query file (`.iql` file). 
You can modify a project to use whichever providers are available in the [StackQL Provider Registry](https://registry.stackql.io/).
+`init` will create your project structure, including the stack directory (containing the `stackql_manifest.yml` and `README.md` files) and a `resources` directory with a sample StackQL resource query file (`.iql` file). You can modify a project to use whichever providers are available in the [StackQL Provider Registry](https://stackql.io/providers).
 :::
 
diff --git a/website/docs/manifest_fields/resources/auth.mdx b/website/docs/manifest_fields/resources/auth.mdx
index 82ea7e3..e06920f 100644
--- a/website/docs/manifest_fields/resources/auth.mdx
+++ b/website/docs/manifest_fields/resources/auth.mdx
@@ -17,7 +17,7 @@ stackql-deploy upgrade
 
 
 
-The `auth` object will depend upon the provider the resource belongs to, consult the provider documentation in the [StackQL Provider Registry Docs](https://stackql.io/registry).
+The `auth` object will depend upon the provider the resource belongs to; consult the provider documentation in the [StackQL Provider Registry Docs](https://stackql.io/providers).
 
 ### Example Usage
diff --git a/website/docs/template-library/aws/vpc-and-ec2-instance.md b/website/docs/template-library/aws/vpc-and-ec2-instance.md
index 55221b8..9f93d6b 100644
--- a/website/docs/template-library/aws/vpc-and-ec2-instance.md
+++ b/website/docs/template-library/aws/vpc-and-ec2-instance.md
@@ -354,7 +354,7 @@ AND region = '{{ region }}';
 
 The complete code for this example stack is available [__here__](https://github.com/stackql/stackql-deploy/tree/main/examples/aws/aws-stack). For more information on how to use StackQL and StackQL Deploy, visit:
 
-- [`aws` provider docs](https://stackql.io/registry/aws)
+- [`aws` provider docs](https://stackql.io/providers/aws)
 - [`stackql`](https://github.com/stackql)
 - [`stackql-deploy` PyPI home page](https://pypi.org/project/stackql-deploy/)
 - [`stackql-deploy` GitHub repo](https://github.com/stackql/stackql-deploy)
diff --git a/website/docs/template-library/azure/simple-vnet-and-vm.md b/website/docs/template-library/azure/simple-vnet-and-vm.md
index be13342..c2a22ef 100644
--- a/website/docs/template-library/azure/simple-vnet-and-vm.md
+++ b/website/docs/template-library/azure/simple-vnet-and-vm.md
@@ -299,7 +299,7 @@ AND subnetName = '{{ subnet_name }}'
 
 The complete code for this example stack is available [__here__](https://github.com/stackql/stackql-deploy/tree/main/examples/azure/azure-stack). For more information on how to use StackQL and StackQL Deploy, visit:
 
-- [`azure` provider docs](https://stackql.io/registry/azure)
+- [`azure` provider docs](https://stackql.io/providers/azure)
 - [`stackql`](https://github.com/stackql)
 - [`stackql-deploy` PyPI home page](https://pypi.org/project/stackql-deploy/)
 - [`stackql-deploy` GitHub repo](https://github.com/stackql/stackql-deploy)
diff --git a/website/docs/template-library/google/k8s-the-hard-way.md b/website/docs/template-library/google/k8s-the-hard-way.md
index a8332bf..1dcf2b4 100644
--- a/website/docs/template-library/google/k8s-the-hard-way.md
+++ b/website/docs/template-library/google/k8s-the-hard-way.md
@@ -424,7 +424,7 @@ AND firewall = '{{ fw_name }}'
 
 The complete code for this example stack is available [__here__](https://github.com/stackql/stackql-deploy/tree/main/examples/k8s-the-hard-way).
For more information on how to use StackQL and StackQL Deploy, visit: -- [`google` provider docs](https://stackql.io/registry/google) +- [`google` provider docs](https://stackql.io/providers/google) - [`stackql`](https://github.com/stackql) - [`stackql-deploy` PyPI home page](https://pypi.org/project/stackql-deploy/) - [`stackql-deploy` GitHub repo](https://github.com/stackql/stackql-deploy) diff --git a/website/docusaurus.config.js b/website/docusaurus.config.js index ddb6177..c56181f 100644 --- a/website/docusaurus.config.js +++ b/website/docusaurus.config.js @@ -6,6 +6,84 @@ import {themes as prismThemes} from 'prism-react-renderer'; +const providerDropDownListItems = [ + { + label: 'AWS', + to: '/providers/aws', + }, + { + label: 'Azure', + to: '/providers/azure', + }, + { + label: 'Google', + to: '/providers/google', + }, + { + label: 'Databricks', + to: '/providers/databricks', + }, + { + label: 'Snowflake', + to: '/providers/snowflake', + }, + { + label: 'Confluent', + to: '/providers/confluent', + }, + { + label: 'Okta', + to: '/providers/okta', + }, + { + label: 'GitHub', + to: '/providers/github', + }, + { + label: 'OpenAI', + to: '/providers/openai', + }, + { + label: '... More', + to: '/providers', + }, +]; + +const footerStackQLItems = [ + { + label: 'Documentation', + to: '/stackqldocs', + }, + { + label: 'Install', + to: '/install', + }, + { + label: 'Contact us', + to: '/contact-us', + }, +]; + +const footerMoreItems = [ + { + label: 'Providers', + to: '/providers', + }, + { + label: 'stackql-deploy', + to: '/stackql-deploy', + }, + { + label: 'Blog', + to: '/blog', + }, + { + label: 'Tutorials', + to: '/tutorials', + }, +]; + + /** @type {import('@docusaurus/types').Config} */ const config = { title: 'StackQL Deploy', @@ -64,7 +142,7 @@ const config = { ({ image: 'img/stackql-cover.png', navbar: { - logo: { + logo: { alt: 'StackQL Deploy', href: '/', src: 'img/stackql-deploy-logo.svg', @@ -77,61 +155,38 @@ const config = { // position: 'left', // label: 'Deploy Docs', // }, + { + to: '/install', + position: 'left', + label: 'Install', + }, { to: '/stackqldocs', position: 'left', label: 'StackQL Docs', }, { - to: '/registry', + to: '/providers', type: 'dropdown', - label: 'StackQL Providers', + label: 'Providers', position: 'left', - items: [ - { - label: 'AWS', - to: '/registry/aws', - }, - { - label: 'Azure', - to: '/registry/azure', - }, - { - label: 'Google', - to: '/registry/google', - }, - { - label: 'GitHub', - to: '/registry/github', - }, - { - label: 'Kubernetes', - to: '/registry/k8s', - }, - { - label: 'Okta', - to: '/registry/okta', - }, - { - label: 'DigitalOcean', - to: '/registry/digitalocean', - }, - { - label: 'Linode', - to: '/registry/linode', - }, - { - label: '... 
More', - to: '/registry', - }, - ] + items: providerDropDownListItems, }, { - to: '/downloads', + type: 'dropdown', + label: 'More', position: 'left', - label: 'Downloads', + items: [ + { + to: '/blog', + label: 'Blog', + }, + { + to: '/tutorials', + label: 'Tutorials', + }, + ], }, - // {to: '/blog', label: 'Blog', position: 'left'}, { href: 'https://github.com/stackql/stackql', position: 'right', @@ -144,48 +199,18 @@ const config = { style: 'dark', logo: { alt: 'StackQL', - href: 'https://stackql.io/', + href: '/', src: 'img/stackql-deploy-logo.svg', srcDark: 'img/stackql-deploy-logo-white.svg', }, links: [ { title: 'StackQL', - items: [ - { - label: 'Home', - to: '/home', - }, - { - label: 'Features', - to: '/features', - }, - { - label: 'Downloads', - to: '/downloads', - }, - { - label: 'Contact us', - href: '/contact-us', - }, - ], + items: footerStackQLItems, }, { title: 'More', - items: [ - { - label: 'StackQL Docs', - to: '/stackqldocs', - }, - { - label: 'Providers', - to: '/registry', - }, - { - label: 'Blog', - to: '/blog', - }, - ], + items: footerMoreItems, }, ], copyright: `© ${new Date().getFullYear()} StackQL Studios`, diff --git a/website/src/pages/home.js b/website/src/pages/install.js similarity index 76% rename from website/src/pages/home.js rename to website/src/pages/install.js index 18bef7c..a86db97 100644 --- a/website/src/pages/home.js +++ b/website/src/pages/install.js @@ -1,10 +1,10 @@ import React from 'react'; import Head from '@docusaurus/Head'; -export default function Home() { +export default function Install() { return ( - + ); }; \ No newline at end of file diff --git a/website/src/pages/registry/aws.js b/website/src/pages/providers/aws.js similarity index 60% rename from website/src/pages/registry/aws.js rename to website/src/pages/providers/aws.js index 3bdb1d8..780099a 100644 --- a/website/src/pages/registry/aws.js +++ b/website/src/pages/providers/aws.js @@ -4,7 +4,7 @@ import Head from '@docusaurus/Head'; export default function Registry() { return ( - + ); }; \ No newline at end of file diff --git a/website/src/pages/registry/azure.js b/website/src/pages/providers/azure.js similarity index 59% rename from website/src/pages/registry/azure.js rename to website/src/pages/providers/azure.js index a7c28ee..467f77a 100644 --- a/website/src/pages/registry/azure.js +++ b/website/src/pages/providers/azure.js @@ -4,7 +4,7 @@ import Head from '@docusaurus/Head'; export default function Registry() { return ( - + ); }; \ No newline at end of file diff --git a/website/src/pages/providers/confluent.js b/website/src/pages/providers/confluent.js new file mode 100644 index 0000000..e886aaf --- /dev/null +++ b/website/src/pages/providers/confluent.js @@ -0,0 +1,10 @@ +import React from 'react'; +import Head from '@docusaurus/Head'; + +export default function Registry() { + return ( + + + + ); +}; \ No newline at end of file diff --git a/website/src/pages/providers/databricks.js b/website/src/pages/providers/databricks.js new file mode 100644 index 0000000..a04b603 --- /dev/null +++ b/website/src/pages/providers/databricks.js @@ -0,0 +1,10 @@ +import React from 'react'; +import Head from '@docusaurus/Head'; + +export default function Registry() { + return ( + + + + ); +}; \ No newline at end of file diff --git a/website/src/pages/registry/github.js b/website/src/pages/providers/github.js similarity index 59% rename from website/src/pages/registry/github.js rename to website/src/pages/providers/github.js index 8825c68..b425c6c 100644 --- 
--- a/website/src/pages/registry/github.js
+++ b/website/src/pages/providers/github.js
@@ -4,7 +4,7 @@ import Head from '@docusaurus/Head';
 export default function Registry() {
   return (
 
-
+
 
   );
 };
\ No newline at end of file
diff --git a/website/src/pages/registry/google.js b/website/src/pages/providers/google.js
similarity index 59%
rename from website/src/pages/registry/google.js
rename to website/src/pages/providers/google.js
index 697ad66..01fe8b7 100644
--- a/website/src/pages/registry/google.js
+++ b/website/src/pages/providers/google.js
@@ -4,7 +4,7 @@ import Head from '@docusaurus/Head';
 export default function Registry() {
   return (
 
-
+
 
   );
 };
\ No newline at end of file
diff --git a/website/src/pages/features.js b/website/src/pages/providers/index.js
similarity index 72%
rename from website/src/pages/features.js
rename to website/src/pages/providers/index.js
index 8887bd1..20c17c6 100644
--- a/website/src/pages/features.js
+++ b/website/src/pages/providers/index.js
@@ -1,10 +1,10 @@
-import React from 'react';
-import Head from '@docusaurus/Head';
-
-export default function Features() {
-  return (
-
-
-
-  );
+import React from 'react';
+import Head from '@docusaurus/Head';
+
+export default function Registry() {
+  return (
+
+
+
+  );
 };
\ No newline at end of file
diff --git a/website/src/pages/registry/okta.js b/website/src/pages/providers/okta.js
similarity index 60%
rename from website/src/pages/registry/okta.js
rename to website/src/pages/providers/okta.js
index 0ce2390..cdddc72 100644
--- a/website/src/pages/registry/okta.js
+++ b/website/src/pages/providers/okta.js
@@ -4,7 +4,7 @@ import Head from '@docusaurus/Head';
 export default function Registry() {
   return (
 
-
+
 
   );
 };
\ No newline at end of file
diff --git a/website/src/pages/providers/openai.js b/website/src/pages/providers/openai.js
new file mode 100644
index 0000000..9884c84
--- /dev/null
+++ b/website/src/pages/providers/openai.js
@@ -0,0 +1,10 @@
+import React from 'react';
+import Head from '@docusaurus/Head';
+
+export default function Registry() {
+  return (
+
+
+
+  );
+};
\ No newline at end of file
diff --git a/website/src/pages/providers/snowflake.js b/website/src/pages/providers/snowflake.js
new file mode 100644
index 0000000..7b3ec43
--- /dev/null
+++ b/website/src/pages/providers/snowflake.js
@@ -0,0 +1,10 @@
+import React from 'react';
+import Head from '@docusaurus/Head';
+
+export default function Registry() {
+  return (
+
+
+
+  );
+};
\ No newline at end of file
diff --git a/website/src/pages/registry/awscc.js b/website/src/pages/registry/awscc.js
deleted file mode 100644
index 3bdb1d8..0000000
--- a/website/src/pages/registry/awscc.js
+++ /dev/null
@@ -1,10 +0,0 @@
-import React from 'react';
-import Head from '@docusaurus/Head';
-
-export default function Registry() {
-  return (
-
-
-
-  );
-};
\ No newline at end of file
diff --git a/website/src/pages/registry/digitalocean.js b/website/src/pages/registry/digitalocean.js
deleted file mode 100644
index fb60913..0000000
--- a/website/src/pages/registry/digitalocean.js
+++ /dev/null
@@ -1,10 +0,0 @@
-import React from 'react';
-import Head from '@docusaurus/Head';
-
-export default function Registry() {
-  return (
-
-
-
-  );
-};
\ No newline at end of file
diff --git a/website/src/pages/registry/index.js b/website/src/pages/registry/index.js
deleted file mode 100644
index a985c4f..0000000
--- a/website/src/pages/registry/index.js
+++ /dev/null
@@ -1,10 +0,0 @@
-import React from 'react';
-import Head from '@docusaurus/Head';
-
-export default function Registry() {
-  return (
-
-
-
-  );
-};
\ No newline at end of file
diff --git a/website/src/pages/registry/k8s.js b/website/src/pages/registry/k8s.js
deleted file mode 100644
index 8e2e32d..0000000
--- a/website/src/pages/registry/k8s.js
+++ /dev/null
@@ -1,10 +0,0 @@
-import React from 'react';
-import Head from '@docusaurus/Head';
-
-export default function Registry() {
-  return (
-
-
-
-  );
-};
\ No newline at end of file
diff --git a/website/src/pages/registry/linode.js b/website/src/pages/registry/linode.js
deleted file mode 100644
index 4a63caa..0000000
--- a/website/src/pages/registry/linode.js
+++ /dev/null
@@ -1,10 +0,0 @@
-import React from 'react';
-import Head from '@docusaurus/Head';
-
-export default function Registry() {
-  return (
-
-
-
-  );
-};
\ No newline at end of file
diff --git a/website/src/pages/stackql-deploy.js b/website/src/pages/stackql-deploy.js
new file mode 100644
index 0000000..fc1b070
--- /dev/null
+++ b/website/src/pages/stackql-deploy.js
@@ -0,0 +1,10 @@
+import React from 'react';
+import Head from '@docusaurus/Head';
+
+export default function Home() {
+  return (
+
+
+
+  );
+};
\ No newline at end of file
diff --git a/website/src/pages/downloads.js b/website/src/pages/tutorials.js
similarity index 75%
rename from website/src/pages/downloads.js
rename to website/src/pages/tutorials.js
index 19499d1..7e0e838 100644
--- a/website/src/pages/downloads.js
+++ b/website/src/pages/tutorials.js
@@ -1,10 +1,10 @@
 import React from 'react';
 import Head from '@docusaurus/Head';
 
-export default function Downloads() {
+export default function Tutorials() {
   return (
 
-
+
 
   );
 };
\ No newline at end of file

From c9f67d4c3dfa6d1b96ae6ea0d1403cd141dfc922 Mon Sep 17 00:00:00 2001
From: Jeffrey Aven
Date: Wed, 13 Aug 2025 12:22:40 +1000
Subject: [PATCH 5/5] lint fixes

---
 stackql_deploy/cmd/build.py   | 4 +++-
 stackql_deploy/lib/filters.py | 6 ++----
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/stackql_deploy/cmd/build.py b/stackql_deploy/cmd/build.py
index ee35fa2..f237bc1 100644
--- a/stackql_deploy/cmd/build.py
+++ b/stackql_deploy/cmd/build.py
@@ -193,7 +193,9 @@ def run(self, dry_run, show_queries, on_failure):
         if resource_exists and not is_correct_state:
             # bypass state check if skip_validation is set to true
             if resource.get('skip_validation', False):
-                self.logger.info(f"skipping validation for [{resource['name']}] as skip_validation is set to true.")
+                self.logger.info(
+                    f"skipping validation for [{resource['name']}] as skip_validation is set to true."
+                )
                 is_correct_state = True
             elif statecheck_query:
                 is_correct_state = self.check_if_resource_is_correct_state(
diff --git a/stackql_deploy/lib/filters.py b/stackql_deploy/lib/filters.py
index 629347a..47f0e3c 100644
--- a/stackql_deploy/lib/filters.py
+++ b/stackql_deploy/lib/filters.py
@@ -113,19 +113,17 @@ def sql_escape(value):
     """
     Escapes a string for use as a SQL string literal by doubling any single quotes.
     This is useful for nested SQL statements where single quotes need to be escaped.
-
     Args:
         value: The string to escape
-
     Returns:
         The escaped string with single quotes doubled
     """
     if value is None:
         return None
-
+
     if not isinstance(value, str):
         value = str(value)
-
+
     return value.replace("'", "''")
 
 #
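
For reference, the `sql_escape` filter tidied up in the hunk above escapes values for embedding inside SQL string literals by doubling single quotes. A minimal standalone sketch of the behaviour (the function body mirrors the patched filter; the sample values in the checks are illustrative only, not taken from the patch):

    def sql_escape(value):
        # None passes through untouched so optional template values stay unset
        if value is None:
            return None
        # coerce non-strings before escaping
        if not isinstance(value, str):
            value = str(value)
        # double any single quotes, per standard SQL string-literal escaping
        return value.replace("'", "''")

    # illustrative checks with hypothetical inputs
    assert sql_escape("O'Reilly") == "O''Reilly"
    assert sql_escape(42) == "42"
    assert sql_escape(None) is None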