diff --git a/.env.template b/.env.template index 4446ddb4..7f60392c 100644 --- a/.env.template +++ b/.env.template @@ -1,3 +1,7 @@ +# Server Configuration +ALLOWED_HOSTS='*' +SECRET_KEY='pbv(g=%7$$4rzvl88e24etn57-%n0uw-@y*=7ak422_3!zrc9+' + # This is set up to use the PostGIS container spun up by docker-compose # in the root of this repository. If you are using a different database, you will # need to update the DATABASE_URL variable below. diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml new file mode 100644 index 00000000..351e4869 --- /dev/null +++ b/.github/workflows/test.yml @@ -0,0 +1,29 @@ +name: Test + +on: + push: + branches: [ master, main ] + pull_request: + branches: [ master, main ] + +jobs: + test: + runs-on: ubuntu-24.04 # Use 24.04 for Podman 4.x (matches local dev environment) + + steps: + - uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v6 + with: + python-version: '3.12' + + - name: Install Podman Compose + run: | + pip install podman-compose + + - name: Build Images + run: make build + + - name: Run Tests + run: make test diff --git a/.gitignore b/.gitignore index 49598127..ce2a48b2 100644 --- a/.gitignore +++ b/.gitignore @@ -2,12 +2,13 @@ db/*.sqlite3 log/*.log -# tmp +# tmp & cache tmp/ .sass-cache/ profiling/ attachments/ build/ +.terraform/ # cover_me generated coverage @@ -26,6 +27,7 @@ coverage.data # environment env*/ .env* +.auto.tfvars # Local project settings src/project/local_settings.py diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 4ec2a0f2..00000000 --- a/.travis.yml +++ /dev/null @@ -1,53 +0,0 @@ -# https://github.com/travis-ci/travis-ci/wiki/.travis.yml-options - -# Set the distribution to Ubuntu 16.04 -dist: xenial - -# Set the language to Python -language: python - -# Configure PostgreSQL and PostGIS -services: - - postgresql -addons: - postgresql: 9.6 - apt: - packages: - - postgresql-9.6-postgis-2.4 - - # libevent development files are required for 
gevent - - libevent-dev - - # Install GeoDjango dependencies -- see - # https://docs.djangoproject.com/en/dev/ref/contrib/gis/install/#ubuntu - - binutils - - gdal-bin - - libgdal-dev - - libproj-dev - -# Set any project environment variables below here... -env: - - DATABASE_URL=postgis://postgres:postgres@localhost:5432/shareabouts - -install: "ci/install.sh" -#script: "src/manage.py test project sa_api_v2 remote_client_user --with-coverage --cover-package=sa_api_v2 --cover-package=remote_client_user" -script: "coverage run --include 'src/sa_api_v2/*' src/manage.py test project sa_api_v2" -after_success: "coverage report" -python: - - "2.7" - - "3.6" -notifications: - irc: - channels: - - "irc.freenode.org#shareabouts" - on_success: change - on_failure: always - email: - recipients: - - dev@openplans.org - on_success: change - on_failure: always - -# branches: -# only: -# - master diff --git a/Containerfile b/Containerfile index b9c13360..c27ed6aa 100644 --- a/Containerfile +++ b/Containerfile @@ -3,25 +3,42 @@ FROM ubuntu:24.04 # Install Python & GeoDjango dependencies RUN apt update && \ apt install -y \ - libpq-dev \ - libproj-dev \ - gdal-bin \ - python3 \ - python3-pip && \ + libpq-dev \ + libproj-dev \ + gdal-bin \ + python3 \ + python3-pip \ + python3-venv && \ apt clean +# Create a virtual environment +ENV VIRTUAL_ENV=/opt/venv +RUN python3 -m venv $VIRTUAL_ENV +ENV PATH="$VIRTUAL_ENV/bin:$PATH" +ENV PYTHONUNBUFFERED=1 + # Install Python dependencies COPY requirements.txt /tmp/requirements.txt -RUN pip3 install -r /tmp/requirements.txt --break-system-packages +RUN pip install --no-cache-dir -r /tmp/requirements.txt # Copy the application code to the container COPY src /app WORKDIR /app # Run collectstatic to gather static files -RUN REDIS_URL="redis://temp_value/" \ +# We pass dummy values for REDIS_URL and SECRET_KEY to ensure settings.py loads without error +RUN REDIS_URL="redis://dummy:6379/0" \ + SECRET_KEY="dummy" \ + ALLOWED_HOSTS="*" \ python3 
manage.py collectstatic --noinput -# Expose the port the app runs on +# Copy gunicorn config COPY gunicorn.conf.py /app/gunicorn.conf.py + +# Expose the port the app runs on EXPOSE 8000 + +# Default command +CMD ["sh", "-c", "gunicorn project.wsgi --pythonpath src --workers 3 --config gunicorn.conf.py --bind 0.0.0.0:${PORT:-8000}"] + + diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..5a712284 --- /dev/null +++ b/Makefile @@ -0,0 +1,39 @@ +.PHONY: test-env test test-clean build gcp-push gcp-restart gcp-deploy + +# Build the container image +build: + podman build -t shareabouts-api -f Containerfile . + +# Push image to GCP Container Registry +# Requires: PROJECT_ID, ENVIRONMENT_NAME environment variables +gcp-push: + @if [ -z "$(PROJECT_ID)" ]; then echo "Error: PROJECT_ID is not set"; exit 1; fi + @if [ -z "$(ENVIRONMENT_NAME)" ]; then echo "Error: ENVIRONMENT_NAME is not set"; exit 1; fi + podman tag shareabouts-api gcr.io/$(PROJECT_ID)/shareabouts-api:latest-$(ENVIRONMENT_NAME) + podman push gcr.io/$(PROJECT_ID)/shareabouts-api:latest-$(ENVIRONMENT_NAME) + +# Restart the Cloud Run service with the latest image +# Requires: PROJECT_ID, ENVIRONMENT_NAME, SERVICE_NAME, REGION environment variables +gcp-restart: + @if [ -z "$(PROJECT_ID)" ]; then echo "Error: PROJECT_ID is not set"; exit 1; fi + @if [ -z "$(ENVIRONMENT_NAME)" ]; then echo "Error: ENVIRONMENT_NAME is not set"; exit 1; fi + @if [ -z "$(SERVICE_NAME)" ]; then echo "Error: SERVICE_NAME is not set"; exit 1; fi + @if [ -z "$(REGION)" ]; then echo "Error: REGION is not set"; exit 1; fi + gcloud run services update $(SERVICE_NAME)-$(ENVIRONMENT_NAME) \ + --region $(REGION) \ + --image gcr.io/$(PROJECT_ID)/shareabouts-api:latest-$(ENVIRONMENT_NAME) + +# Full deployment: build, push, and restart +gcp-deploy: build gcp-push gcp-restart + +# Stub .env file +test-env: + cp .env.template .env + +# Run tests in a clean container environment +test: test-env test-clean + podman-compose run --rm 
test + +# Just clean up containers +test-clean: + podman-compose down --remove-orphans 2>/dev/null || true diff --git a/compose.yml b/compose.yml index 6eba3633..eb72d625 100644 --- a/compose.yml +++ b/compose.yml @@ -1,5 +1,3 @@ -version: '3.8' - services: init: build: @@ -7,11 +5,13 @@ services: dockerfile: Containerfile command: > sh -c " - python3 manage.py migrate --noinput && + python3 manage.py migrate --noinput && python3 manage.py ensuresuperuser --noinput && python3 manage.py createdefaultdataset " env_file: .env + environment: + - REDIS_URL=redis://redis:6379/0 depends_on: db: {"condition": "service_healthy"} redis: {"condition": "service_healthy"} @@ -21,8 +21,9 @@ services: build: context: . dockerfile: Containerfile - command: gunicorn project.wsgi --pythonpath src --workers ${WORKERS} --config gunicorn.conf.py --bind 0.0.0.0:8000 env_file: .env + environment: + - REDIS_URL=redis://redis:6379/0 ports: - "8000:8000" depends_on: @@ -38,6 +39,20 @@ services: env_file: .env environment: C_FORCE_ROOT: "true" + REDIS_URL: "redis://redis:6379/0" + depends_on: + db: {"condition": "service_healthy"} + redis: {"condition": "service_healthy"} + init: {"condition": "service_completed_successfully"} + + test: + build: + context: . + dockerfile: Containerfile + command: python3 manage.py test . + env_file: .env + environment: + - REDIS_URL=redis://redis:6379/0 depends_on: db: {"condition": "service_healthy"} redis: {"condition": "service_healthy"} diff --git a/doc/DEPLOY.md b/doc/DEPLOY.md index 29ceaf87..8e57a340 100644 --- a/doc/DEPLOY.md +++ b/doc/DEPLOY.md @@ -80,3 +80,93 @@ Deploying to Heroku 5. Connect the app with the repository (add a git remote) 6. Push to Heroku 7. Run database migrations (or copy the database from elsewhere) + +Deploying to Google Cloud Platform +---------------------------------- + +The GCP deployment uses OpenTofu (or Terraform) for infrastructure, Podman for +containerization, and Google Cloud Storage for media assets. + +### 1. 
Prerequisites + +- [OpenTofu](https://opentofu.org/) or [Terraform](https://www.terraform.io/) +- [Podman](https://podman.io/) or Docker +- [Google Cloud SDK (gcloud)](https://cloud.google.com/sdk) + +### 2. Infrastructure Setup + +Initialize and apply the OpenTofu configuration in the `infra/gcp` directory: + + cd infra/gcp + tofu init + tofu apply + +This will create the Cloud SQL instance, Cloud Run service, GCS bucket, and other necessary resources. + +### 3. Database Migration + +To import an existing database dump (e.g., from Heroku): + +1. **Convert to "Clean" SQL**: Use `pg_restore` with flags to ignore ownership and privileges that won't exist on Cloud SQL. + + pg_restore -O -x -f dump.sql input.dump + +2. **Upload to GCS**: + + gcloud storage cp dump.sql gs://your-migration-bucket/ + +3. **Grant Permissions**: Ensure the Cloud SQL service account can read from the bucket. + + gcloud storage buckets add-iam-policy-binding gs://your-migration-bucket \ + --member="serviceAccount:" \ + --role="roles/storage.objectViewer" + + *(You can find the service account email using `gcloud sql instances describe `)* + +4. **Run Import**: + + gcloud sql import sql gs://your-migration-bucket/dump.sql \ + --database= --user= + +### 4. Image Deployment + +A `Makefile` is provided for common deployment tasks. + +1. **Authenticate with Container Registry** (one-time setup): + + ```bash + gcloud auth configure-docker gcr.io + ``` + + *(For Podman, you may also need to run:)* + + ```bash + gcloud auth print-access-token | podman login -u oauth2accesstoken --password-stdin https://gcr.io + ``` + +2. **Set Environment Variables**: + + ```bash + export PROJECT_ID=your-project-id + export SERVICE_NAME=your-service-name + export ENVIRONMENT_NAME=your-environment-name + export REGION=your-region + ``` + +3. 
**Deploy** (build, push, and restart Cloud Run): + + ```bash + make gcp-deploy + ``` + + Or run individual steps: + + ```bash + make build # Build the container image locally + make gcp-push # Push image to GCR + make gcp-restart # Update the Cloud Run service + ``` + +### 5. Static Files + +Currently, static files are served directly by the container using `dj_static.Cling`. Ensure `STATIC_URL` and `STATICFILES_STORAGE` in `settings.py` are configured appropriately (local serving is the default if GCS static configuration is commented out). diff --git a/gunicorn.conf.py b/gunicorn.conf.py index 04e95309..e5bd2992 100644 --- a/gunicorn.conf.py +++ b/gunicorn.conf.py @@ -4,3 +4,6 @@ secure_scheme_headers = { 'X-FORWARDED-PROTO': 'https', } +accesslog = '-' +errorlog = '-' +timeout = 120 diff --git a/infra/gcp-domains/.auto.tfvars.example b/infra/gcp-domains/.auto.tfvars.example new file mode 100644 index 00000000..173793e2 --- /dev/null +++ b/infra/gcp-domains/.auto.tfvars.example @@ -0,0 +1,38 @@ +# Example configuration - update with your actual values +project_id = "example-shareabouts" +load_balancer_name = "custom-domains-abcd" + +# Add your domain mappings here +# Each key is a service name, value contains domains and cloud_run_service details +domain_mappings = { + # Example: + # shareabouts-api-dev = { + # domains = ["shareaboutsapi-gcp-dev.example.com"] + # cloud_run_service = { + # name = "shareabouts-api-dev" + # region = "us-central1" + # } + # } +} + +# Optional: default backend for unmatched requests +# default_backend_service = "projects/example-shareabouts/global/backendServices/default-backend" + +# Optional: redirect host for unmatched requests (used when default_backend_service is not set) +# default_redirect_host = "example.com" + +# Optional: Legacy host rules for existing backend services not managed by this project +# legacy_host_rules = { +# my-legacy-service = { +# hosts = ["legacy.example.com"] +# path_matcher = "legacy-example-com" +# 
backend_service = "https://www.googleapis.com/compute/v1/projects/my-project/global/backendServices/my-backend" +# } +# } + +# Optional: Group domains into separate SSL certificates +# domains not listed here will be grouped into a "default" certificate +# ssl_certs = { +# mycity-gov = ["suggest.mycity.gov", "suggest-staging.mycity.gov"] +# bikeshare-com = ["suggest.bikeshare.com"] +# } diff --git a/infra/gcp-domains/.terraform.lock.hcl b/infra/gcp-domains/.terraform.lock.hcl new file mode 100644 index 00000000..bf1af20a --- /dev/null +++ b/infra/gcp-domains/.terraform.lock.hcl @@ -0,0 +1,37 @@ +# This file is maintained automatically by "tofu init". +# Manual edits may be lost in future updates. + +provider "registry.opentofu.org/hashicorp/google" { + version = "5.45.2" + constraints = "~> 5.0" + hashes = [ + "h1:0RjrEaRJMIWbgQ4sBfjjLOy9tZiaKaq4r5J1iVz085E=", + "zh:0931f08e81f220ae3132169cfa4ed8e9d8d2045f29ca914afd8ee9e3e9cf56e0", + "zh:31afa45a4c8a0fd4abff564ecff8b69a97ac1813ead61c12f5f0bf5d33cec7f1", + "zh:536979e437aad59ba41465c9398d8e3d7d3702bfe2a51d80571862d48c817959", + "zh:748e14614be32350ece4e9249e09bc1d20e54421983734ded3a0df6d6674ea71", + "zh:7c8fe641666603aad6693207c8eaac679b9be15246d77090c73a1a84326d6084", + "zh:8095a513a0662323d99c25466b5a291c80b2b0c1857c7c7a7b1159f25dbe4439", + "zh:9453db86d14611cab26dba30daf56d1cfef929918207e9e3e78b58299fc8c4fe", + "zh:adaa5df5d40060409b6b66136c0ac37b99fb35ac2cf554c584649c236a18d95b", + "zh:af2f659b4bd1f44e578f203830bdab829b5e635fcf2a59ffa7e997c16e6611ad", + "zh:b75184fe5c162821b0524fa941d6a934c452e815d82e62675bb21bbdc9046dfc", + ] +} + +provider "registry.opentofu.org/hashicorp/random" { + version = "3.8.0" + constraints = "~> 3.0" + hashes = [ + "h1:ey4eBIHiuAC5xsblxtXghXE3nWwUvGqTT6KAsggiAwo=", + "zh:2d5e0bbfac7f15595739fe54a9ab8b8eea92fd6d879706139dad7ecaa5c01c19", + "zh:349e637066625d97aaa84db1b1418c86d6457cf9c5a62f6dcc3f55cbd535112c", + "zh:5f4456d53f5256ccfdb87dd35d3bf34578d01bd9b71cffaf507f0692805eac8a", + 
"zh:6c1ecfacc5f7079a068d7f8eb8924485d4ec8183f36e6318a6e748d35921ddac", + "zh:6d86641edeb8c394f121f7b0a691d72f89cf9b938b987a01fc32aad396a50555", + "zh:76947bd7bc7033b33980538da149c94e386f9b0abb2ce63733f25a57517e4742", + "zh:79c07f4c8b3a63d9f89e25e4348b462c57e179bca66ba533710851c485e282db", + "zh:ac1c2b941d994728a3a93aba093fd2202f9311d099ff85f66678897c792161ba", + "zh:cbb2aa867fd828fcb4125239e00862b9a3bc2f280e945c760224276b476f4c49", + ] +} diff --git a/infra/gcp-domains/README.md b/infra/gcp-domains/README.md new file mode 100644 index 00000000..58ab68f7 --- /dev/null +++ b/infra/gcp-domains/README.md @@ -0,0 +1,64 @@ +# Central Domain Management + +This Tofu project manages the shared load balancer and domain mappings for all services. + +## Architecture + +All domain routing is centralized here because: +- `google_cloud_run_domain_mapping` is designed for Cloud Run v1 API +- Our services use `google_cloud_run_v2_service` (v2 API) +- The recommended v2 approach is a Global External Application Load Balancer + +This project dynamically creates: +- Serverless NEGs for each Cloud Run service +- Backend Services for each NEG +- URL Map host rules for domain routing +- Managed SSL certificates + +## Setup + +1. Copy `.auto.tfvars.example` to `.auto.tfvars` and configure +2. Initialize: `tofu init` +3. Import existing resources (first time only): + ```bash + tofu import google_compute_url_map.default projects/poepublic-shareabouts/global/urlMaps/custom-domains-b84d + tofu import google_compute_target_https_proxy.default projects/poepublic-shareabouts/global/targetHttpsProxies/PROXY_NAME + tofu import google_compute_global_forwarding_rule.default projects/poepublic-shareabouts/global/forwardingRules/RULE_NAME + ``` +4. Plan and apply: `tofu plan && tofu apply` + +## Adding a New Service + +1. Deploy your service (e.g., in `../gcp/`) +2. 
Add an entry to `domain_mappings` in `.auto.tfvars`: + ```hcl + domain_mappings = { + my-service = { + domains = ["my-domain.example.com"] + cloud_run_service = { + name = "my-service-name" # From service project output + region = "us-central1" + } + } + } + ``` +3. Run `tofu apply` + +## SSL Certificate Management + +To optimize quota usage and avoid re-provisioning all domains when one changes, you can group domains into separate managed SSL certificates using the `ssl_certs` variable. + +```hcl +ssl_certs = { + group-name-1 = ["domain1.com", "domain2.com"] + group-name-2 = ["domain3.com"] +} +``` + +- Any domains listed in `ssl_certs` will get their own dedicated certificate. +- Any domains **not** listed in `ssl_certs` (but present in `domain_mappings` or `legacy_host_rules`) will be automatically bundled into a "default" certificate. +- This allows you to isolate high-churn domains or group by organization. + +## Future Considerations + +If Google introduces a v2-compatible domain mapping resource (e.g., `google_cloud_run_v2_domain_mapping`), consider migrating domain configuration back to individual service projects for simpler management. 
diff --git a/infra/gcp-domains/backend.tf b/infra/gcp-domains/backend.tf new file mode 100644 index 00000000..71bce532 --- /dev/null +++ b/infra/gcp-domains/backend.tf @@ -0,0 +1,35 @@ +# ------------------------------------------------------------------------------ +# Serverless Network Endpoint Groups (NEGs) +# One per service in domain_mappings +# ------------------------------------------------------------------------------ +resource "google_compute_region_network_endpoint_group" "serverless_neg" { + for_each = var.domain_mappings + + name = "${each.key}-neg" + network_endpoint_type = "SERVERLESS" + region = each.value.cloud_run_service.region + + cloud_run { + service = each.value.cloud_run_service.name + } +} + +# ------------------------------------------------------------------------------ +# Backend Services +# One per service in domain_mappings +# ------------------------------------------------------------------------------ +resource "google_compute_backend_service" "default" { + for_each = var.domain_mappings + + name = "${each.key}-backend" + protocol = "HTTP" + load_balancing_scheme = "EXTERNAL_MANAGED" + + backend { + group = google_compute_region_network_endpoint_group.serverless_neg[each.key].id + } + + log_config { + enable = true + } +} diff --git a/infra/gcp-domains/main.tf b/infra/gcp-domains/main.tf new file mode 100644 index 00000000..07cced21 --- /dev/null +++ b/infra/gcp-domains/main.tf @@ -0,0 +1,116 @@ +# ------------------------------------------------------------------------------ +# URL Map +# This resource should be IMPORTED from the existing load balancer. +# Run: tofu import google_compute_url_map.default projects/PROJECT_ID/global/urlMaps/LOAD_BALANCER_NAME +# +# NOTE: Existing host rules from the imported URL map that are not in +# domain_mappings will be removed. To preserve them, add them to domain_mappings +# or use legacy_host_rules variable. 
+# ------------------------------------------------------------------------------ +resource "google_compute_url_map" "default" { + name = var.load_balancer_name + + # Default: redirect unmatched hosts to a specified URL, or return 404 + dynamic "default_url_redirect" { + for_each = var.default_backend_service == null ? [1] : [] + content { + https_redirect = true + redirect_response_code = "FOUND" + strip_query = false + host_redirect = var.default_redirect_host + } + } + + # If a default backend service is specified, use it instead of redirect + default_service = var.default_backend_service + + # Legacy host rules (for existing mappings not managed by this project) + dynamic "host_rule" { + for_each = var.legacy_host_rules + content { + hosts = host_rule.value.hosts + path_matcher = host_rule.value.path_matcher + } + } + + # Dynamic host rules for each service in domain_mappings + dynamic "host_rule" { + for_each = var.domain_mappings + content { + hosts = host_rule.value.domains + path_matcher = host_rule.key + } + } + + # Path matchers for legacy host rules (uses existing backend services) + dynamic "path_matcher" { + for_each = var.legacy_host_rules + content { + name = path_matcher.value.path_matcher + default_service = path_matcher.value.backend_service + } + } + + # Path matchers for domain_mappings (uses our created backend services) + dynamic "path_matcher" { + for_each = var.domain_mappings + content { + name = path_matcher.key + default_service = google_compute_backend_service.default[path_matcher.key].id + } + } +} + +# ------------------------------------------------------------------------------ +# Target HTTPS Proxy +# This resource should be IMPORTED from the existing load balancer. 
+# Run: tofu import google_compute_target_https_proxy.default projects/PROJECT_ID/global/targetHttpsProxies/PROXY_NAME +# ------------------------------------------------------------------------------ +resource "google_compute_target_https_proxy" "default" { + name = "${var.load_balancer_name}-proxy" + url_map = google_compute_url_map.default.id + certificate_map = "//certificatemanager.googleapis.com/${google_certificate_manager_certificate_map.default.id}" +} + + +# ------------------------------------------------------------------------------ +# Global Forwarding Rule +# This resource should be IMPORTED from the existing load balancer. +# Run: tofu import google_compute_global_forwarding_rule.default projects/PROJECT_ID/global/forwardingRules/RULE_NAME +# ------------------------------------------------------------------------------ +resource "google_compute_global_forwarding_rule" "default" { + name = "${var.load_balancer_name}-fe" + target = google_compute_target_https_proxy.default.id + port_range = "443" + load_balancing_scheme = "EXTERNAL_MANAGED" +} + +# ------------------------------------------------------------------------------ +# HTTP to HTTPS Redirect +# These resources handle HTTP requests and redirect them to HTTPS +# ------------------------------------------------------------------------------ + +# URL Map for HTTP redirect (redirects all HTTP to HTTPS) +resource "google_compute_url_map" "http_redirect" { + name = "${var.load_balancer_name}-http" + + default_url_redirect { + https_redirect = true + redirect_response_code = "MOVED_PERMANENTLY_DEFAULT" + strip_query = false + } +} + +# Target HTTP Proxy +resource "google_compute_target_http_proxy" "default" { + name = "${var.load_balancer_name}-proxy-http" + url_map = google_compute_url_map.http_redirect.id +} + +# Global Forwarding Rule for HTTP +resource "google_compute_global_forwarding_rule" "http" { + name = "${var.load_balancer_name}-fe-http" + target = 
google_compute_target_http_proxy.default.id + port_range = "80" + load_balancing_scheme = "EXTERNAL_MANAGED" +} diff --git a/infra/gcp-domains/outputs.tf b/infra/gcp-domains/outputs.tf new file mode 100644 index 00000000..931e8c31 --- /dev/null +++ b/infra/gcp-domains/outputs.tf @@ -0,0 +1,16 @@ +output "load_balancer_ip" { + description = "The IP address of the load balancer" + value = google_compute_global_forwarding_rule.default.ip_address +} + +output "ssl_certificate_domains" { + description = "The domains covered by the Certificate Manager certificates" + value = { + for k, v in google_certificate_manager_certificate.default : k => v.managed[0].domains + } +} + +output "configured_services" { + description = "The services configured in the URL map" + value = keys(var.domain_mappings) +} diff --git a/infra/gcp-domains/provider.tf b/infra/gcp-domains/provider.tf new file mode 100644 index 00000000..5c00a123 --- /dev/null +++ b/infra/gcp-domains/provider.tf @@ -0,0 +1,22 @@ +terraform { + required_providers { + google = { + source = "hashicorp/google" + version = "~> 5.0" + } + random = { + source = "hashicorp/random" + version = "~> 3.0" + } + } + + backend "gcs" { + bucket = "${var.project_id}-tfstate" + prefix = "domains" + } +} + +provider "google" { + project = var.project_id + region = var.region +} diff --git a/infra/gcp-domains/secrets.tf b/infra/gcp-domains/secrets.tf new file mode 100644 index 00000000..1045fc13 --- /dev/null +++ b/infra/gcp-domains/secrets.tf @@ -0,0 +1,23 @@ +resource "google_secret_manager_secret" "tfvars" { + secret_id = "${var.load_balancer_name}-tfvars" + + replication { + auto {} + } +} + +resource "google_secret_manager_secret_version" "tfvars" { + secret = google_secret_manager_secret.tfvars.id + + secret_data = < [d] } + ) + + # Flattened list of all domain->cert mapping entries + # This creates a list of objects like: { domain = "example.com", cert_key = "example-com" } + certificate_map_entries = flatten([ + for cert_key, 
domains in local.certificate_map : [ + for domain in domains : { + domain = domain + cert_key = cert_key + } + ] + ]) +} + +# ------------------------------------------------------------------------------ +# Certificate Map +# ------------------------------------------------------------------------------ +resource "google_certificate_manager_certificate_map" "default" { + name = "${var.load_balancer_name}-map" + description = "Certificate map for ${var.load_balancer_name}" +} + +# ------------------------------------------------------------------------------ +# Certificate Manager Certificates +# ------------------------------------------------------------------------------ +resource "google_certificate_manager_certificate" "default" { + for_each = local.certificate_map + + name = "${var.load_balancer_name}-${each.key}-cert" + description = "Managed certificate for group: ${each.key}" + scope = "DEFAULT" + + managed { + domains = each.value + } + + # Be careful when destroying certificates; some cannot be as + # easily provisioned as others, especially if you don't control + # the DNS records for the domains. 
+ lifecycle { + prevent_destroy = true + } +} + +# ------------------------------------------------------------------------------ +# Certificate Map Entries +# ------------------------------------------------------------------------------ +resource "google_certificate_manager_certificate_map_entry" "default" { + for_each = { + for entry in local.certificate_map_entries : entry.domain => entry + } + + name = "entry-${replace(each.value.domain, ".", "-")}" + description = "Map entry for ${each.value.domain}" + map = google_certificate_manager_certificate_map.default.name + + certificates = [google_certificate_manager_certificate.default[each.value.cert_key].id] + hostname = each.value.domain +} diff --git a/infra/gcp-domains/variables.tf b/infra/gcp-domains/variables.tf new file mode 100644 index 00000000..e8d69bce --- /dev/null +++ b/infra/gcp-domains/variables.tf @@ -0,0 +1,55 @@ +variable "project_id" { + description = "The GCP project ID" + type = string +} + +variable "region" { + description = "The default GCP region" + type = string + default = "us-central1" +} + +variable "load_balancer_name" { + description = "The name of the load balancer (URL map name)" + type = string +} + +variable "domain_mappings" { + description = "Map of service names to their domain and Cloud Run service configurations" + type = map(object({ + domains = list(string) + cloud_run_service = object({ + name = string + region = string + }) + })) + default = {} +} + +variable "legacy_host_rules" { + description = "Existing host rules to preserve (for backend services not managed by this project)" + type = map(object({ + hosts = list(string) + path_matcher = string + backend_service = string # Full backend service URL + })) + default = {} +} + +variable "default_backend_service" { + description = "The default backend service for unmatched requests (optional, if not set uses redirect)" + type = string + default = null +} + +variable "default_redirect_host" { + description = "The host to 
redirect unmatched requests to (used when default_backend_service is null)" + type = string + default = "poepublic.com" +} + +variable "ssl_certs" { + type = map(list(string)) + description = "A map of custom SSL certificate groups. Any domains not in this map will be grouped into a default certificate." + default = {} +} diff --git a/infra/gcp/.terraform.lock.hcl b/infra/gcp/.terraform.lock.hcl new file mode 100644 index 00000000..594e2bb9 --- /dev/null +++ b/infra/gcp/.terraform.lock.hcl @@ -0,0 +1,55 @@ +# This file is maintained automatically by "tofu init". +# Manual edits may be lost in future updates. + +provider "registry.opentofu.org/hashicorp/google" { + version = "5.45.2" + constraints = "~> 5.0" + hashes = [ + "h1:0RjrEaRJMIWbgQ4sBfjjLOy9tZiaKaq4r5J1iVz085E=", + "zh:0931f08e81f220ae3132169cfa4ed8e9d8d2045f29ca914afd8ee9e3e9cf56e0", + "zh:31afa45a4c8a0fd4abff564ecff8b69a97ac1813ead61c12f5f0bf5d33cec7f1", + "zh:536979e437aad59ba41465c9398d8e3d7d3702bfe2a51d80571862d48c817959", + "zh:748e14614be32350ece4e9249e09bc1d20e54421983734ded3a0df6d6674ea71", + "zh:7c8fe641666603aad6693207c8eaac679b9be15246d77090c73a1a84326d6084", + "zh:8095a513a0662323d99c25466b5a291c80b2b0c1857c7c7a7b1159f25dbe4439", + "zh:9453db86d14611cab26dba30daf56d1cfef929918207e9e3e78b58299fc8c4fe", + "zh:adaa5df5d40060409b6b66136c0ac37b99fb35ac2cf554c584649c236a18d95b", + "zh:af2f659b4bd1f44e578f203830bdab829b5e635fcf2a59ffa7e997c16e6611ad", + "zh:b75184fe5c162821b0524fa941d6a934c452e815d82e62675bb21bbdc9046dfc", + ] +} + +provider "registry.opentofu.org/hashicorp/google-beta" { + version = "5.45.2" + constraints = "~> 5.0" + hashes = [ + "h1:KkOlqypKHYYvfzrqVPdhfiyHwYeafFy0WD805a01eSg=", + "zh:2df6e40591ceee7ee77d429ea072c9d51fef2dd04015b2604ff332a2af4ac819", + "zh:4096af21991ba76ab81c8cb00c0eb0bd4f22619f7e491d60023fb10b8b33bfb1", + "zh:44ded286956fff5668f1acbf152b62ca8e6a03abc8df12c5c181bc2ca05b4df7", + "zh:7ae19e1b53a0e26bea0acb9a96b4b44038d7c182c3fdd496148fd20e40aa78e1", + 
"zh:81c9812823b78fd1b12bc0acd6dae35bc573944950e09eaf237b2e83b6b587d7", + "zh:9db6101421b53b9533807928c651e779f5b8129f4a57ff892bf256c84ba6ed29", + "zh:b779729cb08829f621a718ecdfdb503c310ef5411e694996c7cfda7227221134", + "zh:c43edb31aee354317a6181272a961965b93722fd18637f38c395af013aa65617", + "zh:dbb93970a85f2fe84f650b6a4da694ecb1023a99c3b9bbf6953dccd074fa49ce", + "zh:df9d13853269e98651d495571b4d58c883b4386247d0b9c5495c2e82ef721f45", + ] +} + +provider "registry.opentofu.org/hashicorp/random" { + version = "3.7.2" + hashes = [ + "h1:yHMBbZOIHlXUuBQ8Mhioe0hwmhermuboq2eNNoCJaf8=", + "zh:2ffeb1058bd7b21a9e15a5301abb863053a2d42dffa3f6cf654a1667e10f4727", + "zh:519319ed8f4312ed76519652ad6cd9f98bc75cf4ec7990a5684c072cf5dd0a5d", + "zh:7371c2cc28c94deb9dba62fbac2685f7dde47f93019273a758dd5a2794f72919", + "zh:9b0ac4c1d8e36a86b59ced94fa517ae9b015b1d044b3455465cc6f0eab70915d", + "zh:c6336d7196f1318e1cbb120b3de8426ce43d4cacd2c75f45dba2dbdba666ce00", + "zh:c71f18b0cb5d55a103ea81e346fb56db15b144459123f1be1b0209cffc1deb4e", + "zh:d2dc49a6cac2d156e91b0506d6d756809e36bf390844a187f305094336d3e8d8", + "zh:d5b5fc881ccc41b268f952dae303501d6ec9f9d24ee11fe2fa56eed7478e15d0", + "zh:db9723eaca26d58c930e13fde221d93501529a5cd036b1f167ef8cff6f1a03cc", + "zh:fe3359f733f3ab518c6f85f3a9cd89322a7143463263f30321de0973a52d4ad8", + ] +} diff --git a/infra/gcp/cloudrun.tf b/infra/gcp/cloudrun.tf new file mode 100644 index 00000000..04583660 --- /dev/null +++ b/infra/gcp/cloudrun.tf @@ -0,0 +1,101 @@ +locals { + # Cloud Run service URL -- ideally we would pull this from the service resource + # itself, but we need to set it as an environment variable for ALLOWED_HOSTS + # before the service is created. 
+ default_cloud_run_domain = "${var.service_name}-${var.environment}-${var.project_id}-${var.region}.run.app" +} + +resource "google_cloud_run_v2_service" "default" { + name = "${var.service_name}-${var.environment}" + location = var.region + ingress = "INGRESS_TRAFFIC_ALL" + + template { + service_account = google_service_account.sa.email + vpc_access { + connector = google_vpc_access_connector.connector.id + egress = "ALL_TRAFFIC" + } + + containers { + image = "gcr.io/${var.project_id}/${var.service_name}:latest-${var.environment}" # Assumes image is pushed + + resources { + limits = { + cpu = "1000m" + memory = "1Gi" + } + } + + + env { + name = "DB_PASSWORD" + value_source { + secret_key_ref { + secret = google_secret_manager_secret.db_password.secret_id + version = "latest" + } + } + } + + env { + name = "DATABASE_HOST" + value = google_sql_database_instance.instance.private_ip_address + } + env { + name = "DATABASE_NAME" + value = google_sql_database.database.name + } + env { + name = "DATABASE_USER" + value = google_sql_user.user.name + } + + env { + name = "REDIS_URL" + value = "redis://${google_redis_instance.cache.host}:${google_redis_instance.cache.port}/0" + } + env { + name = "REDIS_KEY_PREFIX" + value = var.environment + } + env { + name = "GS_BUCKET_NAME" + value = google_storage_bucket.static.name + } + env { + name = "GS_PROJECT_ID" + value = var.project_id + } + env { + name = "SECRET_KEY" + value_source { + secret_key_ref { + secret = google_secret_manager_secret.secret_key.secret_id + version = "latest" + } + } + } + env { + name = "DEBUG" + value = "False" + } + env { + name = "ALLOWED_HOSTS" + value = join(",", concat( + [local.default_cloud_run_domain], + var.domain_names, + var.additional_allowed_hosts + )) + } + } + } + depends_on = [google_project_service.apis] +} + +resource "google_cloud_run_service_iam_member" "public" { + service = google_cloud_run_v2_service.default.name + location = google_cloud_run_v2_service.default.location + 
  role     = "roles/run.invoker"
  member   = "allUsers"
}
diff --git a/infra/gcp/db.tf b/infra/gcp/db.tf
new file mode 100644
index 00000000..bc67b02c
--- /dev/null
+++ b/infra/gcp/db.tf
@@ -0,0 +1,28 @@
+# Cloud SQL instance is shared across environments; different databases are
+# created within the instance for each environment. NOTE(review): Terraform state is split per environment (the GCS backend prefix includes the environment), so each environment's state will attempt to create this identically-named instance and the second apply will fail -- confirm the instance is provisioned exactly once (shared/base stack, or imported into each state).
+resource "google_sql_database_instance" "instance" {
+  name             = "${var.service_name}-db-instance"
+  region           = var.region
+  database_version = "POSTGRES_15"
+  depends_on       = [google_service_networking_connection.private_vpc_connection]
+
+  settings {
+    tier = "db-f1-micro" # Smallest for testing
+    ip_configuration {
+      ipv4_enabled    = false
+      private_network = google_compute_network.vpc.id
+    }
+  }
+  deletion_protection = false # For easier cleanup during testing
+}
+
+resource "google_sql_database" "database" {
+  name     = "${var.service_name}-${var.environment}"
+  instance = google_sql_database_instance.instance.name
+}
+
+resource "google_sql_user" "user" {
+  name     = "${var.service_name}-${var.environment}"
+  instance = google_sql_database_instance.instance.name
+  password = random_password.db_password.result
+}
diff --git a/infra/gcp/domain.tf b/infra/gcp/domain.tf
new file mode 100644
index 00000000..5e174c6a
--- /dev/null
+++ b/infra/gcp/domain.tf
@@ -0,0 +1,48 @@
+# ------------------------------------------------------------------------------
+# Domain Mapping
+# ------------------------------------------------------------------------------
+#
+# Domain mapping for this Cloud Run service is managed in a separate project:
+#   ../gcp-domains/
+#
+# WHY A SEPARATE PROJECT?
+# -----------------------
+# The `google_cloud_run_domain_mapping` resource is designed for the Cloud Run
+# v1 API, but this service uses `google_cloud_run_v2_service`. While v1 domain
+# mapping may technically work, it's not officially recommended for v2 services
+# and may have compatibility issues or limited feature support.
+# +# The recommended approach for v2 is to use a Global External Application Load +# Balancer with: +# - Serverless NEG (Network Endpoint Group) +# - Backend Service +# - URL Map with host rules +# - Managed SSL certificates +# +# Since we share a load balancer across multiple services, this configuration +# lives in the central `gcp-domains` project to avoid Terraform state conflicts. +# +# FUTURE CONSIDERATIONS +# --------------------- +# If Google introduces a v2-compatible domain mapping resource (e.g., +# `google_cloud_run_v2_domain_mapping`), it would be simpler to manage domain +# configuration directly in this project alongside the Cloud Run service. +# Monitor the Terraform Google provider changelog for updates. +# +# TO CONFIGURE DOMAINS +# -------------------- +# 1. Deploy this service: `tofu apply` +# 2. Add an entry to `../gcp-domains/.auto.tfvars`: +# +# domain_mappings = { +# shareabouts-api-dev = { +# domains = ["shareaboutsapi-gcp-dev.poepublic.com"] +# cloud_run_service = { +# name = "shareabouts-api-dev" # Use output: cloud_run_service_name +# region = "us-central1" +# } +# } +# } +# +# 3. 
Apply the domain mapping: `cd ../gcp-domains && tofu apply` +# ------------------------------------------------------------------------------ diff --git a/infra/gcp/iam.tf b/infra/gcp/iam.tf new file mode 100644 index 00000000..89625255 --- /dev/null +++ b/infra/gcp/iam.tf @@ -0,0 +1,32 @@ +# Service Account for Cloud Run +resource "google_service_account" "sa" { + account_id = "${var.service_name}-${var.environment}-sa" + display_name = "Cloud Run Service Account" +} + +# Grant Cloud Run SA access to Cloud SQL +resource "google_project_iam_member" "sql_client" { + project = var.project_id + role = "roles/cloudsql.client" + member = "serviceAccount:${google_service_account.sa.email}" +} + +# Grant Cloud Run SA access to GCS +resource "google_storage_bucket_iam_member" "storage_admin" { + bucket = google_storage_bucket.static.name + role = "roles/storage.objectAdmin" + member = "serviceAccount:${google_service_account.sa.email}" +} + +resource "google_secret_manager_secret_iam_member" "db_password_access" { + secret_id = google_secret_manager_secret.db_password.id + role = "roles/secretmanager.secretAccessor" + member = "serviceAccount:${google_service_account.sa.email}" +} + +resource "google_secret_manager_secret_iam_member" "secret_key_access" { + secret_id = google_secret_manager_secret.secret_key.id + role = "roles/secretmanager.secretAccessor" + member = "serviceAccount:${google_service_account.sa.email}" +} + diff --git a/infra/gcp/main.tf b/infra/gcp/main.tf new file mode 100644 index 00000000..eb954567 --- /dev/null +++ b/infra/gcp/main.tf @@ -0,0 +1,18 @@ +# Enable necessary APIs +resource "google_project_service" "apis" { + for_each = toset([ + "run.googleapis.com", + "sqladmin.googleapis.com", + "redis.googleapis.com", + "vpcaccess.googleapis.com", + "servicenetworking.googleapis.com", + "compute.googleapis.com", + "storage.googleapis.com", + "iam.googleapis.com", + "secretmanager.googleapis.com", + + ]) + + service = each.key + disable_on_destroy = 
false +} diff --git a/infra/gcp/network.tf b/infra/gcp/network.tf new file mode 100644 index 00000000..4be58ce7 --- /dev/null +++ b/infra/gcp/network.tf @@ -0,0 +1,36 @@ +resource "google_compute_network" "vpc" { + name = "${var.service_name}-vpc" + auto_create_subnetworks = false + depends_on = [google_project_service.apis] +} + +resource "google_compute_subnetwork" "subnet" { + name = "${var.service_name}-subnet" + ip_cidr_range = "10.0.0.0/24" + region = var.region + network = google_compute_network.vpc.id +} + +resource "google_vpc_access_connector" "connector" { + name = "${var.service_name}-conn" + region = var.region + ip_cidr_range = "10.8.0.0/28" + network = google_compute_network.vpc.id + depends_on = [google_project_service.apis] +} + +# Private Service Access for Cloud SQL and Redis +resource "google_compute_global_address" "private_ip_address" { + name = "${var.service_name}-private-ip" + purpose = "VPC_PEERING" + address_type = "INTERNAL" + prefix_length = 16 + network = google_compute_network.vpc.id +} + +resource "google_service_networking_connection" "private_vpc_connection" { + network = google_compute_network.vpc.id + service = "servicenetworking.googleapis.com" + reserved_peering_ranges = [google_compute_global_address.private_ip_address.name] + depends_on = [google_project_service.apis] +} diff --git a/infra/gcp/outputs.tf b/infra/gcp/outputs.tf new file mode 100644 index 00000000..c66d0f88 --- /dev/null +++ b/infra/gcp/outputs.tf @@ -0,0 +1,20 @@ +output "service_url" { + value = google_cloud_run_v2_service.default.uri +} + +output "database_connection_name" { + value = google_sql_database_instance.instance.connection_name +} + +output "redis_host" { + value = google_redis_instance.cache.host +} + +output "bucket_name" { + value = google_storage_bucket.static.name +} + +output "cloud_run_service_name" { + description = "The Cloud Run service name. Use this in the gcp-domains project's domain_mappings variable." 
+  value       = google_cloud_run_v2_service.default.name
+}
diff --git a/infra/gcp/provider.tf b/infra/gcp/provider.tf
new file mode 100644
index 00000000..7da12d7e
--- /dev/null
+++ b/infra/gcp/provider.tf
@@ -0,0 +1,34 @@
+terraform {
+  required_providers {
+    google = {
+      source  = "hashicorp/google"
+      version = "~> 5.0"
+    }
+    google-beta = {
+      source  = "hashicorp/google-beta"
+      version = "~> 5.0"
+    }
+    random = {
+      source  = "hashicorp/random"
+      version = "~> 3.0"
+    }
+  }
+
+  # NOTE: Terraform/OpenTofu backend blocks cannot interpolate variables --
+  # `${var.*}` here is rejected at `tofu init`. Use partial configuration
+  # and supply the values at init time instead:
+  #   tofu init \
+  #     -backend-config="bucket=<project_id>-tfstate" \
+  #     -backend-config="prefix=api/<environment>"
+  backend "gcs" {}
+}
+
+provider "google" {
+  project = var.project_id
+  region  = var.region
+}
+
+provider "google-beta" {
+  project = var.project_id
+  region  = var.region
+}
diff --git a/infra/gcp/redis.tf b/infra/gcp/redis.tf
new file mode 100644
index 00000000..88ea0442
--- /dev/null
+++ b/infra/gcp/redis.tf
@@ -0,0 +1,13 @@
+# Memorystore for Redis instance is shared across environments; use
+# environment name as key prefix to differentiate.
+resource "google_redis_instance" "cache" { + name = "${var.service_name}-redis" + tier = "BASIC" + memory_size_gb = 1 + region = var.region + + authorized_network = google_compute_network.vpc.id + connect_mode = "DIRECT_PEERING" + + depends_on = [google_service_networking_connection.private_vpc_connection] +} diff --git a/infra/gcp/secrets.tf b/infra/gcp/secrets.tf new file mode 100644 index 00000000..a872875b --- /dev/null +++ b/infra/gcp/secrets.tf @@ -0,0 +1,53 @@ +resource "google_secret_manager_secret" "db_password" { + secret_id = "${var.service_name}-${var.environment}-db-password" + replication { + auto {} + } +} + +resource "random_password" "db_password" { + length = 16 + special = false +} + +resource "google_secret_manager_secret_version" "db_password" { + secret = google_secret_manager_secret.db_password.id + secret_data = random_password.db_password.result +} + +resource "google_secret_manager_secret" "secret_key" { + secret_id = "${var.service_name}-${var.environment}-secret-key" + replication { + auto {} + } +} + +resource "random_password" "secret_key" { + length = 50 + special = true +} + +resource "google_secret_manager_secret_version" "secret_key" { + secret = google_secret_manager_secret.secret_key.id + secret_data = random_password.secret_key.result +} + +resource "google_secret_manager_secret" "tfvars" { + secret_id = "${var.service_name}-${var.environment}-tfvars" + replication { + auto {} + } +} + +resource "google_secret_manager_secret_version" "tfvars" { + secret = google_secret_manager_secret.tfvars.id + + secret_data = < S3 > Local -if all([key in environ for key in ('SHAREABOUTS_AWS_KEY', - 'SHAREABOUTS_AWS_SECRET', - 'SHAREABOUTS_AWS_BUCKET')]): +DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage' +STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage' +ATTACHMENT_STORAGE = DEFAULT_FILE_STORAGE + +if 'GS_BUCKET_NAME' in environ: + # Google Cloud Storage + GS_BUCKET_NAME = 
environ['GS_BUCKET_NAME'] + GS_PROJECT_ID = environ.get('GS_PROJECT_ID') + + DEFAULT_FILE_STORAGE = "storages.backends.gcloud.GoogleCloudStorage" + + GS_DEFAULT_ACL = "publicRead" + + # TODO: Use GCS for static file storage; With this commented out, the + # default behavior of dj_static.Cling is used. + # # Static files + # STATICFILES_STORAGE = "storages.backends.gcloud.GoogleCloudStorage" + # STATIC_URL = f"https://storage.googleapis.com/{GS_BUCKET_NAME}/static/" + + # Media files + MEDIA_URL = f"https://storage.googleapis.com/{GS_BUCKET_NAME}/media/" + + # Attachments + ATTACHMENT_STORAGE = DEFAULT_FILE_STORAGE + +elif all([key in environ for key in ('SHAREABOUTS_AWS_KEY', + 'SHAREABOUTS_AWS_SECRET', + 'SHAREABOUTS_AWS_BUCKET')]): + # AWS S3 AWS_ACCESS_KEY_ID = environ['SHAREABOUTS_AWS_KEY'] AWS_SECRET_ACCESS_KEY = environ['SHAREABOUTS_AWS_SECRET'] AWS_STORAGE_BUCKET_NAME = environ['SHAREABOUTS_AWS_BUCKET'] @@ -407,6 +471,7 @@ def custom_show_toolbar(request): DEFAULT_FILE_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage' ATTACHMENT_STORAGE = DEFAULT_FILE_STORAGE + if 'SHAREABOUTS_TWITTER_KEY' in environ and 'SHAREABOUTS_TWITTER_SECRET' in environ: SOCIAL_AUTH_TWITTER_KEY = environ['SHAREABOUTS_TWITTER_KEY'] SOCIAL_AUTH_TWITTER_SECRET = environ['SHAREABOUTS_TWITTER_SECRET'] diff --git a/src/project/urls.py b/src/project/urls.py index d20846fe..4ec963bc 100644 --- a/src/project/urls.py +++ b/src/project/urls.py @@ -11,11 +11,16 @@ admin.autodiscover() +def raise_error(request): + raise Exception("Test error") + urlpatterns = [ # Examples: # re_path(r'^$', 'project.views.home', name='home'), # re_path(r'^project/', include('project.foo.urls')), + re_path(r'^test-error/', raise_error), + # NOTE: Redirect all manager urls until the manager is fixed. re_path(r'^$', lambda x: HttpResponseRedirect(resolve_url(settings.ROOT_REDIRECT_TO))),