From 810cad63af0d2033ec6e35596fcb71b96fef482a Mon Sep 17 00:00:00 2001 From: Subhash Khileri Date: Wed, 21 Jan 2026 12:27:22 +0530 Subject: [PATCH 1/4] local test runner --- .gitignore | 7 + .ibm/pipelines/openshift-ci-tests.sh | 8 +- .ibm/pipelines/utils.sh | 38 ++- e2e-tests/README.md | 378 +++++++++++++++++++++++++++ e2e-tests/container-init.sh | 141 ++++++++++ e2e-tests/local-run.sh | 335 ++++++++++++++++++++++++ e2e-tests/local-test-setup.sh | 128 +++++++++ 7 files changed, 1028 insertions(+), 7 deletions(-) create mode 100644 e2e-tests/container-init.sh create mode 100755 e2e-tests/local-run.sh create mode 100755 e2e-tests/local-test-setup.sh diff --git a/.gitignore b/.gitignore index d0074a9124..a711e6a4f7 100644 --- a/.gitignore +++ b/.gitignore @@ -76,6 +76,13 @@ dynamic-plugins-root/* .ibm/pipelines/artifact_dir/* .ibm/pipelines/env_override.local.sh +# E2E local test runner work directory +e2e-tests/.local-test/ + +# E2E test artifacts (generated by Playwright) +e2e-tests/playwright-report/ +e2e-tests/test-results/ + # Python Caches **/__pycache__/ **/.pytest_cache/ diff --git a/.ibm/pipelines/openshift-ci-tests.sh b/.ibm/pipelines/openshift-ci-tests.sh index 81340c2f3a..e648da9a7b 100755 --- a/.ibm/pipelines/openshift-ci-tests.sh +++ b/.ibm/pipelines/openshift-ci-tests.sh @@ -15,9 +15,11 @@ source "${DIR}/lib/log.sh" export OPENSHIFT_CI="${OPENSHIFT_CI:-false}" if [[ -z "${OPENSHIFT_CI}" || "${OPENSHIFT_CI}" == "false" ]]; then # NOTE: Use this file to override the environment variables for the local testing. - log::debug "Sourcing env_override.local.sh" - # shellcheck source=.ibm/pipelines/env_override.local.sh - source "${DIR}/env_override.local.sh" + if [[ -f "${DIR}/env_override.local.sh" ]]; then + log::debug "Sourcing env_override.local.sh" + # shellcheck source=.ibm/pipelines/env_override.local.sh + source "${DIR}/env_override.local.sh" + fi fi log::debug "Sourcing env_variables.sh" diff --git a/.ibm/pipelines/utils.sh b/.ibm/pipelines/utils.sh index b3a5ed1a7f..11901def6a 100755 --- a/.ibm/pipelines/utils.sh +++ b/.ibm/pipelines/utils.sh @@ -9,7 +9,7 @@ retrieve_pod_logs() { local pod_name=$1 local container=$2 local namespace=$3 - local log_timeout=${4:-30} # Default timeout: 30 seconds + local log_timeout=${4:-5} # Default timeout: 5 seconds (reduced from 30s to speed up failure cases) log::debug "Retrieving logs for container: $container" # Save logs for the current and previous container with timeout to prevent hanging timeout "${log_timeout}" kubectl logs "$pod_name" -c "$container" -n "$namespace" > "pod_logs/${pod_name}_${container}.log" 2> /dev/null || { log::warn "logs for container $container not found or timed out"; } @@ -666,8 +666,7 @@ deploy_test_backstage_customization_provider() { # Check if the buildconfig already exists if ! oc get buildconfig test-backstage-customization-provider -n "${project}" > /dev/null 2>&1; then log::info "Creating new app for test-backstage-customization-provider" - oc new-app -S openshift/nodejs:18-minimal-ubi8 - oc new-app https://github.com/janus-qe/test-backstage-customization-provider --image-stream="openshift/nodejs:18-ubi8" --namespace="${project}" + oc new-app openshift/nodejs:18-ubi8~https://github.com/janus-qe/test-backstage-customization-provider --namespace="${project}" else log::warn "BuildConfig for test-backstage-customization-provider already exists in ${project}. Skipping new-app creation." 
fi @@ -826,6 +825,33 @@ check_backstage_running() { else log::warn "Attempt ${i} of ${max_attempts}: Backstage not yet available (HTTP Status: ${http_status})" oc get pods -n "${namespace}" + + # Early crash detection: fail fast if RHDH pods are in CrashLoopBackOff + # Check both the main deployment and postgresql pods + local crash_pods + crash_pods=$(oc get pods -n "${namespace}" -l "app.kubernetes.io/instance in (${release_name},redhat-developer-hub,developer-hub,${release_name}-postgresql)" \ + -o jsonpath='{range .items[*]}{.metadata.name}{" "}{.status.phase}{" "}{range .status.containerStatuses[*]}{.state.waiting.reason}{end}{range .status.initContainerStatuses[*]}{.state.waiting.reason}{end}{"\n"}{end}' 2> /dev/null | grep -E "CrashLoopBackOff" || true) + # Also check by name pattern for postgresql pods that may have different labels + if [ -z "${crash_pods}" ]; then + crash_pods=$(oc get pods -n "${namespace}" --no-headers 2> /dev/null | grep -E "(${release_name}|developer-hub|postgresql)" | grep -E "CrashLoopBackOff|Init:CrashLoopBackOff" || true) + fi + + if [ -n "${crash_pods}" ]; then + log::error "❌ Detected pods in CrashLoopBackOff state - failing fast instead of waiting:" + echo "${crash_pods}" + log::error "Deployment status:" + oc get deployment -l "app.kubernetes.io/instance in (${release_name},redhat-developer-hub,developer-hub)" -n "${namespace}" -o wide 2> /dev/null || true + log::error "Recent logs from deployment:" + oc logs deployment/${release_name}-developer-hub -n "${namespace}" --tail=100 --all-containers=true 2> /dev/null \ + || oc logs deployment/${release_name} -n "${namespace}" --tail=100 --all-containers=true 2> /dev/null || true + log::error "Recent events:" + oc get events -n "${namespace}" --sort-by='.lastTimestamp' | tail -20 + mkdir -p "${ARTIFACT_DIR}/${namespace}" + cp -a "/tmp/${LOGFILE}" "${ARTIFACT_DIR}/${namespace}/" || true + save_all_pod_logs "${namespace}" + return 1 + fi + sleep "${wait_seconds}" fi done @@ -1262,7 +1288,11 @@ check_and_test() { if check_backstage_running "${release_name}" "${namespace}" "${url}" "${max_attempts}" "${wait_seconds}"; then echo "Display pods for verification..." oc get pods -n "${namespace}" - run_tests "${release_name}" "${namespace}" "${playwright_project}" "${url}" + if [[ "${SKIP_TESTS:-false}" == "true" ]]; then + log::info "SKIP_TESTS=true, skipping test execution for namespace: ${namespace}" + else + run_tests "${release_name}" "${namespace}" "${playwright_project}" "${url}" + fi else echo "Backstage is not running. Marking deployment as failed and continuing..." CURRENT_DEPLOYMENT=$((CURRENT_DEPLOYMENT + 1)) diff --git a/e2e-tests/README.md b/e2e-tests/README.md index 89c0371dfc..406bc980bb 100644 --- a/e2e-tests/README.md +++ b/e2e-tests/README.md @@ -3,3 +3,381 @@ The readme for the e2e framework is located [here](../docs/e2e-tests/README.md) The contribution guidelines are [here](../docs/e2e-tests/CONTRIBUTING.MD) The example and bootstraps to create tests are [here](../docs/e2e-tests/examples.md) + +--- + +## Local Test Runner + +This directory contains scripts to run e2e tests locally against an OpenShift cluster. + +### Prerequisites + +Before running, ensure you have: + +1. **Podman** installed and running with at least **8GB RAM** and **4 CPUs** +2. **oc CLI** installed and logged into your OpenShift cluster (`oc login`) +3. **Vault CLI** installed (for fetching secrets) +4. **jq** installed (for JSON parsing) +5. 
Access to the OpenShift CI vault (`https://vault.ci.openshift.org/ui/vault/secrets/kv/list/selfservice/rhdh-qe/`) - if you don't have access, reach out to @rhdh-qe in the team-rhdh channel. + +#### Installing Prerequisites (macOS) + +```bash +# Install tools via Homebrew +brew install podman jq rsync openshift-cli + +# Install HashiCorp Vault (requires tap) +brew tap hashicorp/tap +brew install hashicorp/tap/vault + +# Setup Podman machine (first time only) +podman machine init --memory 8192 --cpus 4 +podman machine start +``` + +### Getting a Cluster + +You need an OpenShift cluster to run e2e tests. Here are your options: + +#### Option 1: Cluster Bot (Recommended) + +Use the cluster-bot Slack app to request an ephemeral cluster. Send a direct message to the `cluster-bot` app: + +``` +launch 4.18 aws +``` + +The bot will provide login credentials once the cluster is ready (usually within a few minutes). + +#### Option 2: rhdh-test-instance + +You can use the [rhdh-test-instance](https://github.com/redhat-developer/rhdh-test-instance) to get an ephemeral cluster. + +> **⚠️ Warning:** This option is **not recommended for frequent testing** as it may interfere with existing PR test runs due to cluster claim conflicts. Use occasionally or for one-off testing only. + +See the [rhdh-test-instance README](https://github.com/redhat-developer/rhdh-test-instance/blob/main/README.md) for usage instructions. + +#### Option 3: Bring Your Own Cluster + +Use any OpenShift cluster you have access to. Simply login with `oc login` before running the local test runner. + +### Scripts + +| Script | Description | +| --------------------- | --------------------------------------------------------------- | +| `local-run.sh` | Main script - deploys RHDH to cluster and optionally runs tests | +| `container-init.sh` | Runs inside the container (called by local-run.sh) | +| `local-test-setup.sh` | Sets up environment for running tests locally in headed mode | + +### Quick Start + +```bash +cd e2e-tests +./local-run.sh +``` + +Follow the interactive prompts to select: + +1. **Run mode**: Deploy only (default, for headed debugging) or Deploy and run tests +2. **Job type**: OCP Helm PR tests, Nightly tests, Operator tests, etc. +3. **Image type**: + - **Downstream** (`quay.io/rhdh/rhdh-hub-rhel9`): `next`, `latest`, or release-specific tag + - **PR image** (`quay.io/rhdh-community/rhdh`): Enter PR number + +After the container finishes, you're back on your host with the cluster still accessible. + +--- + +### Running Tests Locally in Headed Mode (Recommended for Debugging) + +> **This is the recommended approach for debugging tests** - you can see the browser UI, step through tests, and interact with the application. + +#### Step 1: Deploy RHDH to the cluster + +```bash +cd e2e-tests +./local-run.sh +# Select "Deploy only" (the default option) +``` + +The container will deploy RHDH and exit. You'll see next steps printed in the terminal. 
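+
+Before moving on to Step 2, you can optionally sanity-check the deployment from the host. A minimal sketch, assuming the default OCP Helm job config, which deploys into the `showcase` and `showcase-rbac` namespaces:
+
+```bash
+# Pods should be Running/Ready, and the route host should serve the RHDH UI
+oc get pods -n showcase
+oc get route -n showcase -o jsonpath='{.items[0].spec.host}'
+```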
+
+#### Step 2: Setup environment
+
+```bash
+cd e2e-tests
+source local-test-setup.sh # For Showcase tests
+# or: source local-test-setup.sh rbac # For RBAC tests
+```
+
+#### Step 3: Run tests with visible browser
+
+```bash
+yarn install
+yarn playwright test --headed
+```
+
+#### Useful Playwright Commands
+
+```bash
+# Run all tests
+yarn playwright test --headed
+
+# Run a specific test file (use --project to specify which project)
+yarn playwright test playwright/e2e/plugins/quick-access-and-tech-radar.spec.ts --headed --project=showcase
+
+# Run RBAC tests (requires RBAC URL - use: source local-test-setup.sh rbac)
+yarn playwright test playwright/e2e/plugins/rbac/rbac.spec.ts --headed --project=showcase-rbac
+
+# Run tests matching a pattern (by test name)
+yarn playwright test --headed -g "guest user"
+yarn playwright test --headed -g "catalog"
+
+# Run with trace for debugging
+yarn playwright test --headed --trace on
+
+# Run in UI mode (interactive debugging with time-travel)
+yarn playwright test --ui
+
+# View the last test report
+npx playwright show-report .local-test/rhdh/.local-test/artifact_dir/showcase
+```
+
+> **Tip:** UI mode (`--ui`) opens an interactive browser where you can:
+>
+> - See all tests and run them individually
+> - Watch tests execute in real-time
+> - Step through test actions with time-travel debugging
+> - Inspect DOM snapshots at each step
+> - View console logs and network requests
+
+---
+
+### Configuration Options
+
+#### Job Types
+
+All job types are supported as long as you're logged into the target cluster (`oc login` or `kubectl login`).
+
+| Option | JOB_NAME Pattern                        | Description                |
+| ------ | --------------------------------------- | -------------------------- |
+| 1      | `*pull*ocp*helm*`                       | OCP Helm PR tests          |
+| 2      | `*ocp*helm*nightly*`                    | OCP Helm Nightly tests     |
+| 3      | `*ocp*operator*nightly*`                | OCP Operator Nightly tests |
+| 4      | `*ocp*helm*upgrade*nightly*`            | OCP Helm Upgrade tests     |
+| 5      | `*ocp*operator*auth-providers*nightly*` | Auth Providers tests       |
+| 6      | Custom                                  | Enter your own JOB_NAME    |
+
+**Other supported patterns** (use the Custom option):
+
+- `*aks*helm*nightly*` / `*aks*operator*nightly*` - Azure AKS
+- `*eks*helm*nightly*` / `*eks*operator*nightly*` - AWS EKS
+- `*gke*helm*nightly*` / `*gke*operator*nightly*` - Google GKE
+- `*osd-gcp*helm*nightly*` / `*osd-gcp*operator*nightly*` - OSD GCP
+
+#### Image Types
+
+| Option | Repository            | Description                        |
+| ------ | --------------------- | ---------------------------------- |
+| 1      | `rhdh/rhdh-hub-rhel9` | Downstream image (default)         |
+| 2      | `rhdh-community/rhdh` | PR image (prompts for a PR number) |
+
+#### Image Tags (downstream image)
+
+| Option | Tag              | Description                        |
+| ------ | ---------------- | ---------------------------------- |
+| 1      | `next`           | Latest development build (default) |
+| 2      | `latest`         | Latest stable release              |
+| 3      | Release-specific | e.g., `1.5`, `1.4`                 |
+
+For PR images, the tag is derived from the PR number you enter (`pr-<number>`, e.g., `pr-4020`).
+
+### Examples
+
+#### Example 1: Test a PR image
+
+Test changes from PR #4020 using the community image:
+
+```bash
+cd e2e-tests
+./local-run.sh
+
+# Select:
+# Run: 2 (Deploy and run tests)
+# Job: 1 (OCP Helm PR tests)
+# Image: 2 (PR image) → Enter: 4020
+```
+
+#### Example 2: Deploy only, debug tests locally
+
+Deploy and then run specific tests in headed mode:
+
+```bash
+cd e2e-tests
+./local-run.sh
+
+# Select:
+# Run: 1 (Deploy only)
+# Job: 1 (OCP Helm PR tests)
+# Image: 1 (Downstream) → Tag: 1 (next)
+
+# After the container exits, in the same or a new terminal:
+cd e2e-tests
+source local-test-setup.sh
+yarn install
+yarn playwright test --headed -g "guest user"
+```
+
+#### Example 3: Test Red Hat image with nightly job
+
+Test the official Red Hat image:
+
+```bash
+cd e2e-tests
+./local-run.sh
+
+# Select:
+# Run: 2 (Deploy and run tests)
+# Job: 2 (OCP Helm Nightly tests)
+# Image: 1 (Downstream) → Tag: 1 (next)
+```
+
+#### Example 4: Test RBAC functionality locally
+
+Deploy and run RBAC tests in headed mode:
+
+```bash
+cd e2e-tests
+./local-run.sh
+# Select: Deploy only
+
+# New terminal:
+cd e2e-tests
+source local-test-setup.sh rbac # Use RBAC URL
+yarn install
+yarn playwright test --headed --project=showcase-rbac
+```
+
+#### Example 5: Test on AKS/EKS/GKE cluster
+
+First login to your cluster, then run:
+
+```bash
+# Login to your cluster
+az aks get-credentials --resource-group myRG --name myAKS
+# or: aws eks update-kubeconfig --name myEKS
+# or: gcloud container clusters get-credentials myGKE
+
+cd e2e-tests
+./local-run.sh
+
+# Select:
+# Run: 2 (Deploy and run tests)
+# Job: 6 (Custom) → Enter: periodic-ci-aks-helm-nightly
+# Image: 1 (Downstream) → Tag: 1 (next)
+```
+
+#### Example 6: Run a single test file
+
+```bash
+cd e2e-tests
+source local-test-setup.sh
+yarn install
+yarn playwright test playwright/e2e/plugins/quick-access-and-tech-radar.spec.ts --headed --project=showcase
+```
+
+#### Example 7: Run tests with trace for debugging
+
+```bash
+cd e2e-tests
+source local-test-setup.sh
+yarn install
+yarn playwright test --headed --trace on -g "catalog"
+```
+
+#### Example 8: Interactive debugging with UI mode
+
+```bash
+cd e2e-tests
+source local-test-setup.sh
+yarn install
+yarn playwright test --ui
+```
+
+This opens an interactive UI where you can select individual tests, watch them run in real-time, and step through actions with time-travel debugging.
+
+### How It Works
+
+1. **local-run.sh**:
+   - Pulls the e2e-runner container image
+   - Logs into Vault (OIDC) and obtains a secrets token
+   - Creates a service account on the cluster with the cluster-admin role
+   - Copies the repo to `e2e-tests/.local-test/rhdh` (keeps the original clean)
+   - Runs the container with all credentials
+
+2. **container-init.sh** (inside container):
+   - Installs the Vault CLI if needed
+   - Fetches secrets from Vault and writes them to `/tmp/secrets/`
+   - Logs into the OpenShift cluster
+   - Sets up environment variables
+   - Runs the deployment via `openshift-ci-tests.sh`
+   - If tests are skipped, outputs URLs and saves the config
+
+3. 
**local-test-setup.sh** (for headed tests): + - Reads config from `e2e-tests/.local-test/rhdh/.local-test/config.env` + - Exports secrets as environment variables (not stored on disk) + - Gets fresh K8S_CLUSTER_TOKEN from cluster + - Sets BASE_URL for Playwright + +### Environment Variables + +After running `local-test-setup.sh`, these variables are set: + +| Variable | Description | +| --------------------------- | --------------------------------------------- | +| `BASE_URL` | URL of the deployed RHDH instance | +| `SHOWCASE_URL` | Showcase deployment URL | +| `SHOWCASE_RBAC_URL` | Showcase RBAC deployment URL | +| `K8S_CLUSTER_URL` | OpenShift API server URL | +| `K8S_CLUSTER_TOKEN` | Service account token (48-hour duration) | +| `JOB_NAME` | Selected job name | +| `QUAY_REPO` | Image repository | +| `TAG_NAME` | Image tag | +| Plus all secrets from Vault | (exported with `-`, `.`, `/` replaced by `_`) | + +### Artifacts and Logs + +Test artifacts are saved to `e2e-tests/.local-test/rhdh/.local-test/`: + +- `artifact_dir/` - Test artifacts, screenshots, traces +- `shared_dir/` - Shared data between test runs +- `config.env` - Configuration for local-test-setup.sh + +### Troubleshooting + +#### Error: Script failed + +The container will drop into an interactive shell for debugging. Check logs, run commands, and investigate. + +#### Error: Config file not found + +Run `./local-run.sh` first with "Deploy only" option to create the config. + +#### Error: Not logged into OpenShift + +Run `oc login` before running local-test-setup.sh. + +#### Error: Image does not exist + +The script verifies the image exists on quay.io before proceeding. For PR images, ensure the PR build has completed. + +### Security Notes + +- Secrets are fetched from Vault and exported as environment variables at runtime (not stored in files locally) +- K8S_CLUSTER_TOKEN is generated fresh each time (not stored) +- The repo is copied to `e2e-tests/.local-test/rhdh` so the original stays clean +- Service account tokens have a 48-hour duration diff --git a/e2e-tests/container-init.sh b/e2e-tests/container-init.sh new file mode 100644 index 0000000000..bb9623ae1d --- /dev/null +++ b/e2e-tests/container-init.sh @@ -0,0 +1,141 @@ +#!/bin/bash + +# Source logging library +# shellcheck source=../.ibm/pipelines/lib/log.sh +source "/tmp/rhdh/.ibm/pipelines/lib/log.sh" + +# Trap errors and exit with error code +handle_error() { + local exit_code=$? + echo "" + log::error "Container script failed! (exit code: $exit_code)" + echo "" + log::info "Check the logs above for details." + log::info "Pod logs are saved to: .local-test/rhdh/.local-test/artifact_dir/" + echo "" + exit $exit_code +} +trap handle_error ERR + +set -e + +# Install vault if not present +if ! command -v vault &> /dev/null; then + VAULT_VERSION="${VAULT_VERSION:-1.15.4}" + log::info "Installing vault ${VAULT_VERSION}..." 
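+  # Assumes a linux/amd64 runner container; the pinned release zip is fetched
+  # from releases.hashicorp.com and the binary is unzipped onto PATH (no
+  # checksum verification is performed)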
+ curl -fsSL "https://releases.hashicorp.com/vault/${VAULT_VERSION}/vault_${VAULT_VERSION}_linux_amd64.zip" -o /tmp/vault.zip + unzip -q /tmp/vault.zip -d /usr/local/bin/ + rm /tmp/vault.zip +fi + +# Fetch and write secrets to /tmp/secrets/ +log::section "Fetching Vault Secrets" +SECRETS=$(vault kv get -format=json -mount="kv" "selfservice/rhdh-qe/rhdh" | jq -r ".data.data") + +for key in $(echo "$SECRETS" | jq -r "keys[]"); do + if [[ "$key" == */* ]]; then + mkdir -p "/tmp/secrets/$(dirname "$key")" + fi + echo "$SECRETS" | jq -r --arg k "$key" '.[$k]' > "/tmp/secrets/$key" +done + +log::success "Secrets written to /tmp/secrets/" + +# Login using service account token from host +log::section "Cluster Service Account and Token Management" +oc login --token="$OC_TOKEN" --server="$OC_SERVER" --insecure-skip-tls-verify=true + +export K8S_CLUSTER_URL="$OC_SERVER" +export K8S_CLUSTER_TOKEN="$OC_TOKEN" +log::info "K8S_CLUSTER_URL: $K8S_CLUSTER_URL" + +log::info "Service account token is valid for 48 hours." + +log::section "Platform Environment Variables" + +export SHARED_DIR="/tmp/rhdh/.local-test/shared_dir" +mkdir -p "$SHARED_DIR" +log::info "SHARED_DIR=${SHARED_DIR}" + +export ARTIFACT_DIR="/tmp/rhdh/.local-test/artifact_dir" +mkdir -p "$ARTIFACT_DIR" +log::info "ARTIFACT_DIR=${ARTIFACT_DIR}" + +export IS_OPENSHIFT="true" +log::info "IS_OPENSHIFT=${IS_OPENSHIFT}" + +# These are passed from local-run.sh - export them for child scripts +export JOB_NAME +export QUAY_REPO +export TAG_NAME +export SKIP_TESTS +log::info "JOB_NAME=${JOB_NAME}" +log::info "QUAY_REPO=${QUAY_REPO}" +log::info "TAG_NAME=${TAG_NAME}" +log::info "SKIP_TESTS=${SKIP_TESTS}" + +export RELEASE_BRANCH_NAME="main" +log::info "RELEASE_BRANCH_NAME=${RELEASE_BRANCH_NAME}" + +export CONTAINER_PLATFORM="ocp" +log::info "CONTAINER_PLATFORM=${CONTAINER_PLATFORM}" + +log::info "Getting container platform version" +CONTAINER_PLATFORM_VERSION=$(oc version --output json 2> /dev/null | jq -r ".openshiftVersion" | cut -d"." 
-f1,2 || echo "unknown") +export CONTAINER_PLATFORM_VERSION +log::info "CONTAINER_PLATFORM_VERSION=${CONTAINER_PLATFORM_VERSION}" + +log::section "Current branch" +cd /tmp/rhdh +log::info "Current branch: $(git branch --show-current)" +log::info "Using Image: ${QUAY_REPO}:${TAG_NAME}" + +log::section "Test Execution" +log::info "Executing openshift-ci-tests.sh" +bash ./.ibm/pipelines/openshift-ci-tests.sh + +log::section "Done" + +# Get URLs dynamically based on deployment type +# Operator deployments use different route names than Helm deployments +if [[ "$JOB_NAME" == *"operator"* ]]; then + # Operator deployments: fetch routes dynamically from the namespaces + SHOWCASE_URL=$(oc get route -n showcase -o jsonpath='{.items[0].spec.host}' 2>/dev/null | xargs -I{} echo "https://{}" || echo "") + SHOWCASE_RBAC_URL=$(oc get route -n showcase-rbac -o jsonpath='{.items[0].spec.host}' 2>/dev/null | xargs -I{} echo "https://{}" || echo "") +else + # Helm deployments: construct URLs from router base + K8S_CLUSTER_ROUTER_BASE=$(oc get route console -n openshift-console -o=jsonpath='{.spec.host}' | sed 's/^[^.]*\.//') + SHOWCASE_URL="https://rhdh-developer-hub-showcase.${K8S_CLUSTER_ROUTER_BASE}" + SHOWCASE_RBAC_URL="https://rhdh-rbac-developer-hub-showcase-rbac.${K8S_CLUSTER_ROUTER_BASE}" +fi + +# Always write config to .local-test/config.env for local-test-setup.sh to read +cat > /tmp/rhdh/.local-test/config.env < /dev/null; then + MISSING_CMDS="$MISSING_CMDS $cmd" + PREREQ_FAILED=true + fi +done + +# Check if podman machine is running +if command -v podman &> /dev/null; then + PODMAN_RUNNING=$(podman machine list --format '{{.Name}} {{.Running}}' 2>/dev/null | grep -w "true" | head -1 || true) + if [[ -z "$PODMAN_RUNNING" ]]; then + log::error "No podman machine is running" + log::info " Run: podman machine start" + PREREQ_FAILED=true + else + # Warn if memory or CPUs are low + MACHINE_NAME=$(echo "$PODMAN_RUNNING" | awk '{print $1}') + MACHINE_MEM=$(podman machine list --format '{{.Name}} {{.Memory}}' | grep "^${MACHINE_NAME}" | awk '{print $2}') + MACHINE_CPUS=$(podman machine list --format '{{.Name}} {{.CPUs}}' | grep "^${MACHINE_NAME}" | awk '{print $2}') + MEM_GB=$(echo "$MACHINE_MEM" | sed 's/GiB//' | sed 's/MiB//') + if [[ "$MACHINE_MEM" == *"MiB"* ]] || [[ $(echo "$MEM_GB < 8" | bc -l) -eq 1 ]]; then + log::warn "Podman machine '$MACHINE_NAME' has only $MACHINE_MEM RAM" + log::info " Recommend at least 8GB RAM and 4 CPUs for Playwright tests" + log::info " Run: podman machine stop $MACHINE_NAME && podman machine set $MACHINE_NAME --memory 8192 --cpus 4 && podman machine start $MACHINE_NAME" + elif [[ "$MACHINE_CPUS" -lt 4 ]]; then + log::warn "Podman machine '$MACHINE_NAME' has only $MACHINE_CPUS CPUs" + log::info " Recommend at least 8GB RAM and 4 CPUs for Playwright tests" + fi + fi +fi + +# Check if logged into OpenShift +if command -v oc &> /dev/null; then + if ! 
oc whoami &> /dev/null; then + log::error "Not logged into OpenShift" + log::info " Run: oc login " + PREREQ_FAILED=true + fi +fi + +if [[ -n "$MISSING_CMDS" ]]; then + log::error "Missing required commands:$MISSING_CMDS" + log::info " Install missing tools:" + log::info " brew install podman jq rsync openshift-cli" + log::info " (bc is pre-installed on macOS, install via 'brew install bc' if missing)" + log::info " brew tap hashicorp/tap && brew install hashicorp/tap/vault" +fi + +if [[ "$PREREQ_FAILED" == "true" ]]; then + exit 1 +fi + +# ========== Interactive Configuration ========== +log::section "RHDH Local Test Runner" + +# Check for previous configuration +USE_PREVIOUS="false" +if [[ -f "$RUN_CONFIG_FILE" ]]; then + echo "Previous configuration found:" + echo "----------------------------------------" + source "$RUN_CONFIG_FILE" + echo " JOB_NAME: $JOB_NAME" + echo " IMAGE: quay.io/${QUAY_REPO}:${TAG_NAME}" + echo " SKIP_TESTS: $SKIP_TESTS" + echo "----------------------------------------" + echo "" + read -r -p "Use previous configuration? [Y/n]: " use_prev_choice + use_prev_choice=${use_prev_choice:-Y} + if [[ "$use_prev_choice" =~ ^[Yy]$ ]]; then + USE_PREVIOUS="true" + echo "" + fi +fi + +if [[ "$USE_PREVIOUS" == "false" ]]; then + # Run mode selection (Deploy only is default for local debugging) + echo "What do you want to run?" + echo " 1) Deploy only (recommended for local headed debugging)" + echo " 2) Deploy and run tests (headless mode, runs all tests)" + echo "" + read -r -p "Enter choice [1]: " run_choice + run_choice=${run_choice:-1} + + case "$run_choice" in + 1) SKIP_TESTS="true" ;; + 2) SKIP_TESTS="false" ;; + *) SKIP_TESTS="true" ;; + esac + echo "" + + # Job selection + echo "Select test job to run:" + echo " 1) OCP Helm PR tests (pull-ci-*-ocp-helm)" + echo " 2) OCP Helm Nightly tests (*ocp*helm*nightly*)" + echo " 3) OCP Operator Nightly (*ocp*operator*nightly*)" + echo " 4) OCP Helm Upgrade (*ocp*helm*upgrade*nightly*)" + echo " 5) Auth Providers (*ocp*operator*auth-providers*nightly*)" + echo " 6) Custom job name" + echo "" + read -r -p "Enter choice [1]: " job_choice + job_choice=${job_choice:-1} + + case "$job_choice" in + 1) JOB_NAME="pull-ci-redhat-developer-rhdh-main-e2e-ocp-helm" ;; + 2) JOB_NAME="periodic-ci-ocp-helm-nightly" ;; + 3) JOB_NAME="periodic-ci-ocp-operator-nightly" ;; + 4) JOB_NAME="periodic-ci-ocp-helm-upgrade-nightly" ;; + 5) JOB_NAME="periodic-ci-ocp-operator-auth-providers-nightly" ;; + 6) + read -r -p "Enter custom JOB_NAME: " JOB_NAME + ;; + *) JOB_NAME="pull-ci-redhat-developer-rhdh-main-e2e-ocp-helm" ;; + esac + echo "JOB_NAME: $JOB_NAME" + echo "" + + # Image selection - Downstream vs PR + echo "Select image type:" + echo " 1) Downstream image (quay.io/rhdh/rhdh-hub-rhel9)" + echo " 2) PR image (quay.io/rhdh-community/rhdh)" + echo "" + read -r -p "Enter choice [1]: " image_type_choice + image_type_choice=${image_type_choice:-1} + + case "$image_type_choice" in + 1) + # Downstream image + QUAY_REPO="rhdh/rhdh-hub-rhel9" + echo "" + echo "Select image tag (quay.io/rhdh/rhdh-hub-rhel9):" + echo " 1) next (latest development build)" + echo " 2) latest (latest stable release)" + echo " 3) Release-specific tag (e.g., 1.5, 1.4)" + echo "" + read -r -p "Enter choice [1]: " tag_choice + tag_choice=${tag_choice:-1} + + case "$tag_choice" in + 1) TAG_NAME="next" ;; + 2) TAG_NAME="latest" ;; + 3) + read -r -p "Enter release tag (e.g., 1.5): " TAG_NAME + ;; + *) TAG_NAME="next" ;; + esac + ;; + 2) + # PR image + QUAY_REPO="rhdh-community/rhdh" 
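+    # PR builds are published by CI to quay.io/rhdh-community/rhdh with pr-<number> tags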
+ echo "" + read -r -p "Enter PR number (quay.io/rhdh-community/rhdh:pr-): " PR_NUMBER + TAG_NAME="pr-${PR_NUMBER}" + ;; + *) + QUAY_REPO="rhdh/rhdh-hub-rhel9" + TAG_NAME="next" + ;; + esac + echo "" + echo "Image: quay.io/${QUAY_REPO}:${TAG_NAME}" + echo "" + + # Save configuration for next run + mkdir -p "$(dirname "$RUN_CONFIG_FILE")" + cat > "$RUN_CONFIG_FILE" </dev/null || log::info "Namespace already exists" +oc create serviceaccount "$SA_NAME" -n "$SA_NAMESPACE" 2>/dev/null || log::info "Service account already exists" +oc adm policy add-cluster-role-to-user cluster-admin "system:serviceaccount:${SA_NAMESPACE}:${SA_NAME}" 2>/dev/null || true +OC_TOKEN=$(oc create token "$SA_NAME" -n "$SA_NAMESPACE" --duration=48h) +log::info "K8S_CLUSTER_URL: $OC_SERVER" + +# Copy repo to work directory (keeps original repo clean) +log::section "Copying repo to work directory" +REPO_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)" +WORK_DIR="$SCRIPT_DIR/.local-test/rhdh" +rm -rf "$WORK_DIR" +mkdir -p "$WORK_DIR" +rsync -a --exclude='node_modules' --exclude='.local-test' --exclude='playwright-report' --exclude='test-results' "$REPO_ROOT/" "$WORK_DIR/" +log::info "Work copy created at: $WORK_DIR" + +# Run container with vault credentials and OC token +log::section "Starting Container (rhdh-e2e-runner)" +log::info "Running container (rhdh-e2e-runner)..." +log::info "This will deploy RHDH to your cluster and run tests (if enabled)." +echo "" + +# Create log file for container output +CONTAINER_LOG="$SCRIPT_DIR/.local-test/container.log" +mkdir -p "$(dirname "$CONTAINER_LOG")" +log::info "Container log: $CONTAINER_LOG" +echo "" + +CONTAINER_EXIT_CODE=0 +podman run -v "$WORK_DIR":/tmp/rhdh \ + -v "$SCRIPT_DIR/container-init.sh":/tmp/container-init.sh:ro \ + -it -u root --privileged \ + --mount type=tmpfs,destination=/tmp/secrets \ + -e VAULT_ADDR="$VAULT_ADDR" \ + -e VAULT_TOKEN="$VAULT_TOKEN" \ + -e OC_SERVER="$OC_SERVER" \ + -e OC_TOKEN="$OC_TOKEN" \ + -e JOB_NAME="$JOB_NAME" \ + -e QUAY_REPO="$QUAY_REPO" \ + -e TAG_NAME="$TAG_NAME" \ + -e SKIP_TESTS="$SKIP_TESTS" \ + "$RUNNER_IMAGE" \ + /bin/bash /tmp/container-init.sh 2>&1 | tee "$CONTAINER_LOG" +CONTAINER_EXIT_CODE=${PIPESTATUS[0]} + +# Container has exited - show next steps +echo "" +log::section "Container (rhdh-e2e-runner) Finished - Back on Host" +log::info "You are now back on your host machine." +log::info "You are still logged into the cluster via 'oc' CLI." +echo "" + +if [[ "$CONTAINER_EXIT_CODE" -ne 0 ]]; then + log::error "Container (rhdh-e2e-runner) exited with error code: $CONTAINER_EXIT_CODE" + echo "" + log::info "Troubleshooting:" + echo " - Container log: $CONTAINER_LOG" + echo " - Pod logs: e2e-tests/.local-test/rhdh/.local-test/artifact_dir/" + echo " - Check cluster pods: oc get pods -A" + echo " - Check pod logs: oc logs -n " + echo "" + exit $CONTAINER_EXIT_CODE +fi + +if [[ "$SKIP_TESTS" == "true" ]]; then + log::section "Next Steps: Run Tests Locally (headed mode)" + echo "" + log::info "1. Setup environment variables:" + echo " source local-test-setup.sh # For Showcase tests" + echo " source local-test-setup.sh rbac # For RBAC tests" + echo "" + log::info "2. 
Install dependencies and run tests:" + echo " yarn install" + echo " yarn playwright test --headed" + echo "" + log::info "Useful Playwright commands:" + echo "" + echo " # Run all tests for a project" + echo " yarn playwright test --headed --project=showcase" + echo " yarn playwright test --headed --project=showcase-rbac" + echo "" + echo " # Run a specific test file (use --workers=1 for sequential execution)" + echo " yarn playwright test --headed --project=showcase-rbac --workers=1 playwright/e2e/plugins/rbac/rbac.spec.ts" + echo " yarn playwright test --headed --project=showcase --workers=1 playwright/e2e/plugins/quick-access-and-tech-radar.spec.ts" + echo "" + echo " # Run tests matching a pattern" + echo " yarn playwright test --headed --project=showcase-rbac --workers=1 -g \"guest user\"" + echo " yarn playwright test --headed --project=showcase --workers=1 -g \"catalog\"" + echo "" + echo " # Interactive UI mode" + echo " yarn playwright test --ui --project=showcase" + echo " yarn playwright test --ui --project=showcase-rbac" + echo "" +else + log::section "Tests Completed" + log::info "Test artifacts saved to: e2e-tests/.local-test/rhdh/.local-test/artifact_dir/" + echo "" + log::info "To view test reports:" + echo " npx playwright show-report .local-test/rhdh/.local-test/artifact_dir/showcase" + echo "" + log::info "To re-run tests locally (headed mode):" + echo " source local-test-setup.sh" + echo " yarn playwright test --headed" + echo "" +fi \ No newline at end of file diff --git a/e2e-tests/local-test-setup.sh b/e2e-tests/local-test-setup.sh new file mode 100755 index 0000000000..eb8d81fda7 --- /dev/null +++ b/e2e-tests/local-test-setup.sh @@ -0,0 +1,128 @@ +#!/bin/bash +# This script sets up your local environment for running Playwright tests in headed mode. +# It reads config from /tmp/rhdh/.local-test/config.env and exports all secrets as environment variables. +# +# Usage (run from e2e-tests directory): +# source local-test-setup.sh [showcase|rbac] +# +# Examples: +# cd e2e-tests +# source local-test-setup.sh # Uses Showcase URL (default) +# source local-test-setup.sh showcase # Uses Showcase URL +# source local-test-setup.sh rbac # Uses Showcase RBAC URL +# +# After sourcing, you can run tests: +# yarn install +# yarn playwright test --headed + +# Get script directory (works even when sourced) +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +WORK_DIR="$SCRIPT_DIR/.local-test/rhdh" +CONFIG_FILE="$WORK_DIR/.local-test/config.env" + +# Source logging library +# shellcheck source=../.ibm/pipelines/lib/log.sh +source "$SCRIPT_DIR/../.ibm/pipelines/lib/log.sh" + +# Check if config file exists +if [[ ! 
-f "$CONFIG_FILE" ]]; then + log::error "Config file not found: $CONFIG_FILE" + echo "" + log::info "Please run deployment first:" + log::info " cd e2e-tests && ./local-run.sh" + echo "" + log::info "Note: The work copy is created at e2e-tests/.local-test/rhdh" + return 1 2>/dev/null || exit 1 +fi + +# Load config +log::info "Loading config from: $CONFIG_FILE" +source "$CONFIG_FILE" + +# Select URL based on argument +TEST_TYPE="${1:-showcase}" +case "$TEST_TYPE" in + showcase) + export BASE_URL="$SHOWCASE_URL" + log::info "Test type: Showcase" + ;; + rbac) + export BASE_URL="$SHOWCASE_RBAC_URL" + log::info "Test type: Showcase RBAC" + ;; + *) + log::error "Unknown test type: $TEST_TYPE" + log::info "Valid options: showcase, rbac" + return 1 2>/dev/null || exit 1 + ;; +esac + +log::info "BASE_URL: $BASE_URL" +echo "" + +# Export config vars +export JOB_NAME +export QUAY_REPO +export TAG_NAME +export K8S_CLUSTER_URL +export SHOWCASE_URL +export SHOWCASE_RBAC_URL + +log::info "Configuration:" +log::info " JOB_NAME: $JOB_NAME" +log::info " IMAGE: quay.io/${QUAY_REPO}:${TAG_NAME}" +log::info " K8S_CLUSTER_URL: $K8S_CLUSTER_URL" +echo "" + +# Get K8S_CLUSTER_TOKEN fresh (not stored in file for security) +log::info "Getting K8S_CLUSTER_TOKEN from cluster..." +if ! oc whoami &>/dev/null; then + log::error "Not logged into OpenShift." + log::info "Please login first: oc login" + return 1 2>/dev/null || exit 1 +fi +# Use the existing service account token created during deployment +SA_NAME="rhdh-local-tester" +SA_NAMESPACE="rhdh-local-test" +K8S_CLUSTER_TOKEN=$(oc create token "$SA_NAME" -n "$SA_NAMESPACE" --duration=48h) +export K8S_CLUSTER_TOKEN +log::success "K8S_CLUSTER_TOKEN: [set]" +echo "" + +export VAULT_ADDR='https://vault.ci.openshift.org' + +# Check if already logged into vault +if ! vault token lookup &>/dev/null; then + log::info "Logging into vault..." + vault login -no-print -method=oidc +fi + +log::info "Exporting secrets as environment variables..." +# Export secrets safely without eval (avoids code injection risk) +# Replaces -, . and / with _ in key names (env vars can only have alphanumeric and _) +while IFS= read -r line; do + # Extract key (everything before first =) and value (everything after first =) + key="${line%%=*}" + value="${line#*=}" + # Skip metadata keys + [[ "$key" == "secretsync/"* ]] && continue + # Sanitize key name (replace -, . and / with _) + safe_key=$(echo "$key" | tr '-./' '___') + export "$safe_key"="$value" +done < <(vault kv get -format=json -mount="kv" "selfservice/rhdh-qe/rhdh" | jq -r '.data.data | to_entries[] | "\(.key)=\(.value)"') + +log::section "Environment Ready" +log::info "Available URLs:" +log::info " Showcase: $SHOWCASE_URL" +log::info " Showcase RBAC: $SHOWCASE_RBAC_URL" +echo "" +log::info "Current BASE_URL: $BASE_URL" +echo "" +log::info "To run tests:" +echo " cd e2e-tests" +echo " yarn install" +echo " yarn playwright test --headed" +echo "" +log::info "To switch to RBAC tests:" +echo " export BASE_URL=\"$SHOWCASE_RBAC_URL\"" +echo "" From ef25b51815d16313925a27a730583215791ae643 Mon Sep 17 00:00:00 2001 From: Subhash Khileri Date: Wed, 21 Jan 2026 20:40:02 +0530 Subject: [PATCH 2/4] fix(pipelines): dynamically select latest nodejs UBI9 image tag Instead of hardcoding nodejs:18-ubi8, query the cluster for available nodejs imagestream tags and select the latest UBI9 version. Falls back to 18-ubi8 if no UBI9 tags are found. 
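
For example, with a hypothetical imagestream exposing the tags 16-ubi8,
18-ubi8, 18-ubi9 and 20-ubi9, the pipeline keeps only the -ubi9 tags and
selects 20-ubi9.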
Co-Authored-By: Claude Opus 4.5 --- .ibm/pipelines/utils.sh | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/.ibm/pipelines/utils.sh b/.ibm/pipelines/utils.sh index 11901def6a..ada0f9c38b 100755 --- a/.ibm/pipelines/utils.sh +++ b/.ibm/pipelines/utils.sh @@ -665,8 +665,13 @@ deploy_test_backstage_customization_provider() { # Check if the buildconfig already exists if ! oc get buildconfig test-backstage-customization-provider -n "${project}" > /dev/null 2>&1; then - log::info "Creating new app for test-backstage-customization-provider" - oc new-app openshift/nodejs:18-ubi8~https://github.com/janus-qe/test-backstage-customization-provider --namespace="${project}" + # Get latest nodejs UBI9 tag from cluster, fallback to 18-ubi8 + local nodejs_tag + nodejs_tag=$(oc get imagestream nodejs -n openshift -o jsonpath='{.spec.tags[*].name}' 2>/dev/null | \ + tr ' ' '\n' | grep -E '^[0-9]+-ubi9$' | sort -t'-' -k1 -n | tail -1) + nodejs_tag="${nodejs_tag:-18-ubi8}" + log::info "Creating new app for test-backstage-customization-provider using nodejs:${nodejs_tag}" + oc new-app "openshift/nodejs:${nodejs_tag}~https://github.com/janus-qe/test-backstage-customization-provider" --namespace="${project}" else log::warn "BuildConfig for test-backstage-customization-provider already exists in ${project}. Skipping new-app creation." fi From 243a9ea49b2c4be1d65fbcf3ce41f5bddd546c97 Mon Sep 17 00:00:00 2001 From: Subhash Khileri Date: Thu, 22 Jan 2026 09:53:47 +0530 Subject: [PATCH 3/4] Update .ibm/pipelines/utils.sh MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Zbyněk Drápela <61500440+zdrapela@users.noreply.github.com> --- .ibm/pipelines/utils.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.ibm/pipelines/utils.sh b/.ibm/pipelines/utils.sh index ada0f9c38b..b67a4c67a0 100755 --- a/.ibm/pipelines/utils.sh +++ b/.ibm/pipelines/utils.sh @@ -842,7 +842,7 @@ check_backstage_running() { fi if [ -n "${crash_pods}" ]; then - log::error "❌ Detected pods in CrashLoopBackOff state - failing fast instead of waiting:" + log::error "Detected pods in CrashLoopBackOff state - failing fast instead of waiting:" echo "${crash_pods}" log::error "Deployment status:" oc get deployment -l "app.kubernetes.io/instance in (${release_name},redhat-developer-hub,developer-hub)" -n "${namespace}" -o wide 2> /dev/null || true From 82835ea24c8777c4234b21a4551b83e68b2fbc56 Mon Sep 17 00:00:00 2001 From: Subhash Khileri Date: Thu, 22 Jan 2026 21:30:36 +0530 Subject: [PATCH 4/4] feat(e2e-tests): add CLI flags for non-interactive local test runner Add CLI flags (-j, -r, -t, -p, -s) to local-run.sh for automation and quick runs without interactive prompts. Also improve container-init.sh to pre-compute and save config before deployment so URLs are available even if deployment fails. 
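
For example, "./local-run.sh --pr 4023 --skip-tests" deploys the PR image
non-interactively and skips the test run.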
Additional fixes: - Fix secrets parsing in local-test-setup.sh to handle special chars - Minor formatting fix in utils.sh for piped command Co-Authored-By: Claude Opus 4.5 --- .ibm/pipelines/utils.sh | 4 +- e2e-tests/README.md | 24 +++++++++ e2e-tests/container-init.sh | 39 +++++++++------ e2e-tests/local-run.sh | 91 +++++++++++++++++++++++++++++++++-- e2e-tests/local-test-setup.sh | 14 +++--- 5 files changed, 143 insertions(+), 29 deletions(-) diff --git a/.ibm/pipelines/utils.sh b/.ibm/pipelines/utils.sh index b67a4c67a0..8e2f1fe985 100755 --- a/.ibm/pipelines/utils.sh +++ b/.ibm/pipelines/utils.sh @@ -667,8 +667,8 @@ deploy_test_backstage_customization_provider() { if ! oc get buildconfig test-backstage-customization-provider -n "${project}" > /dev/null 2>&1; then # Get latest nodejs UBI9 tag from cluster, fallback to 18-ubi8 local nodejs_tag - nodejs_tag=$(oc get imagestream nodejs -n openshift -o jsonpath='{.spec.tags[*].name}' 2>/dev/null | \ - tr ' ' '\n' | grep -E '^[0-9]+-ubi9$' | sort -t'-' -k1 -n | tail -1) + nodejs_tag=$(oc get imagestream nodejs -n openshift -o jsonpath='{.spec.tags[*].name}' 2> /dev/null \ + | tr ' ' '\n' | grep -E '^[0-9]+-ubi9$' | sort -t'-' -k1 -n | tail -1) nodejs_tag="${nodejs_tag:-18-ubi8}" log::info "Creating new app for test-backstage-customization-provider using nodejs:${nodejs_tag}" oc new-app "openshift/nodejs:${nodejs_tag}~https://github.com/janus-qe/test-backstage-customization-provider" --namespace="${project}" diff --git a/e2e-tests/README.md b/e2e-tests/README.md index 406bc980bb..06cf1115de 100644 --- a/e2e-tests/README.md +++ b/e2e-tests/README.md @@ -86,6 +86,30 @@ Follow the interactive prompts to select: After the container finishes, you're back on your host with the cluster still accessible. +### CLI Flags (Non-Interactive Mode) + +For automation or quick runs, use CLI flags to skip interactive prompts: + +```bash +# Test a PR image +./local-run.sh --pr 4023 --skip-tests + +# Deploy downstream next image +./local-run.sh --repo rhdh/rhdh-hub-rhel9 --tag next --skip-tests + +# Full flags +./local-run.sh -j pull-ci-redhat-developer-rhdh-main-e2e-ocp-helm -r rhdh/rhdh-hub-rhel9 -t next -s +``` + +| Flag | Description | +| ------------------ | -------------------------------------------------------------------- | +| `-j, --job` | Job name | +| `-r, --repo` | Quay repository (e.g., `rhdh/rhdh-hub-rhel9`) | +| `-t, --tag` | Image tag (e.g., `next`, `latest`, `1.5`) | +| `-p, --pr` | PR number (sets repo to `rhdh-community/rhdh`, tag to `pr-`) | +| `-s, --skip-tests` | Deploy only, skip running tests | +| `-h, --help` | Show help message | + --- ### Running Tests Locally in Headed Mode (Recommended for Debugging) diff --git a/e2e-tests/container-init.sh b/e2e-tests/container-init.sh index bb9623ae1d..d543991032 100644 --- a/e2e-tests/container-init.sh +++ b/e2e-tests/container-init.sh @@ -90,26 +90,19 @@ cd /tmp/rhdh log::info "Current branch: $(git branch --show-current)" log::info "Using Image: ${QUAY_REPO}:${TAG_NAME}" -log::section "Test Execution" -log::info "Executing openshift-ci-tests.sh" -bash ./.ibm/pipelines/openshift-ci-tests.sh - -log::section "Done" - -# Get URLs dynamically based on deployment type -# Operator deployments use different route names than Helm deployments +# Pre-compute URLs and save config BEFORE deployment (so it's available even if deployment fails) +log::section "Preparing Configuration" +K8S_CLUSTER_ROUTER_BASE=$(oc get route console -n openshift-console -o=jsonpath='{.spec.host}' | sed 's/^[^.]*\.//') if [[ 
"$JOB_NAME" == *"operator"* ]]; then - # Operator deployments: fetch routes dynamically from the namespaces - SHOWCASE_URL=$(oc get route -n showcase -o jsonpath='{.items[0].spec.host}' 2>/dev/null | xargs -I{} echo "https://{}" || echo "") - SHOWCASE_RBAC_URL=$(oc get route -n showcase-rbac -o jsonpath='{.items[0].spec.host}' 2>/dev/null | xargs -I{} echo "https://{}" || echo "") + SHOWCASE_URL="https://backstage-showcase.${K8S_CLUSTER_ROUTER_BASE}" + SHOWCASE_RBAC_URL="https://backstage-showcase-rbac.${K8S_CLUSTER_ROUTER_BASE}" else - # Helm deployments: construct URLs from router base - K8S_CLUSTER_ROUTER_BASE=$(oc get route console -n openshift-console -o=jsonpath='{.spec.host}' | sed 's/^[^.]*\.//') SHOWCASE_URL="https://rhdh-developer-hub-showcase.${K8S_CLUSTER_ROUTER_BASE}" SHOWCASE_RBAC_URL="https://rhdh-rbac-developer-hub-showcase-rbac.${K8S_CLUSTER_ROUTER_BASE}" fi -# Always write config to .local-test/config.env for local-test-setup.sh to read +# Save config early so it's available even if one deployment fails +mkdir -p /tmp/rhdh/.local-test cat > /tmp/rhdh/.local-test/config.env <) + -s, --skip-tests Deploy only, skip running tests + -h, --help Show this help message + +Examples: + # Interactive mode (default) + ./local-run.sh + + # Deploy downstream next image, skip tests + ./local-run.sh --repo rhdh/rhdh-hub-rhel9 --tag next --skip-tests + + # Test a PR image + ./local-run.sh --pr 4023 --skip-tests + + # Full flags + ./local-run.sh -j pull-ci-redhat-developer-rhdh-main-e2e-ocp-helm -r rhdh/rhdh-hub-rhel9 -t next -s + +EOF + exit 0 +} + +# Parse command line arguments +while [[ $# -gt 0 ]]; do + case $1 in + -j|--job) + CLI_JOB_NAME="$2" + shift 2 + ;; + -r|--repo) + CLI_QUAY_REPO="$2" + shift 2 + ;; + -t|--tag) + CLI_TAG_NAME="$2" + shift 2 + ;; + -p|--pr) + CLI_QUAY_REPO="rhdh-community/rhdh" + CLI_TAG_NAME="pr-$2" + shift 2 + ;; + -s|--skip-tests) + CLI_SKIP_TESTS="true" + shift + ;; + -h|--help) + show_help + ;; + *) + log::error "Unknown option: $1" + echo "Use --help for usage information" + exit 1 + ;; + esac +done + # ========== Prerequisites Check ========== PREREQ_FAILED=false MISSING_CMDS="" @@ -69,9 +136,21 @@ fi # ========== Interactive Configuration ========== log::section "RHDH Local Test Runner" -# Check for previous configuration +# Check if CLI flags provide all required options (skip interactive mode) +CLI_MODE="false" +if [[ -n "$CLI_QUAY_REPO" && -n "$CLI_TAG_NAME" ]]; then + CLI_MODE="true" + JOB_NAME="${CLI_JOB_NAME:-pull-ci-redhat-developer-rhdh-main-e2e-ocp-helm}" + QUAY_REPO="$CLI_QUAY_REPO" + TAG_NAME="$CLI_TAG_NAME" + SKIP_TESTS="${CLI_SKIP_TESTS:-false}" + log::info "Using CLI flags (non-interactive mode)" + echo "" +fi + +# Check for previous configuration (only if not in CLI mode) USE_PREVIOUS="false" -if [[ -f "$RUN_CONFIG_FILE" ]]; then +if [[ "$CLI_MODE" == "false" && -f "$RUN_CONFIG_FILE" ]]; then echo "Previous configuration found:" echo "----------------------------------------" source "$RUN_CONFIG_FILE" @@ -88,7 +167,7 @@ if [[ -f "$RUN_CONFIG_FILE" ]]; then fi fi -if [[ "$USE_PREVIOUS" == "false" ]]; then +if [[ "$CLI_MODE" == "false" && "$USE_PREVIOUS" == "false" ]]; then # Run mode selection (Deploy only is default for local debugging) echo "What do you want to run?" echo " 1) Deploy only (recommended for local headed debugging)" @@ -210,8 +289,10 @@ log::info "JOB_NAME: $JOB_NAME" log::info "IMAGE: quay.io/${QUAY_REPO}:${TAG_NAME}" log::info "SKIP_TESTS: $SKIP_TESTS" echo "" -read -r -p "Press Enter to continue or Ctrl+C to abort..." 
-echo ""
+if [[ "$CLI_MODE" == "false" ]]; then
+  read -r -p "Press Enter to continue or Ctrl+C to abort..."
+  echo ""
+fi
 
 # Pull runner image first (can take a while)
 log::section "Pulling runner container image"
diff --git a/e2e-tests/local-test-setup.sh b/e2e-tests/local-test-setup.sh
index eb8d81fda7..5f637afdbb 100755
--- a/e2e-tests/local-test-setup.sh
+++ b/e2e-tests/local-test-setup.sh
@@ -99,17 +99,17 @@ fi
 
 log::info "Exporting secrets as environment variables..."
 # Export secrets safely without eval (avoids code injection risk)
+# Reads each value individually via jq so special characters in values are preserved
 # Replaces -, . and / with _ in key names (env vars can only have alphanumeric and _)
-while IFS= read -r line; do
-  # Extract key (everything before first =) and value (everything after first =)
-  key="${line%%=*}"
-  value="${line#*=}"
+SECRETS_JSON=$(vault kv get -format=json -mount="kv" "selfservice/rhdh-qe/rhdh" | jq -r '.data.data')
+for key in $(echo "$SECRETS_JSON" | jq -r 'keys[]'); do
   # Skip metadata keys
   [[ "$key" == "secretsync/"* ]] && continue
-  # Sanitize key name (replace -, . and / with _)
-  safe_key=$(echo "$key" | tr '-./' '___')
+  # Get value and sanitize key name (put - at end for macOS tr compatibility)
+  value=$(echo "$SECRETS_JSON" | jq -r --arg k "$key" '.[$k]')
+  safe_key=$(echo "$key" | tr './-' '___')
   export "$safe_key"="$value"
-done < <(vault kv get -format=json -mount="kv" "selfservice/rhdh-qe/rhdh" | jq -r '.data.data | to_entries[] | "\(.key)=\(.value)"')
+done
 
 log::section "Environment Ready"
 log::info "Available URLs:"