---
# .gitea/workflows/deploy-to-test.yml
#
# This workflow automatically deploys the application to the TEST environment
# on every push to the 'main' branch. It handles linting, running all tests,
# and deploying the code to the test server.
name: Deploy to Test Environment

on:
  push:
    branches:
      - main # This pipeline runs only on a push to the 'main' branch.

jobs:
  deploy-to-test:
    runs-on: projectium.com # This job runs on your self-hosted Gitea runner.
    steps:
      - name: Checkout Code
        uses: actions/checkout@v3
        with:
          # Fetch all history for all tags and branches
          fetch-depth: 0

      # Debugging step: show the git ref metadata for this run.
      - name: Show Git REF
        run: |
          echo "Gitea ref: ${{ gitea.ref }}"
          echo "Gitea ref_name: ${{ gitea.ref_name }}" # often more useful (e.g., 'main' or 'my-feature-branch')
          echo "Gitea ref_type: ${{ gitea.ref_type }}" # 'branch' or 'tag'
          echo "Gitea SHA: ${{ gitea.sha }}"
          echo "Triggering actor: ${{ gitea.actor }}"
          echo "Repository: ${{ gitea.repository }}"

      - name: Setup Node.js
        uses: actions/setup-node@v3
        with:
          node-version: '20'
          cache: 'npm' # Re-enable the cache. If this fails, we will remove it again.
          cache-dependency-path: '**/package-lock.json'

      # The setup-node action with caching handles installation correctly.
      # If dependencies are not found in cache, it will run 'npm ci' automatically.
      # If they are found, it restores them. This is the standard, reliable way.
      - name: Install Dependencies
        run: npm ci # 'ci' is faster and safer for CI/CD than 'install'.

      - name: Bump Version and Push
        run: |
          # Configure git for the commit.
          git config --global user.name 'Gitea Actions'
          git config --global user.email 'actions@gitea.projectium.com'

          # Bump the patch version number. This creates a new commit and a new tag.
          # The commit message includes [skip ci] to prevent this push from triggering another workflow run.
          # If the tag already exists (e.g. re-running a failed job), we skip the conflicting version.
          if ! npm version patch -m "ci: Bump version to %s [skip ci]"; then
            echo "⚠️ Version bump failed (likely tag exists). Attempting to skip to next version..."
            # Bump package.json to the conflicting version without git tagging
            npm version patch --no-git-tag-version > /dev/null
            # Bump again to the next version, forcing it because the directory is now dirty
            npm version patch -m "ci: Bump version to %s [skip ci]" --force
          fi

          # Push the new commit and the new tag back to the main branch.
          git push --follow-tags

      # -----------------------------------------------------------------------
      # --- DEBUGGING STEPS ---
      - name: Verify Project Structure
        run: |
          echo "--- Current Working Directory ---"
          pwd
          echo "--- Listing Root Directory ---"
          ls -alF
          echo "--- Listing SRC Directory ---"
          ls -alF src

      - name: TypeScript Type-Check
        run: npm run type-check

      - name: Prettier Check
        run: npx prettier --check . || true

      - name: Lint Check
        run: npm run lint || true

      - name: Stop Test Server Before Tests
        # This is a critical step to ensure a clean test environment.
        # It stops the currently running pm2 process, freeing up port 3001 so that the
        # integration test suite can launch its own, fresh server instance.
        # '|| true' ensures the workflow doesn't fail if the process isn't running.
        run: |
          echo "--- Stopping and deleting all test processes ---"
          # Use a script to parse pm2's JSON output and delete any process whose name ends with '-test'.
          # This is safer than 'pm2 delete all' and more robust than naming each process individually.
          # It prevents the accumulation of duplicate processes from previous test runs.
          node -e "const exec = require('child_process').execSync; try { const list = JSON.parse(exec('pm2 jlist').toString()); list.forEach(p => { if (p.name && p.name.endsWith('-test')) { console.log('Deleting test process: ' + p.name + ' (' + p.pm2_env.pm_id + ')'); try { exec('pm2 delete ' + p.pm2_env.pm_id); } catch(e) { console.error('Failed to delete ' + p.pm2_env.pm_id, e.message); } } }); console.log('✅ Test process cleanup complete.'); } catch (e) { if (e.stdout.toString().includes('No process found')) { console.log('No PM2 processes running, cleanup not needed.'); } else { console.error('Error cleaning up test processes:', e.message); } }" || true

      - name: Flush Redis Test Database Before Tests
        # CRITICAL: Clear Redis database 1 (test database) to remove stale BullMQ jobs.
        # This prevents old jobs with outdated error messages from polluting test results.
        # NOTE: We use database 1 for tests to isolate from production (database 0).
        env:
          REDIS_PASSWORD: ${{ secrets.REDIS_PASSWORD_TEST }}
        run: |
          echo "--- Flushing Redis database 1 (test database) to remove stale jobs ---"
          if [ -z "$REDIS_PASSWORD" ]; then
            echo "⚠️ REDIS_PASSWORD_TEST not set, attempting flush without password..."
            redis-cli -n 1 FLUSHDB || echo "Redis flush failed (no password)"
          else
            redis-cli -a "$REDIS_PASSWORD" -n 1 FLUSHDB 2>/dev/null && echo "✅ Redis database 1 (test) flushed successfully." || echo "⚠️ Redis flush failed"
          fi
          # Verify the flush worked by checking key count on database 1.
          # FIX: only pass -a when a password is actually set; an empty -a argument
          # makes redis-cli treat the next token as the password.
          if [ -z "$REDIS_PASSWORD" ]; then
            KEY_COUNT=$(redis-cli -n 1 DBSIZE 2>/dev/null | grep -oE '[0-9]+' || echo "unknown")
          else
            KEY_COUNT=$(redis-cli -a "$REDIS_PASSWORD" -n 1 DBSIZE 2>/dev/null | grep -oE '[0-9]+' || echo "unknown")
          fi
          echo "Redis database 1 key count after flush: $KEY_COUNT"

      - name: Run All Tests and Generate Merged Coverage Report
        # This single step runs both unit and integration tests, then merges their
        # coverage data into a single report. It combines the environment variables
        # needed for both test suites.
        env:
          # --- Database credentials for the test suite ---
          # These are injected from Gitea secrets into the runner's environment.
          DB_HOST: ${{ secrets.DB_HOST }}
          DB_USER: ${{ secrets.DB_USER }}
          DB_PASSWORD: ${{ secrets.DB_PASSWORD }}
          DB_NAME: 'flyer-crawler-test' # Explicitly set for tests
          # --- Redis credentials for the test suite ---
          # CRITICAL: Use Redis database 1 to isolate tests from production (which uses db 0).
          # This prevents the production worker from picking up test jobs.
          REDIS_URL: 'redis://localhost:6379/1'
          REDIS_PASSWORD: ${{ secrets.REDIS_PASSWORD_TEST }}
          # --- Integration test specific variables ---
          FRONTEND_URL: 'https://example.com'
          VITE_API_BASE_URL: 'http://localhost:3001/api'
          # NOTE(review): the deploy step below uses VITE_GOOGLE_GENAI_API_KEY_TEST;
          # confirm whether the non-TEST secret is intended here.
          GEMINI_API_KEY: ${{ secrets.VITE_GOOGLE_GENAI_API_KEY }}
          # --- Storage path for flyer images ---
          # CRITICAL: Use an absolute path in the test runner's working directory for file storage.
          # This ensures tests can read processed files to verify their contents (e.g., EXIF stripping).
          # Without this, multer and flyerProcessingService default to /var/www/.../flyer-images.
          # NOTE: We use ${{ github.workspace }} which resolves to the checkout directory.
          STORAGE_PATH: '${{ github.workspace }}/flyer-images'
          # --- JWT Secret for Passport authentication in tests ---
          JWT_SECRET: ${{ secrets.JWT_SECRET }}
          # --- V8 Coverage for Server Process ---
          # This variable tells the Node.js process (our server, started by globalSetup)
          # where to output its raw V8 coverage data.
          NODE_V8_COVERAGE: '.coverage/tmp/integration-server'
          # --- Increase Node.js memory limit to prevent heap out of memory errors ---
          # This is crucial for memory-intensive tasks like running tests and coverage.
          NODE_OPTIONS: '--max-old-space-size=8192 --trace-warnings --unhandled-rejections=strict'
        run: |
          # Fail-fast check to ensure secrets are configured in Gitea for testing.
          if [ -z "$DB_HOST" ] || [ -z "$DB_USER" ] || [ -z "$DB_PASSWORD" ] || [ -z "$DB_NAME" ] || [ -z "$GEMINI_API_KEY" ] || [ -z "$REDIS_PASSWORD" ] || [ -z "$JWT_SECRET" ]; then
            echo "ERROR: One or more test secrets (DB_*, GEMINI_API_KEY, REDIS_PASSWORD_TEST) are not set in Gitea repository secrets."
            exit 1
          fi

          # Temporarily disable secret masking to prevent the runner from garbling test output numbers.
          # FIX: the stop-commands token must be colon-separated ('::stop-commands::<token>');
          # the previous form '::stop-commands secret-masking::' was ignored by the runner.
          echo "::stop-commands::secret-masking"

          # Run unit and integration tests as separate steps.
          # The `|| true` ensures the workflow continues even if tests fail, allowing coverage to run.
          echo "--- Running Unit Tests ---"
          # npm run test:unit -- --coverage --reporter=verbose --includeTaskLocation --testTimeout=10000 --silent=passed-only || true
          npm run test:unit -- --coverage \
            --coverage.exclude='**/*.test.ts' \
            --coverage.exclude='**/tests/**' \
            --coverage.exclude='**/mocks/**' \
            --coverage.exclude='src/components/icons/**' \
            --coverage.exclude='src/db/**' \
            --coverage.exclude='src/lib/**' \
            --coverage.exclude='src/types/**' \
            --coverage.exclude='**/index.tsx' \
            --coverage.exclude='**/vite-env.d.ts' \
            --coverage.exclude='**/vitest.setup.ts' \
            --reporter=verbose --includeTaskLocation --testTimeout=10000 --silent=passed-only --no-file-parallelism || true

          echo "--- Running Integration Tests ---"
          npm run test:integration -- --coverage \
            --coverage.exclude='**/*.test.ts' \
            --coverage.exclude='**/tests/**' \
            --coverage.exclude='**/mocks/**' \
            --coverage.exclude='src/components/icons/**' \
            --coverage.exclude='src/db/**' \
            --coverage.exclude='src/lib/**' \
            --coverage.exclude='src/types/**' \
            --coverage.exclude='**/index.tsx' \
            --coverage.exclude='**/vite-env.d.ts' \
            --coverage.exclude='**/vitest.setup.ts' \
            --reporter=verbose --includeTaskLocation --testTimeout=10000 --silent=passed-only || true

          echo "--- Running E2E Tests ---"
          # Run E2E tests using the dedicated E2E config which inherits from integration config.
          # We still pass --coverage to enable it, but directory and timeout are now in the config.
          npx vitest run --config vitest.config.e2e.ts --coverage \
            --coverage.exclude='**/*.test.ts' \
            --coverage.exclude='**/tests/**' \
            --coverage.exclude='**/mocks/**' \
            --coverage.exclude='src/components/icons/**' \
            --coverage.exclude='src/db/**' \
            --coverage.exclude='src/lib/**' \
            --coverage.exclude='src/types/**' \
            --coverage.exclude='**/index.tsx' \
            --coverage.exclude='**/vite-env.d.ts' \
            --coverage.exclude='**/vitest.setup.ts' \
            --reporter=verbose --no-file-parallelism || true

          # Re-enable secret masking for subsequent steps.
          echo "::secret-masking::"
        continue-on-error: true # Allows the workflow to proceed even if tests fail.

      - name: Merge Coverage and Display Summary
        if: always() # This step runs even if the previous test step failed.
        run: |
          echo "--- Merging Coverage Reports and Displaying Text Summary ---"

          # Add logging to verify that the source coverage files exist before merging.
          echo "Checking for source coverage files..."
          ls -l .coverage/unit/coverage-final.json
          ls -l .coverage/integration/coverage-final.json
          ls -l .coverage/e2e/coverage-final.json || echo "E2E coverage file not found"

          # --- V8 Coverage Processing for Backend Server ---
          # The integration tests start the server, which generates raw V8 coverage data.
          # This step uses the `c8` tool to convert that raw data into a standard
          # Istanbul coverage report (`coverage-final.json`) that can be merged.
          echo "Processing V8 coverage data from the integration test server..."

          # Create a dedicated output directory for the server's coverage report.
          mkdir -p .coverage/integration-server || echo "Directory .coverage/integration-server already exists."
          mkdir -p .coverage/tmp/integration-server || echo "Directory .coverage/tmp/integration-server already exists."

          # Run c8: read raw files from the temp dir, and output an Istanbul JSON report.
          # We only generate the 'json' report here because it's all nyc needs for merging.
          echo "Server coverage report about to be generated..."
          npx c8 report --exclude='**/*.test.ts' --exclude='**/tests/**' --exclude='**/mocks/**' --reporter=json --temp-directory .coverage/tmp/integration-server --reports-dir .coverage/integration-server
          echo "Server coverage report generated. Verifying existence:"
          ls -l .coverage/integration-server/coverage-final.json

          # Now we have three coverage reports:
          # nyc's `report` command can merge multiple coverage files automatically.
          # The standard way to do this is to place all `coverage-final.json` files
          # into a single directory and point `nyc report` to it.

          # Step 1: Define a directory for nyc to use as its source for merging.
          # We use a path relative to the workspace to avoid issues with the runner's CWD.
          NYC_SOURCE_DIR=".coverage/nyc-source-for-report"
          mkdir -p "$NYC_SOURCE_DIR" || echo "Directory $NYC_SOURCE_DIR already exists."
          echo "Created temporary directory for nyc reporting source: $NYC_SOURCE_DIR"

          # Step 2: Copy the individual coverage reports into the source directory.
          # We give them unique names to be safe, though it's not strictly necessary.
          cp .coverage/unit/coverage-final.json "$NYC_SOURCE_DIR/unit-coverage.json"
          cp .coverage/integration/coverage-final.json "$NYC_SOURCE_DIR/integration-coverage.json"
          cp .coverage/e2e/coverage-final.json "$NYC_SOURCE_DIR/e2e-coverage.json" || echo "E2E coverage file not found, skipping."
          # This file might not exist if integration tests fail early, so we add `|| true`
          cp .coverage/integration-server/coverage-final.json "$NYC_SOURCE_DIR/integration-server-coverage.json" || echo "Server coverage file not found, skipping."
          echo "Copied coverage files to source directory. Contents:"
          ls -l "$NYC_SOURCE_DIR"

          # Step 3: Generate the reports directly from the source directory.
          # We explicitly tell nyc where to find the source coverage files (`--temp-dir`)
          # and where to output the final reports (`--report-dir`).
          # This avoids the ENOENT error by preventing `nyc` from looking in a default
          # cache location (`.nyc_output`) which was causing the failure.
          echo "Generating reports from coverage data..."

          # Temporarily disable secret masking to prevent the runner from garbling test output numbers.
          # FIX: colon-separated stop-commands token (see test step above).
          echo "::stop-commands::secret-masking"

          npx nyc report \
            --reporter=text \
            --reporter=html \
            --report-dir .coverage/ \
            --temp-dir "$NYC_SOURCE_DIR" \
            --exclude "**/*.test.ts" \
            --exclude "**/tests/**" \
            --exclude "**/mocks/**" \
            --exclude "**/index.tsx" \
            --exclude "**/vite-env.d.ts" \
            --exclude "**/vitest.setup.ts"

          # Re-enable secret masking for subsequent steps.
          echo "::secret-masking::"

          echo "✅ Coverage reports generated successfully."
        continue-on-error: true # Allows the workflow to proceed even if coverage merge fails.

      - name: Clean Up Test Artifacts
        if: always() # This step runs even if the previous test or coverage steps failed.
        run: echo "Skipping test artifact cleanup on runner; this is handled on the server."

      - name: Archive Code Coverage Report
        # This action saves the generated HTML coverage report as a downloadable artifact.
        uses: actions/upload-artifact@v3
        with:
          name: code-coverage-report
          path: .coverage/
        continue-on-error: true # Allows the workflow to proceed even if tests fail.

      - name: Check for Test Database Schema Changes
        env:
          # Use test database credentials for this check.
          DB_HOST: ${{ secrets.DB_HOST }}
          DB_USER: ${{ secrets.DB_USER }}
          DB_PASSWORD: ${{ secrets.DB_PASSWORD }} # This is used by psql
          DB_NAME: ${{ secrets.DB_DATABASE_TEST }} # This is used by the application
        run: |
          # Fail-fast check to ensure secrets are configured in Gitea.
          if [ -z "$DB_HOST" ] || [ -z "$DB_USER" ] || [ -z "$DB_PASSWORD" ] || [ -z "$DB_NAME" ]; then
            echo "ERROR: One or more test database secrets (DB_HOST, DB_USER, DB_PASSWORD, DB_DATABASE_TEST) are not set in Gitea repository settings."
            exit 1
          fi

          echo "--- Checking for test schema changes ---"
          # Calculate the hash of the current schema file in the repository.
          # We normalize line endings to ensure the hash is consistent across different OS environments.
          CURRENT_HASH=$(cat sql/master_schema_rollup.sql | dos2unix | sha256sum | awk '{ print $1 }')
          echo "Current Git Schema Hash: $CURRENT_HASH"

          # Query the production database to get the hash of the deployed schema.
          # The `psql` command requires PGPASSWORD to be set.
          # `\t` sets tuples-only mode and `\A` unaligns output to get just the raw value.
          # The psql command will now fail the step if the query errors (e.g., column missing), preventing deployment on a bad schema.
          DEPLOYED_HASH=$(PGPASSWORD="$DB_PASSWORD" psql -v ON_ERROR_STOP=1 -h "$DB_HOST" -p 5432 -U "$DB_USER" -d "$DB_NAME" -c "SELECT schema_hash FROM public.schema_info WHERE environment = 'test';" -t -A)
          echo "Deployed DB Schema Hash: $DEPLOYED_HASH"

          # Check if the hash is "none" (command failed) OR if it's an empty string (table exists but is empty).
          if [ -z "$DEPLOYED_HASH" ]; then
            echo "WARNING: No schema hash found in the test database."
            echo "This is expected for a first-time deployment. The hash will be set after a successful deployment."
            echo "--- Debug: Dumping schema_info table ---"
            PGPASSWORD="$DB_PASSWORD" psql -v ON_ERROR_STOP=0 -h "$DB_HOST" -p 5432 -U "$DB_USER" -d "$DB_NAME" -P pager=off -c "SELECT * FROM public.schema_info;" || true
            echo "----------------------------------------"
            # We allow the deployment to continue, but a manual schema update is required.
            # You could choose to fail here by adding `exit 1`.
          elif [ "$CURRENT_HASH" != "$DEPLOYED_HASH" ]; then
            echo "ERROR: Database schema mismatch detected!"
            echo "The schema file in the repository has changed. A manual database migration is required."
            exit 1 # Fail the deployment pipeline.
          else
            echo "✅ Schema is up to date. No changes detected."
          fi

      # --- Frontend Deployment ---
      - name: Build React Application
        # We set the environment variable directly in the command line for this step.
        # This maps the Gitea secret to the environment variable the application expects.
        # We also generate and inject the application version, commit URL, and commit message.
        run: |
          # Fail-fast check for the build-time secret.
          # FIX: validate the secret the build actually consumes (the _TEST variant);
          # the check previously validated VITE_GOOGLE_GENAI_API_KEY instead.
          if [ -z "${{ secrets.VITE_GOOGLE_GENAI_API_KEY_TEST }}" ]; then
            echo "ERROR: The VITE_GOOGLE_GENAI_API_KEY_TEST secret is not set."
            exit 1
          fi

          GITEA_SERVER_URL="https://gitea.projectium.com" # Your Gitea instance URL
          # Sanitize commit message to prevent shell injection or build breaks (removes quotes, backticks, backslashes, $)
          COMMIT_MESSAGE=$(git log -1 --grep="\[skip ci\]" --invert-grep --pretty=%s | tr -d '"`\\$')
          PACKAGE_VERSION=$(node -p "require('./package.json').version")

          # FIX: quote the interpolated secret so shell metacharacters cannot break the command.
          VITE_APP_VERSION="$(date +'%Y%m%d-%H%M'):$(git rev-parse --short HEAD):$PACKAGE_VERSION" \
          VITE_APP_COMMIT_URL="$GITEA_SERVER_URL/${{ gitea.repository }}/commit/${{ gitea.sha }}" \
          VITE_APP_COMMIT_MESSAGE="$COMMIT_MESSAGE" \
          VITE_API_BASE_URL="https://flyer-crawler-test.projectium.com/api" VITE_API_KEY="${{ secrets.VITE_GOOGLE_GENAI_API_KEY_TEST }}" npm run build

      - name: Deploy Application to Test Server
        run: |
          echo "Deploying application files to /var/www/flyer-crawler-test.projectium.com..."
          APP_PATH="/var/www/flyer-crawler-test.projectium.com"

          # Ensure the destination directory exists
          mkdir -p "$APP_PATH"
          mkdir -p "$APP_PATH/flyer-images/icons" "$APP_PATH/flyer-images/archive" # Ensure all required subdirectories exist

          # 1. Copy the backend source code and project files first.
          # CRITICAL: We exclude 'node_modules', '.git', and 'dist'.
          rsync -avz --delete --exclude 'node_modules' --exclude '.git' --exclude 'dist' --exclude 'flyer-images' ./ "$APP_PATH/"

          # 2. Copy the built frontend assets into the same directory.
          # This will correctly place index.html and the assets/ folder in the webroot.
          rsync -avz dist/ "$APP_PATH"
          echo "Application deployment complete."

      - name: Deploy Coverage Report to Public URL
        if: always()
        run: |
          TARGET_DIR="/var/www/flyer-crawler-test.projectium.com/coverage"
          echo "Deploying HTML coverage report to $TARGET_DIR..."
          mkdir -p "$TARGET_DIR"
          rm -rf "$TARGET_DIR"/*
          # The merged nyc report is generated in the .coverage directory. We copy its contents.
          cp -r .coverage/* "$TARGET_DIR/"
          echo "✅ Coverage report deployed to https://flyer-crawler-test.projectium.com/coverage"

      - name: Install Backend Dependencies and Restart Test Server
        env:
          # --- Test Secrets Injection ---
          # These secrets are injected into the environment for the PM2 process.
          # Your Node.js application will read these directly from `process.env`.
          # Database Credentials
          DB_HOST: ${{ secrets.DB_HOST }}
          DB_USER: ${{ secrets.DB_USER }}
          DB_PASSWORD: ${{ secrets.DB_PASSWORD }}
          DB_NAME: ${{ secrets.DB_DATABASE_TEST }}
          # Redis Credentials (use database 1 to isolate from production)
          REDIS_URL: 'redis://localhost:6379/1'
          REDIS_PASSWORD: ${{ secrets.REDIS_PASSWORD_TEST }}
          # Application Secrets
          FRONTEND_URL: 'https://example.com'
          JWT_SECRET: ${{ secrets.JWT_SECRET }}
          GEMINI_API_KEY: ${{ secrets.VITE_GOOGLE_GENAI_API_KEY_TEST }}
          GOOGLE_MAPS_API_KEY: ${{ secrets.GOOGLE_MAPS_API_KEY }}
          # SMTP (email)
          SMTP_HOST: 'localhost'
          SMTP_PORT: '1025'
          SMTP_SECURE: 'false'
          SMTP_USER: '' # Using MailHog, no auth needed
          SMTP_PASS: '' # Using MailHog, no auth needed
          SMTP_FROM_EMAIL: 'noreply@flyer-crawler-test.projectium.com'
        run: |
          # Fail-fast check to ensure secrets are configured in Gitea.
          MISSING_SECRETS=""
          if [ -z "$DB_HOST" ]; then MISSING_SECRETS="${MISSING_SECRETS} DB_HOST"; fi
          if [ -z "$DB_USER" ]; then MISSING_SECRETS="${MISSING_SECRETS} DB_USER"; fi
          if [ -z "$DB_PASSWORD" ]; then MISSING_SECRETS="${MISSING_SECRETS} DB_PASSWORD"; fi
          if [ -z "$DB_NAME" ]; then MISSING_SECRETS="${MISSING_SECRETS} DB_NAME"; fi
          if [ -z "$JWT_SECRET" ]; then MISSING_SECRETS="${MISSING_SECRETS} JWT_SECRET"; fi

          if [ ! -z "$MISSING_SECRETS" ]; then
            echo "ERROR: The following required secrets are missing in Gitea:${MISSING_SECRETS}"
            exit 1
          fi

          echo "Installing production dependencies and restarting test server..."
          cd /var/www/flyer-crawler-test.projectium.com
          npm install --omit=dev

          # --- Cleanup Errored Processes ---
          echo "Cleaning up errored or stopped PM2 processes..."
          node -e "const exec = require('child_process').execSync; try { const list = JSON.parse(exec('pm2 jlist').toString()); list.forEach(p => { if (p.pm2_env.status === 'errored' || p.pm2_env.status === 'stopped') { console.log('Deleting ' + p.pm2_env.status + ' process: ' + p.name + ' (' + p.pm2_env.pm_id + ')'); try { exec('pm2 delete ' + p.pm2_env.pm_id); } catch(e) { console.error('Failed to delete ' + p.pm2_env.pm_id); } } }); } catch (e) { console.error('Error cleaning up processes:', e); }"

          # Use `startOrReload` with the ecosystem file. This is the standard, idempotent way to deploy.
          # It will START the process if it's not running, or RELOAD it if it is.
          # We also add `&& pm2 save` to persist the process list across server reboots.
          pm2 startOrReload ecosystem.config.cjs --env test --update-env && pm2 save
          echo "Test backend server reloaded successfully."

          # After a successful deployment, update the schema hash in the database.
          # This ensures the next deployment will compare against this new state.
          echo "Updating schema hash in test database..."
          CURRENT_HASH=$(cat sql/master_schema_rollup.sql | dos2unix | sha256sum | awk '{ print $1 }')
          PGPASSWORD="$DB_PASSWORD" psql -v ON_ERROR_STOP=1 -h "$DB_HOST" -p 5432 -U "$DB_USER" -d "$DB_NAME" -c \
            "CREATE TABLE IF NOT EXISTS public.schema_info (
              environment VARCHAR(50) PRIMARY KEY,
              schema_hash VARCHAR(64) NOT NULL,
              deployed_at TIMESTAMP DEFAULT NOW()
            );
            INSERT INTO public.schema_info (environment, schema_hash, deployed_at)
            VALUES ('test', '$CURRENT_HASH', NOW())
            ON CONFLICT (environment) DO UPDATE SET schema_hash = EXCLUDED.schema_hash, deployed_at = NOW();"

          # Verify the hash was updated
          UPDATED_HASH=$(PGPASSWORD="$DB_PASSWORD" psql -v ON_ERROR_STOP=1 -h "$DB_HOST" -p 5432 -U "$DB_USER" -d "$DB_NAME" -c "SELECT schema_hash FROM public.schema_info WHERE environment = 'test';" -t -A)
          if [ "$CURRENT_HASH" = "$UPDATED_HASH" ]; then
            echo "✅ Schema hash successfully updated in the database to: $UPDATED_HASH"
          else
            echo "ERROR: Failed to update schema hash in the database."
          fi

          echo "--- Cleaning up test-generated flyer assets from production directories ---"
          PROD_APP_PATH="/var/www/flyer-crawler.projectium.com"
          find "$PROD_APP_PATH/flyer-images" -type f -name '*-test-flyer-image.*' -delete || echo "No test flyer images to delete in prod."
          find "$PROD_APP_PATH/flyer-images/icons" -type f -name '*-test-flyer-image.*' -delete || echo "No test flyer icons to delete in prod."
          find "$PROD_APP_PATH/flyer-images/archive" -mindepth 1 -maxdepth 1 -type f -delete || echo "Prod archive directory is empty or not found, skipping."
          echo "✅ Test artifacts cleared from production asset directories."

      - name: Show PM2 Environment for Test
        run: |
          echo "--- Displaying recent PM2 logs for flyer-crawler-api-test ---"
          # After a reload, the server restarts. We'll show the last 20 lines of the log to see the startup messages.
          sleep 5
          # Resolve the PM2 ID dynamically to ensure we target the correct process
          PM2_ID=$(pm2 jlist | node -e "try { const list = JSON.parse(require('fs').readFileSync(0, 'utf-8')); const app = list.find(p => p.name === 'flyer-crawler-api-test'); console.log(app ? app.pm2_env.pm_id : ''); } catch(e) { console.log(''); }")
          if [ -n "$PM2_ID" ]; then
            echo "Found process ID: $PM2_ID"
            pm2 describe "$PM2_ID" || echo "Failed to describe process $PM2_ID"
            pm2 logs "$PM2_ID" --lines 20 --nostream || echo "Failed to get logs for $PM2_ID"
            pm2 env "$PM2_ID" || echo "Failed to get env for $PM2_ID"
          else
            echo "Could not find process 'flyer-crawler-api-test' in pm2 list."
            pm2 list # Fallback to listing everything to help debug
          fi