don't store short env values in gitea secrets
All checks were successful
Deploy to Web Server flyer-crawler.projectium.com / deploy (push) Successful in 3m43s
@@ -82,7 +82,6 @@ jobs:
 # --- Database credentials for the test suite ---
 # These are injected from Gitea secrets into the runner's environment.
 DB_HOST: ${{ secrets.DB_HOST }}
-DB_PORT: ${{ secrets.DB_PORT }}
 DB_USER: ${{ secrets.DB_USER }}
 DB_PASSWORD: ${{ secrets.DB_PASSWORD }}
 DB_NAME: "flyer-crawler-test" # Explicitly set for tests

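With DB_PORT no longer injected as a secret, the port has to come from the command or config itself. A minimal bash sketch (hypothetical, not part of this diff) of how a test step could check the test database with the literal default PostgreSQL port standing in for the removed secret; the DB_* variables and the "flyer-crawler-test" name are the ones shown above:

    # Hypothetical connection smoke test; 5432 replaces the old DB_PORT secret.
    PGPASSWORD="$DB_PASSWORD" psql -h "$DB_HOST" -p 5432 -U "$DB_USER" \
      -d "flyer-crawler-test" -c "SELECT 1;" > /dev/null && echo "test DB reachable"
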
@@ -107,12 +106,11 @@ jobs:
 exit 1
 fi

-# Run unit and integration tests as separate steps.
-# The `|| true` ensures the workflow continues even if tests fail, allowing coverage to run.
-
 # Temporarily disable secret masking to prevent the runner from garbling test output numbers.
 echo "::stop-commands secret-masking::"

+# Run unit and integration tests as separate steps.
+# The `|| true` ensures the workflow continues even if tests fail, allowing coverage to run.
 echo "--- Running Unit Tests ---"
 npm run test:unit -- --coverage --reporter=verbose --includeTaskLocation --testTimeout=20000 || true

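For reference, `::stop-commands::` is a GitHub-style workflow command that suspends the runner's interpretation of further workflow commands in the log until a matching end token is echoed; this sketch assumes Gitea's runner honors the same documented form (the hunk above writes the token slightly differently):

    # Documented GitHub Actions pattern: stop with a token, resume by echoing the token.
    echo "::stop-commands::secret-masking"
    npm run test:unit || true
    echo "::secret-masking::"   # resume normal workflow-command processing
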
@@ -217,7 +215,6 @@ jobs:
 env:
 # Use production database credentials for this check.
 DB_HOST: ${{ secrets.DB_HOST }}
-DB_PORT: ${{ secrets.DB_PORT }}
 DB_USER: ${{ secrets.DB_USER }}
 DB_PASSWORD: ${{ secrets.DB_PASSWORD }} # This is used by psql
 DB_NAME: ${{ secrets.DB_DATABASE_PROD }} # This is used by the application

@@ -238,7 +235,7 @@ jobs:
 # The `psql` command requires PGPASSWORD to be set.
 # `-t` sets tuples-only mode and `-A` unaligns output to get just the raw value.
 # The `|| echo "none"` ensures the command doesn't fail if the table or row doesn't exist yet.
-DEPLOYED_HASH=$(PGPASSWORD="$DB_PASSWORD" psql -v ON_ERROR_STOP=1 -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" -c "SELECT schema_hash FROM public.schema_info WHERE id = 1;" -t -A || echo "none")
+DEPLOYED_HASH=$(PGPASSWORD="$DB_PASSWORD" psql -v ON_ERROR_STOP=1 -h "$DB_HOST" -p 5432 -U "$DB_USER" -d "$DB_NAME" -c "SELECT schema_hash FROM public.schema_info WHERE id = 1;" -t -A || echo "none")
 echo "Deployed DB Schema Hash: $DEPLOYED_HASH"

 # Check if the hash is "none" (command failed) OR if it's an empty string (table exists but is empty).

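The `-t`/`-A` pair is what makes the captured value usable for a plain string comparison. A small illustrative sketch, assuming the schema_info table exists:

    # Default psql output includes a column header, alignment padding and a row-count
    # footer; -t (tuples only) drops the header/footer and -A (unaligned) drops the
    # padding, so only the raw schema_hash value lands in the variable.
    PGPASSWORD="$DB_PASSWORD" psql -h "$DB_HOST" -p 5432 -U "$DB_USER" -d "$DB_NAME" \
      -t -A -c "SELECT schema_hash FROM public.schema_info WHERE id = 1;"
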
@@ -300,7 +297,6 @@ jobs:

 # Database Credentials
 DB_HOST: ${{ secrets.DB_HOST }}
-DB_PORT: ${{ secrets.DB_PORT }}
 DB_USER: ${{ secrets.DB_USER }}
 DB_PASSWORD: ${{ secrets.DB_PASSWORD }} # Used by psql
 DB_NAME: ${{ secrets.DB_DATABASE_PROD }} # Standardize on the existing prod secret name

@@ -314,7 +310,6 @@ jobs:
 JWT_SECRET: ${{ secrets.JWT_SECRET }}
 GEMINI_API_KEY: ${{ secrets.VITE_GOOGLE_GENAI_API_KEY }} # Re-use the same secret for the server
 GOOGLE_MAPS_API_KEY: ${{ secrets.GOOGLE_MAPS_API_KEY }}
-WORKER_CONCURRENCY: ${{ secrets.WORKER_CONCURRENCY }}

 # SMTP (email)
 SMTP_HOST: "localhost"

@@ -345,12 +340,12 @@ jobs:
 # This ensures the next deployment will compare against this new state.
 echo "Updating schema hash in production database..."
 CURRENT_HASH=$(cat sql/master_schema_rollup.sql | dos2unix | sha256sum | awk '{ print $1 }')
-PGPASSWORD="$DB_PASSWORD" psql -v ON_ERROR_STOP=1 -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" -c \
+PGPASSWORD="$DB_PASSWORD" psql -v ON_ERROR_STOP=1 -h "$DB_HOST" -p 5432 -U "$DB_USER" -d "$DB_NAME" -c \
 "INSERT INTO public.schema_info (id, schema_hash, deployed_at) VALUES (1, '$CURRENT_HASH', NOW())
 ON CONFLICT (id) DO UPDATE SET schema_hash = EXCLUDED.schema_hash, deployed_at = NOW();"

 # Verify the hash was updated
-UPDATED_HASH=$(PGPASSWORD="$DB_PASSWORD" psql -v ON_ERROR_STOP=1 -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" -c "SELECT schema_hash FROM public.schema_info WHERE id = 1;" -t -A)
+UPDATED_HASH=$(PGPASSWORD="$DB_PASSWORD" psql -v ON_ERROR_STOP=1 -h "$DB_HOST" -p 5432 -U "$DB_USER" -d "$DB_NAME" -c "SELECT schema_hash FROM public.schema_info WHERE id = 1;" -t -A)
 if [ "$CURRENT_HASH" = "$UPDATED_HASH" ]; then
 echo "✅ Schema hash successfully updated in the database to: $UPDATED_HASH"
 else

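The INSERT ... ON CONFLICT form makes the update idempotent: the first deployment inserts the single id=1 row, every later deployment overwrites it. To reproduce the value the workflow stores, the same pipeline shown above can be run locally (a sketch, assuming a repo checkout and dos2unix installed):

    # Compute the schema hash exactly as the workflow does: normalize line endings
    # first so CRLF/LF differences don't change the sha256, then keep only the hash.
    cat sql/master_schema_rollup.sql | dos2unix | sha256sum | awk '{ print $1 }'
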
@@ -23,7 +23,6 @@ jobs:
 env:
 # Use production database credentials for this entire job.
 DB_HOST: ${{ secrets.DB_HOST }}
-DB_PORT: ${{ secrets.DB_PORT }}
 DB_USER: ${{ secrets.DB_USER }}
 DB_PASSWORD: ${{ secrets.DB_PASSWORD }} # Used by psql
 DB_NAME: ${{ secrets.DB_DATABASE_PROD }} # Used by the application

@@ -22,7 +22,6 @@ jobs:
 env:
 # Use production database credentials for this entire job.
 DB_HOST: ${{ secrets.DB_HOST }}
-DB_PORT: ${{ secrets.DB_PORT }}
 DB_USER: ${{ secrets.DB_USER }}
 DB_PASSWORD: ${{ secrets.DB_PASSWORD }}
 DB_NAME: ${{ secrets.DB_DATABASE_PROD }}

@@ -84,7 +83,7 @@ jobs:

 # Uncompress the gzipped file and pipe the SQL commands directly into psql.
 # This is efficient as it doesn't require an intermediate uncompressed file.
-gunzip < "$BACKUP_FILE_PATH" | PGPASSWORD="$DB_PASSWORD" psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME"
+gunzip < "$BACKUP_FILE_PATH" | PGPASSWORD="$DB_PASSWORD" psql -h "$DB_HOST" -p 5432 -U "$DB_USER" -d "$DB_NAME"

 echo "✅ Database restore completed successfully."

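This restore step assumes a gzip-compressed plain-SQL dump at $BACKUP_FILE_PATH. A hedged sketch of a matching backup command (not part of this diff) that would produce such a file; the flags mirror the restore side and are illustrative only:

    # pg_dump emits plain SQL by default; piping through gzip yields the compressed
    # file that the gunzip | psql restore above consumes.
    PGPASSWORD="$DB_PASSWORD" pg_dump -h "$DB_HOST" -p 5432 -U "$DB_USER" "$DB_NAME" \
      | gzip > "$BACKUP_FILE_PATH"
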