- test.sh: save each run to qa-reports/test_run_<timestamp>.log with ANSI-stripped output; show per-stage skip counts in summary - test_qa_plsql: fix wrong table names (parteneri→nom_parteneri, com_antet→comenzi, comenzi_articole→comenzi_elemente), pass datetime for data_comanda, use string JSON values for Oracle get_string(), lookup article with valid price policy - test_integration: fix article search min_length (1→2 chars), use unique SKU per run to avoid soft-delete 409 conflicts - test_qa_responsive: return early instead of skip on empty tables Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
324 lines
12 KiB
Bash
Executable File
#!/bin/bash
#
# test.sh — Test orchestrator for GoMag Vending.
#
# Usage: ./test.sh [ci|full|unit|e2e|oracle|sync|plsql|qa|smoke-prod|logs|--dry-run]
#
# Shell options: -u errors on unset variables; pipefail makes a pipeline fail
# when any stage fails. errexit (-e) is not enabled at startup: a failing test
# stage is expected to be recorded and the remaining stages to keep running.
set -uo pipefail

# Run everything relative to the directory containing this script so the
# qa-reports/, venv/ and api/ paths below resolve no matter where we're invoked from.
cd "$(dirname "$0")"
|
# ─── Colors ───────────────────────────────────────────────────────────────────
# ANSI SGR sequences stored as literal '\033…' strings; every echo below uses
# -e so they are expanded at print time. readonly: these are constants.
readonly GREEN='\033[32m'
readonly RED='\033[31m'
readonly YELLOW='\033[33m'
readonly CYAN='\033[36m'
readonly RESET='\033[0m'

# ─── Log file setup ──────────────────────────────────────────────────────────
# Each run appends a timestamped, ANSI-stripped transcript under qa-reports/.
readonly LOG_DIR="qa-reports"
mkdir -p "$LOG_DIR"
# Declaration split from the command substitution so a (theoretical) date
# failure isn't masked by the assignment's own exit status.
TIMESTAMP=$(date '+%Y%m%d_%H%M%S')
readonly TIMESTAMP
readonly LOG_FILE="${LOG_DIR}/test_run_${TIMESTAMP}.log"
|
|
|
|
# Strip ANSI escape sequences from stdin so the log file stays plain text.
# Generalized: removes any CSI sequence (ESC [ params final-byte), i.e. cursor
# movement and erase codes too, not only the 'm'-terminated color codes the
# original pattern matched.
strip_ansi() {
  sed 's/\x1b\[[0-9;]*[a-zA-Z]//g'
}
|
|
|
|
# Tee stdin to both the terminal (stdout, colors intact) and the log file
# (ANSI-stripped, appended through a process substitution).
# NOTE(review): the >() writer runs asynchronously — bytes written just before
# the script exits may not yet be flushed into $LOG_FILE; confirm whether
# losing the final lines of the transcript matters here.
log_tee() {
  tee >(strip_ansi >> "$LOG_FILE")
}
|
|
|
|
# ─── Stage tracking ───────────────────────────────────────────────────────────
# Parallel arrays, one slot per executed stage; indexed together in print_summary.
STAGE_NAMES=()    # human-readable stage labels
STAGE_RESULTS=()  # 0=pass, 1=fail, 2=skip
STAGE_SKIPPED=()  # number of individually skipped tests within the stage
STAGE_DETAILS=()  # pytest summary line captured for the stage
EXIT_CODE=0       # overall script status; flips to 1 once any stage fails
TOTAL_SKIPPED=0   # skipped-test count accumulated across all stages
|
|
|
|
# Record the outcome of one executed stage.
# Arguments: $1 stage name, $2 exit code, $3 skipped-test count (default 0),
#            $4 pytest summary line (default empty).
# Side effects: appends to the STAGE_* arrays, accumulates TOTAL_SKIPPED, and
# sets EXIT_CODE=1 when the stage's exit code is non-zero.
record() {
  local name=$1 code=$2
  local skipped=${3:-0}
  local details=${4:-}

  STAGE_NAMES+=("$name")
  STAGE_SKIPPED+=("$skipped")
  STAGE_DETAILS+=("$details")
  TOTAL_SKIPPED=$((TOTAL_SKIPPED + skipped))

  if (( code == 0 )); then
    STAGE_RESULTS+=(0)
  else
    STAGE_RESULTS+=(1)
    EXIT_CODE=1
  fi
}
|
|
|
|
# Mark a stage as skipped entirely (never run). Does not touch EXIT_CODE or
# TOTAL_SKIPPED — a skipped stage is neutral in the final verdict.
skip_stage() {
  local stage=$1
  STAGE_NAMES+=("$stage")
  STAGE_RESULTS+=(2)
  STAGE_SKIPPED+=(0)
  STAGE_DETAILS+=("")
}
|
|
|
|
# ─── Environment setup ────────────────────────────────────────────────────────

# Read the value of $2 from env file $1. Last assignment wins (the old
# grep|cut pipeline concatenated ALL matching lines into a multiline value);
# surrounding whitespace and optional double quotes are stripped, but interior
# spaces are preserved (the old `tr -d ' '` deleted them, corrupting any path
# containing a space). Prints nothing when the key is absent.
_env_value() {
  local file=$1 key=$2
  grep -E "^${key}=" "$file" 2>/dev/null | tail -1 | cut -d'=' -f2- \
    | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' -e 's/^"//' -e 's/"$//'
}

# Activate the venv and export the Oracle client environment
# (TNS_ADMIN, LD_LIBRARY_PATH). Exits 1 when the venv is missing.
setup_env() {
  if [ ! -d "venv" ]; then
    echo -e "${RED}ERROR: venv not found. Run ./start.sh first.${RESET}"
    exit 1
  fi
  source venv/bin/activate

  # Oracle wallet / tnsnames config lives next to the API code.
  export TNS_ADMIN="$(pwd)/api"

  # Prefer the path configured in api/.env, fall back to the default install.
  # NOTE(review): the key is INSTANTCLIENTPATH (no underscores) — confirm this
  # matches what api/.env actually uses.
  local instantclient_path=""
  if [ -f "api/.env" ]; then
    instantclient_path=$(_env_value "api/.env" "INSTANTCLIENTPATH")
  fi
  if [ -z "$instantclient_path" ]; then
    instantclient_path="/opt/oracle/instantclient_21_15"
  fi

  # Only prepend to LD_LIBRARY_PATH when the directory actually exists.
  if [ -d "$instantclient_path" ]; then
    export LD_LIBRARY_PATH="${instantclient_path}:${LD_LIBRARY_PATH:-}"
  fi
}
|
|
|
|
# ─── App lifecycle (for tests that need a running app) ───────────────────────
APP_PID=""     # PID of an app process WE started ("" = none started by us)
APP_PORT=5003  # port the uvicorn app is launched on / probed at
|
|
|
|
# Succeed (exit 0) exactly when the app answers its health endpoint on APP_PORT.
# All curl output and errors are discarded — only the exit status matters.
app_is_running() {
  curl --silent --fail "http://localhost:${APP_PORT}/health" > /dev/null 2>&1
}
|
|
|
|
# Start the app in the background on APP_PORT unless one already answers.
# On success APP_PID holds the child's PID; on timeout the child is killed,
# reaped, and APP_PID is reset to "". Always returns 0 — callers treat app
# startup as best-effort and the stages themselves surface connection errors.
start_app() {
  if app_is_running; then
    echo -e "${GREEN}App already running on :${APP_PORT}${RESET}"
    return
  fi
  echo -e "${YELLOW}Starting app on :${APP_PORT}...${RESET}"
  cd api
  python -m uvicorn app.main:app --host 0.0.0.0 --port "$APP_PORT" &>/dev/null &
  APP_PID=$!
  cd ..
  # Poll the health endpoint: 30 attempts x 0.5s = up to 15 seconds.
  local attempt
  for attempt in $(seq 1 30); do
    if app_is_running; then
      echo -e "${GREEN}App started (PID=${APP_PID})${RESET}"
      return
    fi
    sleep 0.5
  done
  echo -e "${RED}App failed to start within 15s${RESET}"
  # Kill AND reap the child — the original killed without wait, leaving a
  # zombie process around until the script exited.
  if [ -n "$APP_PID" ]; then
    kill "$APP_PID" 2>/dev/null || true
    wait "$APP_PID" 2>/dev/null || true
  fi
  APP_PID=""
}
|
|
|
|
# Stop the app we started and reap it; a no-op when APP_PID is empty
# (i.e. the app was already running before us, or was never started).
stop_app() {
  [ -n "$APP_PID" ] || return 0
  echo -e "${YELLOW}Stopping app (PID=${APP_PID})...${RESET}"
  kill "$APP_PID" 2>/dev/null || true
  wait "$APP_PID" 2>/dev/null || true
  APP_PID=""
}
|
|
|
|
# ─── Dry-run checks ───────────────────────────────────────────────────────────
# Probe prerequisites without running any tests, then exit the script:
# exit 0 when the hard requirements (venv, pytest) are present, 1 otherwise.
# Playwright and Oracle credentials only warn — they matter for some modes only.
dry_run() {
  echo -e "${YELLOW}=== Dry-run: checking prerequisites ===${RESET}"
  local ok=0

  if [ -d "venv" ]; then
    echo -e "${GREEN}✅ venv exists${RESET}"
  else
    echo -e "${RED}❌ venv missing — run ./start.sh first${RESET}"
    ok=1
  fi

  # Best-effort activation so the python/pytest probes below see the venv.
  source venv/bin/activate 2>/dev/null || true

  if python -m pytest --version &>/dev/null; then
    echo -e "${GREEN}✅ pytest installed${RESET}"
  else
    echo -e "${RED}❌ pytest not found${RESET}"
    ok=1
  fi

  if python -c "import playwright" 2>/dev/null; then
    echo -e "${GREEN}✅ playwright installed${RESET}"
  else
    echo -e "${YELLOW}⚠️ playwright not found (needed for e2e/qa)${RESET}"
  fi

  if [ -n "${ORACLE_USER:-}" ] && [ -n "${ORACLE_PASSWORD:-}" ] && [ -n "${ORACLE_DSN:-}" ]; then
    echo -e "${GREEN}✅ Oracle env vars set${RESET}"
  else
    echo -e "${YELLOW}⚠️ Oracle env vars not set (needed for oracle/sync/full)${RESET}"
  fi

  # Exits the whole script — dry-run never proceeds to test stages.
  exit $ok
}
|
|
|
|
# ─── Run helpers ──────────────────────────────────────────────────────────────

# Run one test stage: "$1" is the label, the rest is the command to execute.
# Output streams live to the terminal, is appended (ANSI-stripped) to the log,
# and is scanned for a pytest summary line to extract the per-stage skip count.
# Never aborts the script — the result is recorded and execution continues.
run_stage() {
  local label="$1"
  shift
  echo ""
  echo -e "${YELLOW}=== $label ===${RESET}"

  # Buffer a copy of the output for summary parsing while still streaming it.
  local tmpout code
  tmpout=$(mktemp)
  # Note: no `set +e` / `set -e` pair here. The script only sets -uo pipefail;
  # the old `set -e` re-enable silently turned errexit ON for everything after
  # the first stage, changing behavior the top of the script never asked for.
  "$@" 2>&1 | tee "$tmpout" | log_tee
  code=${PIPESTATUS[0]}

  # Pytest summary lines look like: "===== 5 passed, 3 skipped in 1.23s ====="
  # Anchor on the ===…=== frame and require "<count> <word>" so arbitrary test
  # output containing "failed"/"error" is not mistaken for the summary (the old
  # unparenthesised alternation matched any line containing those words).
  local skipped=0 summary_line=""
  summary_line=$(grep -E '^=+.*[0-9]+ (passed|failed|error|errors|skipped).*=+$' "$tmpout" | tail -1 || true)
  if [ -n "$summary_line" ]; then
    skipped=$(echo "$summary_line" | grep -oP '\d+(?= skipped)' | head -1 || true)
    if [ -z "$skipped" ]; then
      skipped=0
    fi
  fi
  rm -f "$tmpout"

  record "$label" "$code" "$skipped" "$summary_line"
  # Deliberately no `return $code` — failures must not stop later stages.
}
|
|
|
|
# ─── Summary box ──────────────────────────────────────────────────────────────
# Render the final per-stage results table to stdout.
# Reads: STAGE_NAMES/STAGE_RESULTS/STAGE_SKIPPED (parallel arrays), EXIT_CODE,
#        TOTAL_SKIPPED, LOG_FILE, and the color constants. Writes nothing else.
print_summary() {
  echo ""
  echo -e "${YELLOW}╔══════════════════════════════════════════════════╗${RESET}"
  echo -e "${YELLOW}║ TEST RESULTS SUMMARY ║${RESET}"
  echo -e "${YELLOW}╠══════════════════════════════════════════════════╣${RESET}"

  for i in "${!STAGE_NAMES[@]}"; do
    local name="${STAGE_NAMES[$i]}"
    local result="${STAGE_RESULTS[$i]}"
    local skipped="${STAGE_SKIPPED[$i]}"
    # Pad name to 24 chars so the result columns line up.
    local padded
    padded=$(printf "%-24s" "$name")
    if [ "$result" -eq 0 ]; then
      if [ "$skipped" -gt 0 ]; then
        # NOTE(review): skip_note is built but never used — the echo below
        # formats the skip count inline instead. Dead code candidate.
        local skip_note
        skip_note=$(printf "passed (%d skipped)" "$skipped")
        echo -e "${YELLOW}║${RESET} ${GREEN}✅${RESET} ${padded} ${GREEN}passed${RESET} ${CYAN}(${skipped} skipped)${RESET} ${YELLOW}║${RESET}"
      else
        echo -e "${YELLOW}║${RESET} ${GREEN}✅${RESET} ${padded} ${GREEN}passed${RESET} ${YELLOW}║${RESET}"
      fi
    elif [ "$result" -eq 1 ]; then
      echo -e "${YELLOW}║${RESET} ${RED}❌${RESET} ${padded} ${RED}FAILED${RESET} ${YELLOW}║${RESET}"
    else
      # result == 2: stage was skipped entirely (recorded via skip_stage).
      echo -e "${YELLOW}║${RESET} ${YELLOW}⏭️ ${RESET} ${padded} ${YELLOW}skipped${RESET} ${YELLOW}║${RESET}"
    fi
  done

  echo -e "${YELLOW}╠══════════════════════════════════════════════════╣${RESET}"
  if [ "$EXIT_CODE" -eq 0 ]; then
    if [ "$TOTAL_SKIPPED" -gt 0 ]; then
      echo -e "${YELLOW}║${RESET} ${GREEN}All stages passed!${RESET} ${CYAN}(${TOTAL_SKIPPED} tests skipped total)${RESET} ${YELLOW}║${RESET}"
    else
      echo -e "${YELLOW}║${RESET} ${GREEN}All stages passed!${RESET} ${YELLOW}║${RESET}"
    fi
  else
    echo -e "${YELLOW}║${RESET} ${RED}Some stages FAILED — check output above${RESET} ${YELLOW}║${RESET}"
  fi
  echo -e "${YELLOW}║${RESET} Log: ${CYAN}${LOG_FILE}${RESET}"
  echo -e "${YELLOW}║${RESET} Health Score: see qa-reports/"
  echo -e "${YELLOW}╚══════════════════════════════════════════════════╝${RESET}"
}
|
|
|
|
# ─── Cleanup trap ────────────────────────────────────────────────────────────
# Stop any app WE started on every exit path (stop_app is a no-op otherwise).
trap 'stop_app' EXIT

# ─── Main ─────────────────────────────────────────────────────────────────────
MODE="${1:-ci}"  # default mode: fast CI stages only

# --dry-run only checks prerequisites; dry_run exits the script itself.
if [ "$MODE" = "--dry-run" ]; then
  setup_env
  dry_run
fi

setup_env

# Write log header (truncates any previous log with the same timestamp;
# later output is appended by log_tee).
echo "=== test.sh ${MODE} — $(date '+%Y-%m-%d %H:%M:%S') ===" > "$LOG_FILE"
echo "" >> "$LOG_FILE"

case "$MODE" in
  ci)
    # Fast feedback: unit + browser E2E, no Oracle / running app required.
    run_stage "Unit tests" python -m pytest -m unit -v
    run_stage "E2E browser" python -m pytest api/tests/e2e/ \
      --ignore=api/tests/e2e/test_dashboard_live.py -v
    ;;

  full)
    # Everything: unit, E2E, Oracle, then HTTP-dependent stages with the app up.
    run_stage "Unit tests" python -m pytest -m unit -v
    run_stage "E2E browser" python -m pytest api/tests/e2e/ \
      --ignore=api/tests/e2e/test_dashboard_live.py -v
    run_stage "Oracle integration" python -m pytest -m oracle -v
    # Start app for stages that need HTTP access
    start_app
    run_stage "Sync tests" python -m pytest -m sync -v --base-url "http://localhost:${APP_PORT}"
    run_stage "PL/SQL QA" python -m pytest api/tests/qa/test_qa_plsql.py -v
    run_stage "QA suite" python -m pytest -m qa -v --base-url "http://localhost:${APP_PORT}"
    stop_app
    ;;

  unit)
    run_stage "Unit tests" python -m pytest -m unit -v
    ;;

  e2e)
    run_stage "E2E browser" python -m pytest api/tests/e2e/ \
      --ignore=api/tests/e2e/test_dashboard_live.py -v
    ;;

  oracle)
    run_stage "Oracle integration" python -m pytest -m oracle -v
    ;;

  sync)
    start_app
    run_stage "Sync tests" python -m pytest -m sync -v --base-url "http://localhost:${APP_PORT}"
    stop_app
    ;;

  plsql)
    run_stage "PL/SQL QA" python -m pytest api/tests/qa/test_qa_plsql.py -v
    ;;

  qa)
    start_app
    run_stage "QA suite" python -m pytest -m qa -v --base-url "http://localhost:${APP_PORT}"
    stop_app
    ;;

  smoke-prod)
    # Drop the mode word so any remaining CLI args pass straight to pytest.
    shift || true
    run_stage "Smoke prod" python -m pytest api/tests/qa/test_qa_smoke_prod.py "$@"
    ;;

  logs)
    run_stage "Logs monitor" python -m pytest api/tests/qa/test_qa_logs_monitor.py -v
    ;;

  *)
    echo -e "${RED}Unknown mode: $MODE${RESET}"
    echo "Usage: $0 [ci|full|unit|e2e|oracle|sync|plsql|qa|smoke-prod|logs|--dry-run]"
    exit 1
    ;;
esac

# Mirror the summary box into the log too, then report where the log lives.
print_summary 2>&1 | log_tee
echo ""
echo -e "${CYAN}Full log saved to: ${LOG_FILE}${RESET}"
exit $EXIT_CODE
|