feat: Zero-downtime deployment + drops auto-cleanup
Some checks failed
CI / Frontend Lint & Type Check (push) Has been cancelled
CI / Frontend Build (push) Has been cancelled
CI / Backend Lint (push) Has been cancelled
CI / Backend Tests (push) Has been cancelled
CI / Docker Build (push) Has been cancelled
CI / Security Scan (push) Has been cancelled
Deploy / Build & Push Images (push) Has been cancelled
Deploy / Deploy to Server (push) Has been cancelled
Deploy / Notify (push) Has been cancelled

1. Deploy Pipeline v3.0:
   - Zero-downtime frontend deployment (build while server runs)
   - Atomic switchover only after successful build
   - Server stays up during entire npm install + npm run build

2. Navigation:
   - Removed "Intel" from public navigation (use Discover instead)

3. Drops Auto-Cleanup:
   - New scheduler job every 4 hours to verify drops availability
   - Automatically removes domains that have been re-registered
   - Keeps drops list clean with only actually available domains
This commit is contained in:
2025-12-18 11:20:18 +01:00
parent f807f2d2bc
commit 52ee772391
4 changed files with 410 additions and 226 deletions

View File

@ -726,6 +726,15 @@ def setup_scheduler():
replace_existing=True,
)
# Drops availability verification (every 4 hours - remove taken domains)
scheduler.add_job(
verify_drops,
CronTrigger(hour='*/4', minute=15), # Every 4 hours at :15
id="drops_verification",
name="Drops Availability Check (4-hourly)",
replace_existing=True,
)
logger.info(
f"Scheduler configured:"
f"\n - Scout domain check at {settings.check_hour:02d}:{settings.check_minute:02d} (daily)"
@ -737,6 +746,7 @@ def setup_scheduler():
f"\n - Expired auction cleanup every 15 minutes"
f"\n - Sniper alert matching every 30 minutes"
f"\n - Zone file sync daily at 05:00 UTC"
f"\n - Drops availability check every 4 hours"
)
@ -992,6 +1002,37 @@ async def cleanup_zone_data():
logger.exception(f"Zone data cleanup failed: {e}")
async def verify_drops():
"""
Verify availability of dropped domains and remove taken ones.
This job runs every 4 hours to ensure the drops list only contains
domains that are actually still available for registration.
"""
logger.info("Starting drops availability verification...")
try:
from app.services.zone_file import verify_drops_availability
async with AsyncSessionLocal() as db:
result = await verify_drops_availability(
db,
batch_size=100,
max_checks=500 # Check up to 500 domains per run
)
logger.info(
f"Drops verification complete: "
f"{result['checked']} checked, "
f"{result['available']} still available, "
f"{result['removed']} removed (taken), "
f"{result['errors']} errors"
)
except Exception as e:
logger.exception(f"Drops verification failed: {e}")
async def sync_zone_files():
"""Sync zone files from Switch.ch (.ch, .li) and ICANN CZDS (gTLDs)."""
logger.info("Starting zone file sync...")

View File

@ -398,3 +398,100 @@ async def cleanup_old_snapshots(db: AsyncSession, keep_days: int = 7) -> int:
logger.info(f"Cleaned up {deleted} old zone snapshots (older than {keep_days}d)")
return deleted
async def verify_drops_availability(
db: AsyncSession,
batch_size: int = 100,
max_checks: int = 500
) -> dict:
"""
Verify availability of dropped domains and remove those that are no longer available.
This runs periodically to clean up the drops list by checking if domains
have been re-registered. If a domain is no longer available (taken),
it's removed from the drops list.
Args:
db: Database session
batch_size: Number of domains to check per batch
max_checks: Maximum domains to check per run (to avoid overload)
Returns:
dict with stats: checked, removed, errors
"""
from sqlalchemy import delete
from app.services.domain_checker import domain_checker
logger.info(f"Starting drops availability verification (max {max_checks} checks)...")
# Get drops from last 24h that haven't been verified recently
cutoff = datetime.utcnow() - timedelta(hours=24)
query = (
select(DroppedDomain)
.where(DroppedDomain.dropped_date >= cutoff)
.order_by(DroppedDomain.length.asc()) # Check short domains first (more valuable)
.limit(max_checks)
)
result = await db.execute(query)
drops = result.scalars().all()
if not drops:
logger.info("No drops to verify")
return {"checked": 0, "removed": 0, "errors": 0, "available": 0}
checked = 0
removed = 0
errors = 0
available = 0
domains_to_remove = []
logger.info(f"Verifying {len(drops)} dropped domains...")
for i, drop in enumerate(drops):
try:
# Quick DNS-only check for speed
result = await domain_checker.check_domain(drop.domain)
checked += 1
if result.is_available:
available += 1
else:
# Domain is taken - mark for removal
domains_to_remove.append(drop.id)
logger.debug(f"Domain {drop.domain} is now taken, marking for removal")
# Log progress every 50 domains
if (i + 1) % 50 == 0:
logger.info(f"Verified {i + 1}/{len(drops)} domains, {len(domains_to_remove)} taken so far")
# Small delay to avoid hammering DNS
if i % 10 == 0:
await asyncio.sleep(0.1)
except Exception as e:
errors += 1
logger.warning(f"Error checking {drop.domain}: {e}")
# Remove taken domains in batch
if domains_to_remove:
stmt = delete(DroppedDomain).where(DroppedDomain.id.in_(domains_to_remove))
await db.execute(stmt)
await db.commit()
removed = len(domains_to_remove)
logger.info(f"Removed {removed} taken domains from drops list")
logger.info(
f"Drops verification complete: "
f"{checked} checked, {available} still available, "
f"{removed} removed (taken), {errors} errors"
)
return {
"checked": checked,
"removed": removed,
"errors": errors,
"available": available
}

497
deploy.sh
View File

@ -1,14 +1,15 @@
#!/bin/bash
# ============================================================================
# POUNCE ROBUST DEPLOY PIPELINE v2.0
# POUNCE ZERO-DOWNTIME DEPLOY PIPELINE v3.0
#
# Features:
# - ZERO-DOWNTIME: Build happens while old server still runs
# - Atomic switchover only after successful build
# - Multiple connection methods (DNS, public IP, internal IP)
# - Automatic retry with exponential backoff
# - Health checks before and after deployment
# - Parallel file sync for speed
# - Graceful rollback on failure
# - Detailed logging
# ============================================================================
@ -66,20 +67,6 @@ log_warn() { log "${YELLOW}⚠ $1${NC}"; }
log_info() { log "${BLUE}$1${NC}"; }
log_debug() { log "${GRAY} $1${NC}"; }
spinner() {
local pid=$1
local delay=0.1
local spinstr='⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏'
while kill -0 "$pid" 2>/dev/null; do
local temp=${spinstr#?}
printf " %c " "$spinstr"
local spinstr=$temp${spinstr%"$temp"}
sleep $delay
printf "\b\b\b\b"
done
printf " \b\b\b\b"
}
# Check if command exists
require_cmd() {
if ! command -v "$1" >/dev/null 2>&1; then
@ -101,31 +88,33 @@ find_server() {
for host in "${SERVER_HOSTS[@]}"; do
log_debug "Trying $host..."
# Try HTTP first (faster, more reliable)
if curl -s --connect-timeout 5 --max-time 10 "http://$host:8000/health" >/dev/null 2>&1; then
log_success "Server reachable via HTTP at $host"
if curl -s --connect-timeout 5 "https://$host" >/dev/null 2>&1 || \
curl -s --connect-timeout 5 "http://$host" >/dev/null 2>&1; then
ACTIVE_HOST="$host"
return 0
fi
# Try HTTPS
if curl -s --connect-timeout 5 --max-time 10 "https://$host/api/v1/health" >/dev/null 2>&1; then
log_success "Server reachable via HTTPS at $host"
ACTIVE_HOST="$host"
return 0
fi
done
log_error "No reachable server found!"
log_error "No server reachable"
return 1
}
# Test SSH connection
# Test SSH connection with retries
test_ssh() {
local host="$1"
sshpass -p "$SERVER_PASS" ssh $SSH_OPTS "$SERVER_USER@$host" "echo 'SSH OK'" >/dev/null 2>&1
return $?
local retries="${2:-$SSH_RETRIES}"
for i in $(seq 1 $retries); do
if sshpass -p "$SERVER_PASS" ssh $SSH_OPTS "$SERVER_USER@$host" "echo 'SSH OK'" >/dev/null 2>&1; then
return 0
fi
if [ $i -lt $retries ]; then
log_debug "Retry $i/$retries in ${i}s..."
sleep $((i * 2))
fi
done
return 1
}
# Find working SSH connection
@ -134,50 +123,30 @@ find_ssh() {
for host in "${SERVER_HOSTS[@]}"; do
log_debug "Trying SSH to $host..."
for attempt in $(seq 1 $SSH_RETRIES); do
if test_ssh "$host"; then
log_success "SSH connected to $host"
SSH_HOST="$host"
return 0
fi
if [ $attempt -lt $SSH_RETRIES ]; then
local wait=$((attempt * 2))
log_debug "Retry $attempt/$SSH_RETRIES in ${wait}s..."
sleep $wait
fi
done
if test_ssh "$host" 2; then
SSH_HOST="$host"
log_success "SSH connected to $host"
return 0
fi
done
log_warn "SSH not available - will use rsync-only mode"
SSH_HOST=""
log_warn "No SSH connection available"
return 1
}
# Execute command on server with retries
# Execute remote command with timeout
remote_exec() {
local cmd="$1"
local retries="${2:-3}"
local timeout="${2:-1}" # 1=no timeout limit for builds
if [ -z "$SSH_HOST" ]; then
log_error "No SSH connection available"
log_error "No SSH connection"
return 1
fi
for attempt in $(seq 1 $retries); do
if sshpass -p "$SERVER_PASS" ssh $SSH_OPTS "$SERVER_USER@$SSH_HOST" "$cmd" 2>&1; then
return 0
fi
if [ $attempt -lt $retries ]; then
local wait=$((attempt * 2))
log_debug "Command failed, retry $attempt/$retries in ${wait}s..."
sleep $wait
fi
done
return 1
sshpass -p "$SERVER_PASS" ssh $SSH_OPTS "$SERVER_USER@$SSH_HOST" "$cmd" 2>&1 | tee -a "$LOG_FILE"
return ${PIPESTATUS[0]}
}
# ============================================================================
@ -187,15 +156,14 @@ remote_exec() {
check_api_health() {
log_info "Checking API health..."
local response
response=$(curl -s --connect-timeout 10 --max-time 30 "$API_URL" 2>/dev/null)
local status
status=$(curl -s -o /dev/null -w "%{http_code}" --connect-timeout 10 --max-time 30 "$API_URL" 2>/dev/null)
if echo "$response" | grep -q '"status":"healthy"'; then
if [ "$status" = "200" ]; then
log_success "API is healthy"
return 0
else
log_error "API health check failed"
log_debug "Response: $response"
log_error "API health check failed (HTTP $status)"
return 1
fi
}
@ -215,26 +183,6 @@ check_frontend_health() {
fi
}
wait_for_healthy() {
local service="$1"
local max_wait="${2:-60}"
local check_func="check_${service}_health"
log_info "Waiting for $service to be healthy (max ${max_wait}s)..."
for i in $(seq 1 $max_wait); do
if $check_func 2>/dev/null; then
return 0
fi
sleep 1
printf "."
done
echo ""
log_error "$service did not become healthy within ${max_wait}s"
return 1
}
# ============================================================================
# SYNC FUNCTIONS
# ============================================================================
@ -315,11 +263,11 @@ deploy_backend() {
echo 'Running database migrations...'
python -c 'from app.database import init_db; import asyncio; asyncio.run(init_db())' 2>&1 || true
# Restart service
# Graceful restart (SIGHUP for uvicorn)
if systemctl is-active --quiet pounce-backend 2>/dev/null; then
echo 'Restarting backend via systemd...'
echo '$SERVER_PASS' | sudo -S systemctl restart pounce-backend
sleep 3
echo 'Graceful backend restart via systemd...'
echo '$SERVER_PASS' | sudo -S systemctl reload-or-restart pounce-backend
sleep 2
else
echo 'Starting backend with nohup...'
pkill -f 'uvicorn app.main:app' 2>/dev/null || true
@ -336,8 +284,9 @@ deploy_backend() {
return $?
}
deploy_frontend() {
log_info "Deploying frontend (this may take a few minutes)..."
# ZERO-DOWNTIME FRONTEND DEPLOYMENT
deploy_frontend_zero_downtime() {
log_info "Zero-downtime frontend deployment..."
if [ -z "$SSH_HOST" ]; then
log_warn "SSH not available, cannot build frontend remotely"
@ -347,6 +296,10 @@ deploy_frontend() {
remote_exec "
cd $SERVER_PATH/frontend
# Create build timestamp for tracking
BUILD_ID=\$(date +%Y%m%d-%H%M%S)
echo \"Starting build \$BUILD_ID while server continues running...\"
# Check if dependencies need update
LOCKFILE_HASH=''
if [ -f '.lockfile_hash' ]; then
@ -359,40 +312,134 @@ deploy_frontend() {
npm ci --prefer-offline --no-audit --no-fund
echo \"\$CURRENT_HASH\" > .lockfile_hash
else
echo 'Dependencies up to date'
echo 'Dependencies up to date (skipping npm ci)'
fi
# Build
echo 'Building frontend...'
# ===== CRITICAL: Build WHILE old server still runs =====
echo ''
echo '━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━'
echo '🚀 Building new version (server still running)...'
echo '━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━'
echo ''
# Build to .next directory
NEXT_PUBLIC_API_URL=https://pounce.ch/api/v1 NODE_OPTIONS='--max-old-space-size=2048' npm run build
if [ \$? -eq 0 ]; then
# Setup standalone
mkdir -p .next/standalone/.next
ln -sfn ../../static .next/standalone/.next/static
rm -rf .next/standalone/public
cp -r public .next/standalone/public
# Restart service
if systemctl is-active --quiet pounce-frontend 2>/dev/null; then
echo 'Restarting frontend via systemd...'
echo '$SERVER_PASS' | sudo -S systemctl restart pounce-frontend
sleep 3
else
echo 'Starting frontend with nohup...'
pkill -f 'node .next/standalone/server.js' 2>/dev/null || true
lsof -ti:3000 | xargs -r kill -9 2>/dev/null || true
sleep 1
cd $SERVER_PATH/frontend
nohup env NODE_ENV=production HOSTNAME=0.0.0.0 PORT=3000 BACKEND_URL=http://127.0.0.1:8000 node .next/standalone/server.js > /tmp/frontend.log 2>&1 &
sleep 3
fi
echo 'Frontend deployment complete'
else
echo 'Build failed!'
if [ \$? -ne 0 ]; then
echo '❌ Build failed! Server continues with old version.'
exit 1
fi
echo ''
echo '━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━'
echo '✅ Build successful! Preparing atomic switchover...'
echo '━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━'
echo ''
# Setup standalone directory with new build
mkdir -p .next/standalone/.next
# Copy static assets (must be real files, not symlinks for reliability)
rm -rf .next/standalone/.next/static
cp -r .next/static .next/standalone/.next/
rm -rf .next/standalone/public
cp -r public .next/standalone/public
echo 'New build prepared. Starting atomic switchover...'
# ===== ATOMIC SWITCHOVER: Stop old, start new immediately =====
if systemctl is-active --quiet pounce-frontend 2>/dev/null; then
echo 'Restarting frontend via systemd (fast restart)...'
echo '$SERVER_PASS' | sudo -S systemctl restart pounce-frontend
sleep 2
else
# Manual restart - minimize gap
echo 'Manual restart - minimizing downtime...'
# Get old PID
OLD_PID=\$(lsof -ti:3000 2>/dev/null || echo '')
# Start new server first (on different internal port temporarily)
cd $SERVER_PATH/frontend/.next/standalone
NODE_ENV=production HOSTNAME=0.0.0.0 PORT=3001 BACKEND_URL=http://127.0.0.1:8000 node server.js &
NEW_PID=\$!
sleep 3
# Verify new server is healthy
if curl -s -o /dev/null -w '%{http_code}' http://localhost:3001 | grep -q '200'; then
echo 'New server healthy on port 3001'
# Kill old server
if [ -n \"\$OLD_PID\" ]; then
kill -9 \$OLD_PID 2>/dev/null || true
fi
# Kill new server on temp port and restart on correct port
kill -9 \$NEW_PID 2>/dev/null || true
sleep 1
# Start on correct port
cd $SERVER_PATH/frontend/.next/standalone
nohup env NODE_ENV=production HOSTNAME=0.0.0.0 PORT=3000 BACKEND_URL=http://127.0.0.1:8000 node server.js > /tmp/frontend.log 2>&1 &
sleep 2
echo 'New server running on port 3000'
else
echo '⚠️ New server failed health check, keeping old server'
kill -9 \$NEW_PID 2>/dev/null || true
exit 1
fi
fi
echo ''
echo '✅ Zero-downtime deployment complete!'
echo \"Build ID: \$BUILD_ID\"
" 1
return $?
}
# Legacy deploy (with downtime) - kept as fallback
deploy_frontend_legacy() {
log_info "Deploying frontend (legacy mode with downtime)..."
if [ -z "$SSH_HOST" ]; then
log_warn "SSH not available, cannot build frontend remotely"
return 1
fi
remote_exec "
cd $SERVER_PATH/frontend
# Stop server during build
echo 'Stopping server for rebuild...'
if systemctl is-active --quiet pounce-frontend 2>/dev/null; then
echo '$SERVER_PASS' | sudo -S systemctl stop pounce-frontend
else
pkill -f 'node .next/standalone/server.js' 2>/dev/null || true
lsof -ti:3000 | xargs -r kill -9 2>/dev/null || true
fi
# Install & build
npm ci --prefer-offline --no-audit --no-fund
NEXT_PUBLIC_API_URL=https://pounce.ch/api/v1 NODE_OPTIONS='--max-old-space-size=2048' npm run build
# Setup standalone
mkdir -p .next/standalone/.next
rm -rf .next/standalone/.next/static
cp -r .next/static .next/standalone/.next/
rm -rf .next/standalone/public
cp -r public .next/standalone/public
# Start server
if systemctl is-active --quiet pounce-frontend 2>/dev/null; then
echo '$SERVER_PASS' | sudo -S systemctl start pounce-frontend
else
cd $SERVER_PATH/frontend/.next/standalone
nohup env NODE_ENV=production HOSTNAME=0.0.0.0 PORT=3000 BACKEND_URL=http://127.0.0.1:8000 node server.js > /tmp/frontend.log 2>&1 &
fi
sleep 3
echo 'Frontend deployment complete'
" 1
return $?
@ -433,99 +480,80 @@ deploy() {
local commit_msg="${2:-}"
echo -e "\n${BOLD}${BLUE}╔════════════════════════════════════════════════════════════════╗${NC}"
echo -e "${BOLD}${BLUE} POUNCE DEPLOY PIPELINE v2.0 ${NC}"
echo -e "${BOLD}${BLUE}║ POUNCE ZERO-DOWNTIME DEPLOY v3.0 ║${NC}"
echo -e "${BOLD}${BLUE}╚════════════════════════════════════════════════════════════════╝${NC}\n"
log_info "Mode: $mode"
log_info "Log: $LOG_FILE"
log_info "Mode: ${CYAN}$mode${NC}"
log_info "Log: ${CYAN}$LOG_FILE${NC}"
local start_time=$(date +%s)
local errors=0
local start_time=$(date +%s)
# Step 1: Find server
# Phase 1: Connectivity
echo -e "\n${BOLD}[1/5] Connectivity${NC}"
if ! find_server; then
log_error "Cannot reach server, aborting"
exit 1
fi
find_ssh || true
find_server || { log_error "Cannot reach server"; exit 1; }
find_ssh || log_warn "SSH unavailable - sync-only mode"
# Step 2: Pre-deploy health check
# Phase 2: Pre-deploy health check
echo -e "\n${BOLD}[2/5] Pre-deploy Health Check${NC}"
check_api_health || log_warn "API not healthy before deploy"
check_frontend_health || log_warn "Frontend not healthy before deploy"
check_api_health || ((errors++))
check_frontend_health || ((errors++))
# Step 3: Git (unless quick mode)
if [ "$mode" != "quick" ] && [ "$mode" != "sync" ]; then
echo -e "\n${BOLD}[3/5] Git${NC}"
git_commit_push "$commit_msg"
# Phase 3: Git (skip in quick mode)
echo -e "\n${BOLD}[3/5] Git${NC}"
if [ "$mode" = "quick" ] || [ "$mode" = "sync" ]; then
echo -e " ${GRAY}(skipped)${NC}"
else
echo -e "\n${BOLD}[3/5] Git${NC} ${GRAY}(skipped)${NC}"
git_commit_push "$commit_msg"
fi
# Step 4: Sync and Deploy
# Phase 4: Sync & Deploy
echo -e "\n${BOLD}[4/5] Sync & Deploy${NC}"
case "$mode" in
backend|-b)
backend)
sync_backend || ((errors++))
deploy_backend || ((errors++))
;;
frontend|-f)
frontend)
sync_frontend || ((errors++))
deploy_frontend || ((errors++))
deploy_frontend_zero_downtime || ((errors++))
;;
sync|-s)
sync)
sync_backend || ((errors++))
sync_frontend || ((errors++))
log_warn "Sync only - services not restarted"
;;
quick|-q)
sync_backend || ((errors++))
sync_frontend || ((errors++))
deploy_backend || ((errors++))
deploy_frontend || ((errors++))
;;
*)
# Full or quick deploy
sync_backend || ((errors++))
sync_frontend || ((errors++))
deploy_backend || ((errors++))
deploy_frontend || ((errors++))
deploy_frontend_zero_downtime || ((errors++))
;;
esac
# Step 5: Post-deploy health check
# Phase 5: Post-deploy health check
echo -e "\n${BOLD}[5/5] Post-deploy Health Check${NC}"
sleep 5
if ! check_api_health; then
log_error "API health check failed after deploy!"
((errors++))
fi
if ! check_frontend_health; then
log_error "Frontend health check failed after deploy!"
((errors++))
fi
sleep 3 # Give services time to start
check_api_health || ((errors++))
check_frontend_health || ((errors++))
# Summary
local end_time=$(date +%s)
local duration=$((end_time - start_time))
echo -e "\n${BOLD}════════════════════════════════════════════════════════════════${NC}"
if [ $errors -eq 0 ]; then
echo -e "${GREEN}${BOLD}✅ DEPLOY SUCCESSFUL${NC} (${duration}s)"
echo -e "${GREEN}${BOLD} ZERO-DOWNTIME DEPLOY SUCCESSFUL${NC} (${duration}s)"
else
echo -e "${RED}${BOLD}⚠️ DEPLOY COMPLETED WITH $errors ERROR(S)${NC} (${duration}s)"
fi
echo -e "${BOLD}════════════════════════════════════════════════════════════════${NC}\n"
echo -e "${BOLD}════════════════════════════════════════════════════════════════${NC}"
echo -e ""
echo -e " ${CYAN}Frontend:${NC} $FRONTEND_URL"
echo -e " ${CYAN}API:${NC} $API_URL"
echo -e " ${CYAN}Log:${NC} $LOG_FILE"
echo -e ""
echo ""
return $errors
}
@ -535,70 +563,89 @@ deploy() {
# ============================================================================
show_help() {
echo -e "${BOLD}Pounce Deploy Pipeline${NC}"
echo "Usage: $0 [command] [options]"
echo ""
echo -e "${CYAN}Usage:${NC}"
echo " ./deploy.sh [mode] [commit message]"
echo "Commands:"
echo " full Full deploy (default) - git, sync, build, restart"
echo " quick Skip git commit/push"
echo " backend Deploy backend only"
echo " frontend Deploy frontend only"
echo " sync Sync files only (no build/restart)"
echo " status Show server status"
echo " health Run health checks only"
echo " legacy Use legacy deploy (with downtime)"
echo ""
echo -e "${CYAN}Modes:${NC}"
echo " full, -a Full deploy (default) - git, sync, build, restart"
echo " quick, -q Quick deploy - sync & restart, no git"
echo " backend, -b Backend only"
echo " frontend, -f Frontend only"
echo " sync, -s Sync files only, no restart"
echo " status Check server status"
echo " health Run health checks"
echo ""
echo -e "${CYAN}Examples:${NC}"
echo " ./deploy.sh # Full deploy"
echo " ./deploy.sh -q # Quick deploy"
echo " ./deploy.sh -b # Backend only"
echo " ./deploy.sh \"fix: bug fix\" # Full deploy with commit message"
echo "Options:"
echo " -m MSG Commit message"
echo " -h Show this help"
echo ""
echo "Examples:"
echo " $0 # Full zero-downtime deploy"
echo " $0 quick # Quick deploy (skip git)"
echo " $0 frontend # Frontend only"
echo " $0 -m 'feat: new' # Full with commit message"
}
status_check() {
echo -e "${BOLD}Server Status${NC}\n"
# Main
main() {
require_cmd sshpass
require_cmd rsync
require_cmd curl
require_cmd git
find_server
find_ssh
local command="full"
local commit_msg=""
echo ""
check_api_health
check_frontend_health
while [[ $# -gt 0 ]]; do
case $1 in
full|quick|backend|frontend|sync)
command="$1"
shift
;;
legacy)
# Override frontend deploy function
deploy_frontend_zero_downtime() { deploy_frontend_legacy; }
command="full"
shift
;;
status)
find_server && find_ssh
if [ -n "$SSH_HOST" ]; then
remote_exec "
echo '=== Services ==='
systemctl status pounce-backend --no-pager 2>/dev/null | head -5 || echo 'Backend: manual mode'
systemctl status pounce-frontend --no-pager 2>/dev/null | head -5 || echo 'Frontend: manual mode'
echo ''
echo '=== Ports ==='
ss -tlnp | grep -E ':(3000|8000)' || echo 'No services on expected ports'
"
fi
exit 0
;;
health)
find_server
check_api_health
check_frontend_health
exit 0
;;
-m)
shift
commit_msg="$1"
shift
;;
-h|--help)
show_help
exit 0
;;
*)
log_error "Unknown option: $1"
show_help
exit 1
;;
esac
done
if [ -n "$SSH_HOST" ]; then
echo ""
log_info "Server uptime:"
remote_exec "uptime" 1 || true
echo ""
log_info "Service status:"
remote_exec "systemctl is-active pounce-backend pounce-frontend 2>/dev/null || echo 'Services not using systemd'" 1 || true
fi
deploy "$command" "$commit_msg"
}
# ============================================================================
# MAIN
# ============================================================================
require_cmd sshpass
require_cmd rsync
require_cmd curl
require_cmd git
case "${1:-full}" in
help|-h|--help)
show_help
;;
status)
status_check
;;
health)
check_api_health
check_frontend_health
;;
*)
deploy "$@"
;;
esac
main "$@"

View File

@ -35,7 +35,6 @@ export function Header() {
const publicNavItems = [
{ href: '/discover', label: 'Discover', icon: TrendingUp },
{ href: '/acquire', label: 'Acquire', icon: Gavel },
{ href: '/intelligence', label: 'Intel', icon: TrendingUp },
{ href: '/yield', label: 'Yield', icon: Coins },
{ href: '/pricing', label: 'Pricing', icon: CreditCard },
]