diff --git a/.gitea/workflows/deploy.yml b/.gitea/workflows/deploy.yml
index 1a2c5d6..f0651d9 100644
--- a/.gitea/workflows/deploy.yml
+++ b/.gitea/workflows/deploy.yml
@@ -57,6 +57,8 @@ jobs:
STRIPE_WEBHOOK_SECRET: ${{ secrets.STRIPE_WEBHOOK_SECRET }}
GOOGLE_CLIENT_SECRET: ${{ secrets.GOOGLE_CLIENT_SECRET }}
GITHUB_CLIENT_SECRET: ${{ secrets.GITHUB_CLIENT_SECRET }}
+ CZDS_USERNAME: ${{ secrets.CZDS_USERNAME }}
+ CZDS_PASSWORD: ${{ secrets.CZDS_PASSWORD }}
run: |
# Stop existing container
docker stop pounce-backend 2>/dev/null || true
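+          # Raise /dev/shm above Docker's 64MB default (--shm-size below); kept as
+          # headroom even though zone parsing now prefers the persistent data volume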
@@ -71,10 +73,13 @@ jobs:
--name pounce-backend \
--network n0488s44osgoow4wgo04ogg0 \
--restart unless-stopped \
+ --shm-size=8g \
-v /data/pounce/zones/czds:/data/czds \
-v /data/pounce/zones/switch:/data/switch \
-v /data/pounce/logs:/data/logs \
-e CZDS_DATA_DIR="/data/czds" \
+ -e CZDS_USERNAME="${CZDS_USERNAME}" \
+ -e CZDS_PASSWORD="${CZDS_PASSWORD}" \
-e SWITCH_DATA_DIR="/data/switch" \
-e ZONE_RETENTION_DAYS="3" \
-e DATABASE_URL="${DATABASE_URL}" \
diff --git a/backend/app/scheduler.py b/backend/app/scheduler.py
index ba84f47..4c134f2 100644
--- a/backend/app/scheduler.py
+++ b/backend/app/scheduler.py
@@ -726,14 +726,16 @@ def setup_scheduler():
replace_existing=True,
)
- # Drops availability verification (every 10 minutes - remove taken domains)
- scheduler.add_job(
- verify_drops,
- CronTrigger(minute='*/10'), # Every 10 minutes
- id="drops_verification",
- name="Drops Availability Check (10-min)",
- replace_existing=True,
- )
+ # Drops availability verification - DISABLED to prevent RDAP bans
+ # The domains from zone files are already verified as "dropped" by the zone diff
+ # We don't need to double-check via RDAP - this causes rate limiting!
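+    # Users can still trigger a per-domain check from the Drops UI, which keeps
+    # RDAP traffic proportional to actual interest instead of bulk re-checking.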
+ # scheduler.add_job(
+ # verify_drops,
+ # CronTrigger(hour=12, minute=0), # Once a day at noon if needed
+ # id="drops_verification",
+ # name="Drops Availability Check (daily)",
+ # replace_existing=True,
+ # )
logger.info(
f"Scheduler configured:"
@@ -743,10 +745,11 @@ def setup_scheduler():
f"\n - TLD price scrape 2x daily at 03:00 & 15:00 UTC"
f"\n - Price change alerts at 04:00 & 16:00 UTC"
f"\n - Auction scrape every 2 hours at :30"
- f"\n - Expired auction cleanup every 15 minutes"
+ f"\n - Expired auction cleanup every 5 minutes"
f"\n - Sniper alert matching every 30 minutes"
- f"\n - Zone file sync daily at 05:00 UTC"
- f"\n - Drops availability check every 10 minutes"
+ f"\n - Switch.ch zone sync daily at 05:00 UTC (.ch, .li)"
+ f"\n - ICANN CZDS zone sync daily at 06:00 UTC (gTLDs)"
+ f"\n - Zone cleanup hourly at :45"
)
diff --git a/backend/app/services/czds_client.py b/backend/app/services/czds_client.py
index c11b5a3..0a0ca6c 100644
--- a/backend/app/services/czds_client.py
+++ b/backend/app/services/czds_client.py
@@ -227,11 +227,43 @@ class CZDSClient:
return None
async def save_domains(self, tld: str, domains: set[str]):
- """Save current domains to cache file."""
+ """Save current domains to cache file with date-based retention."""
+ from app.config import get_settings
+ settings = get_settings()
+
+ # Save current file (for next sync comparison)
cache_file = self.data_dir / f"{tld}_domains.txt"
cache_file.write_text("\n".join(sorted(domains)))
+
+ # Also save dated snapshot for retention
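+        # (first sync of the day wins; later syncs do not overwrite the dated file)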
+ today = datetime.now().strftime("%Y-%m-%d")
+ dated_file = self.data_dir / f"{tld}_domains_{today}.txt"
+ if not dated_file.exists():
+ dated_file.write_text("\n".join(sorted(domains)))
+ logger.info(f"Saved snapshot: {dated_file.name}")
+
+ # Cleanup old snapshots (keep last N days)
+ retention_days = getattr(settings, 'zone_retention_days', 3)
+ await self._cleanup_old_snapshots(tld, retention_days)
+
logger.info(f"Saved {len(domains):,} domains for .{tld}")
+ async def _cleanup_old_snapshots(self, tld: str, keep_days: int = 3):
+ """Remove zone file snapshots older than keep_days."""
+ import re
+ from datetime import timedelta
+
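+        # Local clock on purpose: it matches the local date save_domains() uses
+        # to name the snapshots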
+ cutoff = datetime.now() - timedelta(days=keep_days)
+ pattern = re.compile(rf"^{tld}_domains_(\d{{4}}-\d{{2}}-\d{{2}})\.txt$")
+
+ for file in self.data_dir.glob(f"{tld}_domains_*.txt"):
+ match = pattern.match(file.name)
+ if match:
+ file_date = datetime.strptime(match.group(1), "%Y-%m-%d")
+ if file_date < cutoff:
+ file.unlink()
+ logger.info(f"Deleted old snapshot: {file.name}")
+
async def process_drops(
self,
db: AsyncSession,
@@ -240,87 +272,66 @@ class CZDSClient:
current: set[str]
) -> list[dict]:
"""
- Find dropped domains and verify they are ACTUALLY available before storing.
+ Find dropped domains and store them directly.
- Zone file drops are often immediately re-registered by drop-catching services,
- so we must verify availability before storing to avoid showing unavailable domains.
+ NOTE: We do NOT verify availability here to avoid RDAP rate limits/bans.
+        Availability is instead checked on demand (the per-domain status check in
+        the Drops UI); the scheduled 'verify_drops' job is disabled in setup_scheduler().
"""
- from app.services.domain_checker import domain_checker
-
dropped = previous - current
if not dropped:
logger.info(f"No dropped domains found for .{tld}")
return []
- logger.info(f"Found {len(dropped):,} potential drops for .{tld}, verifying availability...")
+ logger.info(f"Found {len(dropped):,} dropped domains for .{tld}, saving to database...")
today = datetime.utcnow().replace(hour=0, minute=0, second=0, microsecond=0)
- # Filter to valuable domains first (short, no numbers, no hyphens)
- valuable_drops = [
- name for name in dropped
- if len(name) <= 10 and not name.isdigit() and '-' not in name
- ]
-
- # Also include some longer domains (up to 500 total)
- other_drops = [
- name for name in dropped
- if name not in valuable_drops and len(name) <= 15
- ][:max(0, 500 - len(valuable_drops))]
-
- candidates = valuable_drops + other_drops
- logger.info(f"Checking availability of {len(candidates)} candidates (of {len(dropped):,} total drops)")
-
- # Verify availability and only store truly available domains
+ # Store all drops - availability will be verified separately
dropped_records = []
- available_count = 0
- checked_count = 0
+ batch_size = 1000
+ dropped_list = list(dropped)
- for i, name in enumerate(candidates):
- full_domain = f"{name}.{tld}"
+ for i in range(0, len(dropped_list), batch_size):
+ batch = dropped_list[i:i + batch_size]
- try:
- # Quick DNS check
- result = await domain_checker.check_domain(full_domain)
- checked_count += 1
-
- if result.is_available:
- available_count += 1
+ for name in batch:
+ try:
record = DroppedDomain(
- domain=full_domain,
+ domain=name, # Just the name, not full domain!
tld=tld,
dropped_date=today,
length=len(name),
is_numeric=name.isdigit(),
- has_hyphen='-' in name
+ has_hyphen='-' in name,
+                        availability_status='unknown'  # Checked on demand, not by a scheduled job
)
db.add(record)
dropped_records.append({
- "domain": full_domain,
+ "domain": f"{name}.{tld}",
"length": len(name),
- "is_numeric": name.isdigit(),
- "has_hyphen": '-' in name
})
-
- # Progress log every 100 domains
- if (i + 1) % 100 == 0:
- logger.info(f"Verified {i + 1}/{len(candidates)}: {available_count} available so far")
-
- # Small delay to avoid rate limiting
- if i % 20 == 0:
- await asyncio.sleep(0.1)
-
- except Exception as e:
- logger.warning(f"Error checking {full_domain}: {e}")
+                except Exception:
+                    # Record construction failed - skip; duplicates only surface
+                    # at commit, not at db.add()
+                    pass
+
+            # Commit batch. Note: an IntegrityError (e.g. a duplicate domain)
+            # rolls back the entire batch, not just the offending row.
+            try:
+                await db.commit()
+            except Exception:
+                await db.rollback()
+ if (i + batch_size) % 5000 == 0:
+ logger.info(f"Saved {min(i + batch_size, len(dropped_list)):,}/{len(dropped_list):,} drops")
- await db.commit()
+ # Final commit
+ try:
+ await db.commit()
+ except Exception:
+ await db.rollback()
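+        # Possible hardening (a sketch, assuming PostgreSQL and that (domain, tld)
+        # carries the unique constraint): bulk-insert with ON CONFLICT DO NOTHING,
+        # so a single duplicate cannot roll back a whole batch:
+        #
+        #   from sqlalchemy.dialects.postgresql import insert as pg_insert
+        #   stmt = pg_insert(DroppedDomain).on_conflict_do_nothing(
+        #       index_elements=["domain", "tld"]
+        #   )
+        #   await db.execute(stmt, rows)  # rows: list of per-domain column dicts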
- logger.info(
- f"CZDS drops for .{tld}: "
- f"{checked_count} verified, {available_count} actually available, "
- f"{len(dropped_records)} stored"
- )
+ logger.info(f"CZDS drops for .{tld}: {len(dropped_records):,} saved (verification pending)")
return dropped_records
@@ -371,7 +382,9 @@ class CZDSClient:
result["current_count"] = len(current_domains)
# Clean up zone file (can be very large)
- zone_path.unlink()
+ # Note: Parser may have already deleted the file during cleanup_ram_drive()
+ if zone_path.exists():
+ zone_path.unlink()
# Get previous snapshot
previous_domains = await self.get_previous_domains(tld)
diff --git a/backend/app/services/zone_file.py b/backend/app/services/zone_file.py
index edae707..0e81d3b 100644
--- a/backend/app/services/zone_file.py
+++ b/backend/app/services/zone_file.py
@@ -181,88 +181,65 @@ class ZoneFileService:
current: set[str]
) -> list[dict]:
"""
- Find dropped domains and verify they are ACTUALLY available before storing.
+ Find dropped domains and store them directly.
- Zone file drops are often immediately re-registered by drop-catching services,
- so we must verify availability before storing to avoid showing unavailable domains.
+ NOTE: We do NOT verify availability via RDAP here to avoid rate limits/bans.
+ Zone file diff is already a reliable signal that the domain was dropped.
"""
- from app.services.domain_checker import domain_checker
-
dropped = previous - current
if not dropped:
logger.info(f"No dropped domains found for .{tld}")
return []
- logger.info(f"Found {len(dropped)} potential drops for .{tld}, verifying availability...")
+ logger.info(f"Found {len(dropped):,} dropped domains for .{tld}, saving to database...")
today = datetime.utcnow().replace(hour=0, minute=0, second=0, microsecond=0)
- # Filter to valuable domains first (short, no numbers, no hyphens)
- # This reduces the number of availability checks needed
- valuable_drops = [
- name for name in dropped
- if len(name) <= 10 and not name.isdigit() and '-' not in name
- ]
-
- # Also include some longer domains (up to 500 total)
- other_drops = [
- name for name in dropped
- if name not in valuable_drops and len(name) <= 15
- ][:max(0, 500 - len(valuable_drops))]
-
- candidates = valuable_drops + other_drops
- logger.info(f"Checking availability of {len(candidates)} candidates (of {len(dropped)} total drops)")
-
- # Verify availability and only store truly available domains
+ # Store all drops - no RDAP verification (prevents bans!)
dropped_records = []
- available_count = 0
- checked_count = 0
+ batch_size = 1000
+ dropped_list = list(dropped)
- for i, name in enumerate(candidates):
- full_domain = f"{name}.{tld}"
+ for i in range(0, len(dropped_list), batch_size):
+ batch = dropped_list[i:i + batch_size]
- try:
- # Quick DNS check
- result = await domain_checker.check_domain(full_domain)
- checked_count += 1
-
- if result.is_available:
- available_count += 1
+ for name in batch:
+ try:
record = DroppedDomain(
- domain=full_domain,
+ domain=name, # Just the name, not full domain!
tld=tld,
dropped_date=today,
length=len(name),
is_numeric=name.isdigit(),
- has_hyphen='-' in name
+ has_hyphen='-' in name,
+ availability_status='unknown'
)
db.add(record)
dropped_records.append({
- "domain": full_domain,
+ "domain": f"{name}.{tld}",
"length": len(name),
- "is_numeric": name.isdigit(),
- "has_hyphen": '-' in name
})
-
- # Progress log every 100 domains
- if (i + 1) % 100 == 0:
- logger.info(f"Verified {i + 1}/{len(candidates)}: {available_count} available so far")
-
- # Small delay to avoid rate limiting
- if i % 20 == 0:
- await asyncio.sleep(0.1)
-
- except Exception as e:
- logger.warning(f"Error checking {full_domain}: {e}")
+                except Exception:
+                    # Record construction failed - skip; duplicates only surface
+                    # at commit, not at db.add()
+                    pass
+
+            # Commit batch. A duplicate anywhere in the batch rolls the whole
+            # batch back at commit time.
+            try:
+                await db.commit()
+            except Exception:
+                await db.rollback()
+
+ if (i + batch_size) % 5000 == 0:
+ logger.info(f"Saved {min(i + batch_size, len(dropped_list)):,}/{len(dropped_list):,} drops")
- await db.commit()
+ # Final commit
+ try:
+ await db.commit()
+ except Exception:
+ await db.rollback()
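+        # (The ON CONFLICT DO NOTHING hardening sketched in czds_client.process_drops
+        # would apply here unchanged.)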
- logger.info(
- f"Zone file drops for .{tld}: "
- f"{checked_count} verified, {available_count} actually available, "
- f"{len(dropped_records)} stored"
- )
+ logger.info(f"Zone drops for .{tld}: {len(dropped_records):,} saved (verification pending)")
return dropped_records
diff --git a/backend/app/services/zone_file_parser.py b/backend/app/services/zone_file_parser.py
index e11d62b..2bb9840 100644
--- a/backend/app/services/zone_file_parser.py
+++ b/backend/app/services/zone_file_parser.py
@@ -44,16 +44,34 @@ def get_optimal_workers() -> int:
def get_ram_drive_path() -> Optional[Path]:
"""
- Get path to RAM drive if available.
- Linux: /dev/shm (typically 50% of RAM)
- macOS: /tmp is often memory-backed
+ Get path for temporary zone file processing.
+
+ Priority:
+ 1. CZDS_DATA_DIR environment variable (persistent storage)
+ 2. /data/czds (Docker volume mount)
+ 3. /tmp fallback
+
+ Note: We avoid /dev/shm in Docker as it's typically limited to 64MB.
+ With 1.7TB disk and NVMe, disk-based processing is fast enough.
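+    The deploy config also raises the container's --shm-size to 8g, but a
+    disk-backed path additionally survives container restarts.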
"""
- # Linux RAM drive
- if os.path.exists("/dev/shm"):
- shm_path = Path("/dev/shm/pounce_zones")
+ from app.config import get_settings
+
+ # Use configured data directory (mounted volume)
+ settings = get_settings()
+ if settings.czds_data_dir:
+ data_path = Path(settings.czds_data_dir) / "tmp"
try:
- shm_path.mkdir(parents=True, exist_ok=True)
- return shm_path
+ data_path.mkdir(parents=True, exist_ok=True)
+ return data_path
+ except PermissionError:
+ pass
+
+ # Docker volume mount
+ if os.path.exists("/data/czds"):
+ data_path = Path("/data/czds/tmp")
+ try:
+ data_path.mkdir(parents=True, exist_ok=True)
+ return data_path
except PermissionError:
pass
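+
+    # Otherwise fall through to the /tmp fallback below (priority 3 above)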
diff --git a/frontend/Dockerfile b/frontend/Dockerfile
index 41021cb..1d96de7 100644
--- a/frontend/Dockerfile
+++ b/frontend/Dockerfile
@@ -15,7 +15,9 @@ COPY . .
# Build arguments
ARG NEXT_PUBLIC_API_URL
+ARG BACKEND_URL
ENV NEXT_PUBLIC_API_URL=${NEXT_PUBLIC_API_URL}
+ENV BACKEND_URL=${BACKEND_URL}
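+# BACKEND_URL stays server-side only; NEXT_PUBLIC_* values are inlined into the
+# client bundle at build time (Next.js convention)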
ENV NODE_OPTIONS="--max-old-space-size=2048"
ENV NEXT_TELEMETRY_DISABLED=1
diff --git a/frontend/src/components/hunt/DropsTab.tsx b/frontend/src/components/hunt/DropsTab.tsx
index 94faf12..c7ce156 100644
--- a/frontend/src/components/hunt/DropsTab.tsx
+++ b/frontend/src/components/hunt/DropsTab.tsx
@@ -559,12 +559,41 @@ export function DropsTab({ showToast }: DropsTabProps) {
const isTrackingThis = trackingDrop === item.id
const status = item.availability_status || 'unknown'
- // Simplified status display config
+ // Status display config with better labels
+ const countdown = item.deletion_date ? formatCountdown(item.deletion_date) : null
const statusConfig = {
- available: { label: 'Available', color: 'text-accent', bg: 'bg-accent/10', border: 'border-accent/30', icon: CheckCircle2 },
- dropping_soon: { label: 'Dropping Soon', color: 'text-amber-400', bg: 'bg-amber-400/10', border: 'border-amber-400/30', icon: Clock },
- taken: { label: 'Taken', color: 'text-rose-400', bg: 'bg-rose-400/10', border: 'border-rose-400/30', icon: Ban },
- unknown: { label: 'Check', color: 'text-white/50', bg: 'bg-white/5', border: 'border-white/20', icon: Search },
+ available: {
+ label: 'Available Now',
+ color: 'text-accent',
+ bg: 'bg-accent/10',
+ border: 'border-accent/30',
+ icon: CheckCircle2,
+ showBuy: true,
+ },
+ dropping_soon: {
+ label: countdown ? `In Transition • ${countdown}` : 'In Transition',
+ color: 'text-amber-400',
+ bg: 'bg-amber-400/10',
+ border: 'border-amber-400/30',
+ icon: Clock,
+ showBuy: false,
+ },
+ taken: {
+ label: 'Re-registered',
+ color: 'text-rose-400/60',
+ bg: 'bg-rose-400/5',
+ border: 'border-rose-400/20',
+ icon: Ban,
+ showBuy: false,
+ },
+ unknown: {
+ label: 'Check Status',
+ color: 'text-white/50',
+ bg: 'bg-white/5',
+ border: 'border-white/20',
+ icon: Search,
+ showBuy: false,
+ },
}[status]
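+    // NOTE: an availability_status outside these four keys leaves statusConfig
+    // undefined and would crash at statusConfig.icon below; the `|| 'unknown'`
+    // fallback above only guards against null/empty values.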
const StatusIcon = statusConfig.icon
@@ -594,14 +623,12 @@ export function DropsTab({ showToast }: DropsTabProps) {
onClick={() => checkStatus(item.id, fullDomain)}
disabled={isChecking}
className={clsx(
- "text-[10px] font-mono font-bold px-2.5 py-1 border flex items-center gap-1",
+ "text-[10px] font-mono font-bold px-2.5 py-1 border flex items-center gap-1.5",
statusConfig.color, statusConfig.bg, statusConfig.border
)}
>
{isChecking ?