# pounce/backend/app/services/llm_agent.py
# LLM Agent: tool-calling endpoint + HunterCompanion uses /llm/agent
from __future__ import annotations
import json
from typing import Any, AsyncIterator, Optional
from sqlalchemy.ext.asyncio import AsyncSession
from app.config import get_settings
from app.models.subscription import Subscription, SubscriptionTier
from app.models.user import User
from app.services.llm_gateway import chat_completions, chat_completions_stream
from app.services.llm_tools import execute_tool, tool_catalog_for_prompt
settings = get_settings()
def _tier_level(tier: str) -> int:
t = (tier or "").lower()
if t == "tycoon":
return 3
if t == "trader":
return 2
return 1
async def _get_user_tier(db: AsyncSession, user: User) -> str:
    """Return the user's subscription tier name, defaulting to "scout" when no row exists."""
    from sqlalchemy import select

    result = await db.execute(
        select(Subscription).where(Subscription.user_id == user.id)
    )
    subscription = result.scalar_one_or_none()
    return subscription.tier.value if subscription else "scout"
def _build_system_prompt(path: str) -> str:
    """Compose the agent system prompt, embedding the JSON tool catalog for *path*."""
    catalog_json = json.dumps(tool_catalog_for_prompt(path), ensure_ascii=False)
    # Assembled from parts; the resulting text is identical to the previous
    # single concatenated literal.
    parts = [
        "You are the Pounce Hunter Companion (domain trading expert). Always respond in English.\n",
        "You have access to internal tools that return live data. Use tools when needed.\n\n",
        "TOOL CALLING PROTOCOL:\n",
        "- If you need data, respond with ONLY a JSON object:\n",
        ' {"tool_calls":[{"name":"tool_name","args":{...}}, ...]}\n',
        "- Do not include any other text when requesting tools.\n",
        "- After tools are executed, you will receive TOOL_RESULT messages.\n",
        "- When you are ready to answer the user, respond normally (not JSON).\n\n",
        "AVAILABLE TOOLS (JSON schemas):\n",
        f"{catalog_json}\n\n",
        "RULES:\n",
        "- Never claim you checked external sources unless the user provided the data.\n",
        "- Keep answers practical and decisive. If domain-related: include BUY/CONSIDER/SKIP + bullets.\n",
    ]
    return "".join(parts)
def _try_parse_tool_calls(text: str) -> Optional[list[dict[str, Any]]]:
t = (text or "").strip()
if not (t.startswith("{") and "tool_calls" in t):
return None
try:
obj = json.loads(t)
except Exception:
return None
calls = obj.get("tool_calls")
if not isinstance(calls, list):
return None
out: list[dict[str, Any]] = []
for c in calls:
if not isinstance(c, dict):
continue
name = c.get("name")
args = c.get("args") or {}
if isinstance(name, str) and isinstance(args, dict):
out.append({"name": name, "args": args})
return out or None
def _truncate_json(value: Any, max_chars: int = 8000) -> str:
s = json.dumps(value, ensure_ascii=False)
if len(s) <= max_chars:
return s
return s[: max_chars - 3] + "..."
async def run_agent(
    db: AsyncSession,
    user: User,
    *,
    messages: list[dict[str, Any]],
    path: str,
    model: Optional[str] = None,
    temperature: float = 0.7,
    max_steps: int = 6,
) -> list[dict[str, Any]]:
    """
    Runs a small tool loop to augment context, returning final messages to be used
    for the final answer generation (optionally streamed).

    Args:
        db: Async DB session used for tier lookup and tool execution.
        user: The requesting user; gates access by subscription tier.
        messages: Prior chat messages (role/content dicts) from the client.
        path: Current terminal path; selects the tool catalog and is passed to tools.
        model: LLM model override; falls back to settings.llm_default_model.
        temperature: Sampling temperature for each step.
        max_steps: Maximum number of model round-trips before forcing an answer.

    Raises:
        PermissionError: If the user's tier is below "trader" (level 2).
    """
    # Gate by subscription: only "trader" (2) and "tycoon" (3) may use the agent.
    tier = await _get_user_tier(db, user)
    if _tier_level(tier) < 2:
        raise PermissionError("Chat is available on Trader and Tycoon plans. Upgrade to unlock.")
    # Seed the conversation with the tool-calling protocol and runtime context.
    base = [
        {"role": "system", "content": _build_system_prompt(path)},
        {"role": "system", "content": f"Context: current_terminal_path={path}; tier={tier}."},
    ]
    convo = base + (messages or [])
    for _ in range(max_steps):
        # Non-streaming call: we need the full reply to detect a tool request.
        payload = {
            "model": model or settings.llm_default_model,
            "messages": convo,
            "temperature": temperature,
            "stream": False,
        }
        res = await chat_completions(payload)
        content = (res.get("choices") or [{}])[0].get("message", {}).get("content", "") or ""
        tool_calls = _try_parse_tool_calls(content)
        if not tool_calls:
            # Model answered directly (no JSON tool request):
            # append assistant and stop
            convo.append({"role": "assistant", "content": content})
            return convo
        # append the tool request as assistant message (so model can see its own plan)
        convo.append({"role": "assistant", "content": content})
        for call in tool_calls[:5]:  # cap per step
            name = call["name"]
            args = call["args"]
            result = await execute_tool(db, user, name, args, path=path)
            # Tool output is truncated to keep the prompt within context limits.
            convo.append(
                {
                    "role": "system",
                    "content": f"TOOL_RESULT name={name} json={_truncate_json(result)}",
                }
            )
    # Fallback: force final answer even if tool loop didn't converge
    convo.append(
        {
            "role": "system",
            "content": "Now answer the user with the best possible answer using the tool results. Do NOT request tools.",
        }
    )
    return convo
async def stream_final_answer(convo: list[dict[str, Any]], *, model: Optional[str], temperature: float) -> AsyncIterator[bytes]:
    """Stream the final assistant reply for *convo* as raw byte chunks.

    Appends a closing system instruction that forbids further tool requests,
    then proxies the upstream streaming completion.
    """
    closing_instruction = {
        "role": "system",
        "content": "Final step: respond to the user. Do NOT output JSON tool_calls. Do NOT request tools.",
    }
    request = {
        "model": model or settings.llm_default_model,
        # New list: the caller's convo is left unmodified.
        "messages": [*convo, closing_instruction],
        "temperature": temperature,
        "stream": True,
    }
    async for piece in chat_completions_stream(request):
        yield piece