Commit
·
b7edd52
1
Parent(s):
a49a081
feat: cp stat
Browse files- .env.example +1 -1
- README.md +10 -10
- pyproject.toml +3 -13
- src/agents/agents.py +31 -40
- src/agents/bg_task_agent/task.py +0 -53
- src/agents/chatbot.py +0 -23
- src/agents/command_agent.py +0 -55
- src/agents/cpstat_agent.py +116 -0
- src/agents/{github_mcp_agent/github_mcp_agent.py → github_mcp_agent.py} +51 -33
- src/agents/interrupt_agent.py +0 -232
- src/agents/knowledge_base_agent.py +0 -174
- src/agents/langgraph_supervisor_agent.py +0 -62
- src/agents/langgraph_supervisor_hierarchy_agent.py +0 -46
- src/agents/middlewares/__init__.py +12 -0
- src/agents/middlewares/configurable_model.py +27 -0
- src/agents/middlewares/followup.py +89 -0
- src/agents/middlewares/safety.py +56 -0
- src/agents/middlewares/summarization.py +12 -0
- src/agents/{bg_task_agent/bg_task_agent.py → portfolio_agent.py} +45 -27
- src/agents/portfolio_agent/portfolio_agent.py +0 -150
- src/agents/portfolio_agent/tools/contest_ratings.py +0 -23
- src/agents/portfolio_agent/tools/database_search.py +0 -42
- src/agents/portfolio_agent/tools/github.py +0 -49
- src/agents/prompts/__init__.py +0 -0
- src/agents/prompts/cpstat.py +25 -0
- src/agents/prompts/followup.py +12 -0
- src/agents/prompts/github.py +32 -0
- src/agents/{portfolio_agent/prompt.py → prompts/portfolio.py} +5 -21
- src/agents/rag_assistant.py +0 -146
- src/agents/research_assistant.py +0 -148
- src/agents/tools.py +0 -56
- src/agents/tools/database_search.py +34 -0
- src/agents/utils.py +11 -13
- src/core/llm.py +1 -8
- src/core/settings.py +5 -3
- src/memory/postgres.py +1 -2
- src/schema/__init__.py +6 -0
- src/schema/schema.py +41 -0
- src/schema/task_data.py +0 -74
- src/scripts/create_chroma_db.py +0 -83
- src/service/agent_service.py +562 -0
- src/service/dependencies.py +23 -0
- src/service/router.py +130 -0
- src/service/service.py +14 -364
- src/service/utils.py +16 -0
- uv.lock +22 -509
.env.example
CHANGED
|
@@ -60,7 +60,7 @@ POSTGRES_PORT=
|
|
| 60 |
POSTGRES_DB=
|
| 61 |
|
| 62 |
# you will be able to identify AST connections in Postgres Connection Manager under this Application Name
|
| 63 |
-
# POSTGRES_APPLICATION_NAME = "
|
| 64 |
# set these values to customize the number of connections in the pool. Saver and store have independent connection pools
|
| 65 |
# POSTGRES_MIN_CONNECTIONS_PER_POOL=1
|
| 66 |
# POSTGRES_MAX_CONNECTIONS_PER_POOL= 3
|
|
|
|
| 60 |
POSTGRES_DB=
|
| 61 |
|
| 62 |
# you will be able to identify AST connections in Postgres Connection Manager under this Application Name
|
| 63 |
+
# POSTGRES_APPLICATION_NAME = "chatbot"
|
| 64 |
# set these values to customize the number of connections in the pool. Saver and store have independent connection pools
|
| 65 |
# POSTGRES_MIN_CONNECTIONS_PER_POOL=1
|
| 66 |
# POSTGRES_MAX_CONNECTIONS_PER_POOL= 3
|
README.md
CHANGED
|
@@ -9,8 +9,8 @@ pinned: false
|
|
| 9 |
|
| 10 |
# 🧰 AI Agent Service Toolkit
|
| 11 |
|
| 12 |
-
[.
|
| 208 |
|
| 209 |
-
## Projects built with or inspired by
|
| 210 |
|
| 211 |
The following are a few of the public projects that drew code or inspiration from this repo.
|
| 212 |
|
| 213 |
-
- **[PolyRAG](https://github.com/QuentinFuxa/PolyRAG)** - Extends
|
| 214 |
-
- **[alexrisch/agent-web-kit](https://github.com/alexrisch/agent-web-kit)** - A Next.JS frontend for
|
| 215 |
- **[raushan-in/dapa](https://github.com/raushan-in/dapa)** - Digital Arrest Protection App (DAPA) enables users to report financial scams and frauds efficiently via a user-friendly platform.
|
| 216 |
|
| 217 |
**Please create a pull request editing the README or open a discussion with any new ones to be added!** Would love to include more projects.
|
|
|
|
| 9 |
|
| 10 |
# 🧰 AI Agent Service Toolkit
|
| 11 |
|
| 12 |
+
[](https://github.com/JoshuaC215/chatbot/actions/workflows/test.yml) [](https://codecov.io/github/JoshuaC215/chatbot) [](https://github.com/JoshuaC215/chatbot/blob/main/pyproject.toml)
|
| 13 |
+
[](https://github.com/JoshuaC215/chatbot/blob/main/LICENSE) [](https://chatbot.streamlit.app/)
|
| 14 |
|
| 15 |
A full toolkit for running an AI agent service built with LangGraph, FastAPI and Streamlit.
|
| 16 |
|
|
|
|
| 22 |
|
| 23 |
## Overview
|
| 24 |
|
| 25 |
+
### [Try the app!](https://chatbot.streamlit.app/)
|
| 26 |
|
| 27 |
+
<a href="https://chatbot.streamlit.app/"><img src="media/app_screenshot.png" width="600"></a>
|
| 28 |
|
| 29 |
### Quickstart
|
| 30 |
|
|
|
|
| 34 |
# At least one LLM API key is required
|
| 35 |
echo 'OPENAI_API_KEY=your_openai_api_key' >> .env
|
| 36 |
|
| 37 |
+
# uv is the recommended way to install chatbot, but "pip install ." also works
|
| 38 |
# For uv installation options, see: https://docs.astral.sh/uv/getting-started/installation/
|
| 39 |
curl -LsSf https://astral.sh/uv/0.7.19/install.sh | sh
|
| 40 |
|
|
|
|
| 90 |
1. Clone the repository:
|
| 91 |
|
| 92 |
```sh
|
| 93 |
+
git clone https://github.com/JoshuaC215/chatbot.git
|
| 94 |
+
cd chatbot
|
| 95 |
```
|
| 96 |
|
| 97 |
2. Set up environment variables:
|
|
|
|
| 206 |
|
| 207 |
4. Open your browser and navigate to the URL provided by Streamlit (usually `http://localhost:8501`).
|
| 208 |
|
| 209 |
+
## Projects built with or inspired by chatbot
|
| 210 |
|
| 211 |
The following are a few of the public projects that drew code or inspiration from this repo.
|
| 212 |
|
| 213 |
+
- **[PolyRAG](https://github.com/QuentinFuxa/PolyRAG)** - Extends chatbot with RAG capabilities over both PostgreSQL databases and PDF documents.
|
| 214 |
+
- **[alexrisch/agent-web-kit](https://github.com/alexrisch/agent-web-kit)** - A Next.JS frontend for chatbot
|
| 215 |
- **[raushan-in/dapa](https://github.com/raushan-in/dapa)** - Digital Arrest Protection App (DAPA) enables users to report financial scams and frauds efficiently via a user-friendly platform.
|
| 216 |
|
| 217 |
**Please create a pull request editing the README or open a discussion with any new ones to be added!** Would love to include more projects.
|
pyproject.toml
CHANGED
|
@@ -1,9 +1,9 @@
|
|
| 1 |
[project]
|
| 2 |
-
name = "
|
| 3 |
version = "0.1.0"
|
| 4 |
description = "Full toolkit for running an AI agent service built with LangGraph, FastAPI and Streamlit"
|
| 5 |
readme = "README.md"
|
| 6 |
-
authors = [{ name = "
|
| 7 |
classifiers = [
|
| 8 |
"Development Status :: 4 - Beta",
|
| 9 |
"License :: OSI Approved :: MIT License",
|
|
@@ -16,7 +16,6 @@ classifiers = [
|
|
| 16 |
requires-python = ">=3.11,<3.14"
|
| 17 |
|
| 18 |
dependencies = [
|
| 19 |
-
"docx2txt ~=0.8",
|
| 20 |
"duckduckgo-search>=7.3.0",
|
| 21 |
"fastapi ~=0.115.5",
|
| 22 |
"grpcio >=1.68.0",
|
|
@@ -38,25 +37,17 @@ dependencies = [
|
|
| 38 |
"langgraph-checkpoint-mongodb ~=0.1.3",
|
| 39 |
"langgraph-checkpoint-postgres ~=2.0.13",
|
| 40 |
"langgraph-checkpoint-sqlite ~=2.0.1",
|
| 41 |
-
"langgraph-supervisor ~=0.0.31",
|
| 42 |
"langsmith ~=0.4.0",
|
| 43 |
-
"numexpr ~=2.10.1",
|
| 44 |
-
"numpy ~=2.3.4",
|
| 45 |
-
"onnxruntime ~= 1.21.1",
|
| 46 |
-
"pandas ~=2.2.3",
|
| 47 |
"psycopg[binary,pool] ~=3.2.4",
|
| 48 |
-
"pyarrow >=18.1.0",
|
| 49 |
"pydantic ~=2.10.1",
|
| 50 |
"pydantic-settings ~=2.12.0",
|
| 51 |
-
"pypdf ~=5.3.0",
|
| 52 |
-
"pyowm ~=3.3.0",
|
| 53 |
"python-dotenv ~=1.0.1",
|
| 54 |
"setuptools ~=75.6.0",
|
| 55 |
-
"streamlit ~=1.52.0",
|
| 56 |
"tiktoken >=0.8.0",
|
| 57 |
"uvicorn ~=0.32.1",
|
| 58 |
"langchain-mcp-adapters>=0.1.10",
|
| 59 |
"ddgs>=9.9.1",
|
|
|
|
| 60 |
]
|
| 61 |
|
| 62 |
[dependency-groups]
|
|
@@ -78,7 +69,6 @@ client = [
|
|
| 78 |
"httpx~=0.28.0",
|
| 79 |
"pydantic ~=2.10.1",
|
| 80 |
"python-dotenv ~=1.0.1",
|
| 81 |
-
"streamlit~=1.52.0",
|
| 82 |
]
|
| 83 |
|
| 84 |
[tool.ruff]
|
|
|
|
| 1 |
[project]
|
| 2 |
+
name = "chatbot"
|
| 3 |
version = "0.1.0"
|
| 4 |
description = "Full toolkit for running an AI agent service built with LangGraph, FastAPI and Streamlit"
|
| 5 |
readme = "README.md"
|
| 6 |
+
authors = [{ name = "Anuj Joshi", email = "anujjoshi3105@gmail.com" }]
|
| 7 |
classifiers = [
|
| 8 |
"Development Status :: 4 - Beta",
|
| 9 |
"License :: OSI Approved :: MIT License",
|
|
|
|
| 16 |
requires-python = ">=3.11,<3.14"
|
| 17 |
|
| 18 |
dependencies = [
|
|
|
|
| 19 |
"duckduckgo-search>=7.3.0",
|
| 20 |
"fastapi ~=0.115.5",
|
| 21 |
"grpcio >=1.68.0",
|
|
|
|
| 37 |
"langgraph-checkpoint-mongodb ~=0.1.3",
|
| 38 |
"langgraph-checkpoint-postgres ~=2.0.13",
|
| 39 |
"langgraph-checkpoint-sqlite ~=2.0.1",
|
|
|
|
| 40 |
"langsmith ~=0.4.0",
|
|
|
|
|
|
|
|
|
|
|
|
|
| 41 |
"psycopg[binary,pool] ~=3.2.4",
|
|
|
|
| 42 |
"pydantic ~=2.10.1",
|
| 43 |
"pydantic-settings ~=2.12.0",
|
|
|
|
|
|
|
| 44 |
"python-dotenv ~=1.0.1",
|
| 45 |
"setuptools ~=75.6.0",
|
|
|
|
| 46 |
"tiktoken >=0.8.0",
|
| 47 |
"uvicorn ~=0.32.1",
|
| 48 |
"langchain-mcp-adapters>=0.1.10",
|
| 49 |
"ddgs>=9.9.1",
|
| 50 |
+
"toons>=0.5.2",
|
| 51 |
]
|
| 52 |
|
| 53 |
[dependency-groups]
|
|
|
|
| 69 |
"httpx~=0.28.0",
|
| 70 |
"pydantic ~=2.10.1",
|
| 71 |
"python-dotenv ~=1.0.1",
|
|
|
|
| 72 |
]
|
| 73 |
|
| 74 |
[tool.ruff]
|
src/agents/agents.py
CHANGED
|
@@ -3,19 +3,10 @@ from dataclasses import dataclass
|
|
| 3 |
from langgraph.graph.state import CompiledStateGraph
|
| 4 |
from langgraph.pregel import Pregel
|
| 5 |
|
| 6 |
-
from agents.
|
| 7 |
-
from agents.
|
| 8 |
-
from agents.
|
| 9 |
-
from agents.github_mcp_agent.github_mcp_agent import github_mcp_agent
|
| 10 |
-
from agents.interrupt_agent import interrupt_agent
|
| 11 |
-
from agents.knowledge_base_agent import kb_agent
|
| 12 |
-
from agents.langgraph_supervisor_agent import langgraph_supervisor_agent
|
| 13 |
-
from agents.langgraph_supervisor_hierarchy_agent import langgraph_supervisor_hierarchy_agent
|
| 14 |
-
from agents.portfolio_agent.portfolio_agent import portfolio_agent
|
| 15 |
-
|
| 16 |
from agents.lazy_agent import LazyLoadingAgent
|
| 17 |
-
from agents.rag_assistant import rag_assistant
|
| 18 |
-
from agents.research_assistant import research_assistant
|
| 19 |
from schema import AgentInfo
|
| 20 |
|
| 21 |
DEFAULT_AGENT = "portfolio-agent"
|
|
@@ -31,41 +22,40 @@ AgentGraphLike = CompiledStateGraph | Pregel | LazyLoadingAgent # What can be s
|
|
| 31 |
class Agent:
|
| 32 |
description: str
|
| 33 |
graph_like: AgentGraphLike
|
|
|
|
| 34 |
|
| 35 |
|
| 36 |
agents: dict[str, Agent] = {
|
| 37 |
-
"chatbot": Agent(description="A simple chatbot.", graph_like=chatbot),
|
| 38 |
-
"research-assistant": Agent(
|
| 39 |
-
description="A research assistant with web search and calculator.",
|
| 40 |
-
graph_like=research_assistant,
|
| 41 |
-
),
|
| 42 |
-
"rag-assistant": Agent(
|
| 43 |
-
description="A RAG assistant with access to information in a database.",
|
| 44 |
-
graph_like=rag_assistant,
|
| 45 |
-
),
|
| 46 |
"portfolio-agent": Agent(
|
| 47 |
-
description="A portfolio assistant
|
| 48 |
graph_like=portfolio_agent,
|
| 49 |
-
|
| 50 |
-
|
| 51 |
-
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
|
| 56 |
-
description="A langgraph supervisor agent with a nested hierarchy of agents",
|
| 57 |
-
graph_like=langgraph_supervisor_hierarchy_agent,
|
| 58 |
-
),
|
| 59 |
-
"interrupt-agent": Agent(
|
| 60 |
-
description="An agent the uses interrupts.", graph_like=interrupt_agent
|
| 61 |
-
),
|
| 62 |
-
"knowledge-base-agent": Agent(
|
| 63 |
-
description="A retrieval-augmented generation agent using Amazon Bedrock Knowledge Base",
|
| 64 |
-
graph_like=kb_agent,
|
| 65 |
),
|
| 66 |
"github-mcp-agent": Agent(
|
| 67 |
-
description="
|
| 68 |
graph_like=github_mcp_agent,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 69 |
),
|
| 70 |
}
|
| 71 |
|
|
@@ -93,5 +83,6 @@ def get_agent(agent_id: str) -> AgentGraph:
|
|
| 93 |
|
| 94 |
def get_all_agent_info() -> list[AgentInfo]:
|
| 95 |
return [
|
| 96 |
-
AgentInfo(key=agent_id, description=agent.description
|
|
|
|
| 97 |
]
|
|
|
|
| 3 |
from langgraph.graph.state import CompiledStateGraph
|
| 4 |
from langgraph.pregel import Pregel
|
| 5 |
|
| 6 |
+
from agents.portfolio_agent import portfolio_agent
|
| 7 |
+
from agents.github_mcp_agent import github_mcp_agent
|
| 8 |
+
from agents.cpstat_agent import cpstat_agent
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 9 |
from agents.lazy_agent import LazyLoadingAgent
|
|
|
|
|
|
|
| 10 |
from schema import AgentInfo
|
| 11 |
|
| 12 |
DEFAULT_AGENT = "portfolio-agent"
|
|
|
|
| 22 |
class Agent:
|
| 23 |
description: str
|
| 24 |
graph_like: AgentGraphLike
|
| 25 |
+
prompts: list[str]
|
| 26 |
|
| 27 |
|
| 28 |
agents: dict[str, Agent] = {
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 29 |
"portfolio-agent": Agent(
|
| 30 |
+
description="A specialized portfolio assistant that can answer questions about Anuj Joshi's background, skills, projects, and experience using a curated knowledge base.",
|
| 31 |
graph_like=portfolio_agent,
|
| 32 |
+
prompts=[
|
| 33 |
+
"Tell me about Anuj's background",
|
| 34 |
+
"What are Anuj's key technical skills?",
|
| 35 |
+
"Show me some of Anuj's featured projects",
|
| 36 |
+
"What is Anuj's work experience?",
|
| 37 |
+
"How can I contact Anuj?",
|
| 38 |
+
],
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 39 |
),
|
| 40 |
"github-mcp-agent": Agent(
|
| 41 |
+
description="An agent equipped with GitHub MCP tools to explore repositories, view code, and track development activity.",
|
| 42 |
graph_like=github_mcp_agent,
|
| 43 |
+
prompts=[
|
| 44 |
+
"List anujjoshi3105's top repositories",
|
| 45 |
+
"Show recent activity on anujjoshi3105's GitHub",
|
| 46 |
+
"Tell me about the 'portfolio-bot' repository",
|
| 47 |
+
"Show me anujjoshi3105's contributions in the last month",
|
| 48 |
+
],
|
| 49 |
+
),
|
| 50 |
+
"cpstat-agent": Agent(
|
| 51 |
+
description="An agent specializing in Competitive Programming, capable of fetching real-time contest ratings and stats from various platforms.",
|
| 52 |
+
graph_like=cpstat_agent,
|
| 53 |
+
prompts=[
|
| 54 |
+
"Show Anuj's LeetCode rating and stats",
|
| 55 |
+
"What is Anuj's Codeforces rank?",
|
| 56 |
+
"Get recent contest performance for anujjoshi3105",
|
| 57 |
+
"Show CP stats across all platforms",
|
| 58 |
+
],
|
| 59 |
),
|
| 60 |
}
|
| 61 |
|
|
|
|
| 83 |
|
| 84 |
def get_all_agent_info() -> list[AgentInfo]:
|
| 85 |
return [
|
| 86 |
+
AgentInfo(key=agent_id, description=agent.description, prompts=agent.prompts)
|
| 87 |
+
for agent_id, agent in agents.items()
|
| 88 |
]
|
src/agents/bg_task_agent/task.py
DELETED
|
@@ -1,53 +0,0 @@
|
|
| 1 |
-
from typing import Literal
|
| 2 |
-
from uuid import uuid4
|
| 3 |
-
|
| 4 |
-
from langchain_core.messages import BaseMessage
|
| 5 |
-
from langgraph.types import StreamWriter
|
| 6 |
-
|
| 7 |
-
from agents.utils import CustomData
|
| 8 |
-
from schema.task_data import TaskData
|
| 9 |
-
|
| 10 |
-
|
| 11 |
-
class Task:
|
| 12 |
-
def __init__(self, task_name: str, writer: StreamWriter | None = None) -> None:
|
| 13 |
-
self.name = task_name
|
| 14 |
-
self.id = str(uuid4())
|
| 15 |
-
self.state: Literal["new", "running", "complete"] = "new"
|
| 16 |
-
self.result: Literal["success", "error"] | None = None
|
| 17 |
-
self.writer = writer
|
| 18 |
-
|
| 19 |
-
def _generate_and_dispatch_message(self, writer: StreamWriter | None, data: dict):
|
| 20 |
-
writer = writer or self.writer
|
| 21 |
-
task_data = TaskData(name=self.name, run_id=self.id, state=self.state, data=data)
|
| 22 |
-
if self.result:
|
| 23 |
-
task_data.result = self.result
|
| 24 |
-
task_custom_data = CustomData(
|
| 25 |
-
type=self.name,
|
| 26 |
-
data=task_data.model_dump(),
|
| 27 |
-
)
|
| 28 |
-
if writer:
|
| 29 |
-
task_custom_data.dispatch(writer)
|
| 30 |
-
return task_custom_data.to_langchain()
|
| 31 |
-
|
| 32 |
-
def start(self, writer: StreamWriter | None = None, data: dict = {}) -> BaseMessage:
|
| 33 |
-
self.state = "new"
|
| 34 |
-
task_message = self._generate_and_dispatch_message(writer, data)
|
| 35 |
-
return task_message
|
| 36 |
-
|
| 37 |
-
def write_data(self, writer: StreamWriter | None = None, data: dict = {}) -> BaseMessage:
|
| 38 |
-
if self.state == "complete":
|
| 39 |
-
raise ValueError("Only incomplete tasks can output data.")
|
| 40 |
-
self.state = "running"
|
| 41 |
-
task_message = self._generate_and_dispatch_message(writer, data)
|
| 42 |
-
return task_message
|
| 43 |
-
|
| 44 |
-
def finish(
|
| 45 |
-
self,
|
| 46 |
-
result: Literal["success", "error"],
|
| 47 |
-
writer: StreamWriter | None = None,
|
| 48 |
-
data: dict = {},
|
| 49 |
-
) -> BaseMessage:
|
| 50 |
-
self.state = "complete"
|
| 51 |
-
self.result = result
|
| 52 |
-
task_message = self._generate_and_dispatch_message(writer, data)
|
| 53 |
-
return task_message
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
src/agents/chatbot.py
DELETED
|
@@ -1,23 +0,0 @@
|
|
| 1 |
-
from langchain_core.messages import BaseMessage
|
| 2 |
-
from langchain_core.runnables import RunnableConfig
|
| 3 |
-
from langgraph.func import entrypoint
|
| 4 |
-
|
| 5 |
-
from core import get_model, settings
|
| 6 |
-
|
| 7 |
-
|
| 8 |
-
@entrypoint()
|
| 9 |
-
async def chatbot(
|
| 10 |
-
inputs: dict[str, list[BaseMessage]],
|
| 11 |
-
*,
|
| 12 |
-
previous: dict[str, list[BaseMessage]],
|
| 13 |
-
config: RunnableConfig,
|
| 14 |
-
):
|
| 15 |
-
messages = inputs["messages"]
|
| 16 |
-
if previous:
|
| 17 |
-
messages = previous["messages"] + messages
|
| 18 |
-
|
| 19 |
-
model = get_model(config["configurable"].get("model", settings.DEFAULT_MODEL))
|
| 20 |
-
response = await model.ainvoke(messages)
|
| 21 |
-
return entrypoint.final(
|
| 22 |
-
value={"messages": [response]}, save={"messages": messages + [response]}
|
| 23 |
-
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
src/agents/command_agent.py
DELETED
|
@@ -1,55 +0,0 @@
|
|
| 1 |
-
import random
|
| 2 |
-
from typing import Literal
|
| 3 |
-
|
| 4 |
-
from langchain_core.messages import AIMessage
|
| 5 |
-
from langgraph.graph import START, MessagesState, StateGraph
|
| 6 |
-
from langgraph.types import Command
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
class AgentState(MessagesState, total=False):
|
| 10 |
-
"""`total=False` is PEP589 specs.
|
| 11 |
-
|
| 12 |
-
documentation: https://typing.readthedocs.io/en/latest/spec/typeddict.html#totality
|
| 13 |
-
"""
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
# Define the nodes
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
def node_a(state: AgentState) -> Command[Literal["node_b", "node_c"]]:
|
| 20 |
-
print("Called A")
|
| 21 |
-
value = random.choice(["a", "b"])
|
| 22 |
-
goto: Literal["node_b", "node_c"]
|
| 23 |
-
# this is a replacement for a conditional edge function
|
| 24 |
-
if value == "a":
|
| 25 |
-
goto = "node_b"
|
| 26 |
-
else:
|
| 27 |
-
goto = "node_c"
|
| 28 |
-
|
| 29 |
-
# note how Command allows you to BOTH update the graph state AND route to the next node
|
| 30 |
-
return Command(
|
| 31 |
-
# this is the state update
|
| 32 |
-
update={"messages": [AIMessage(content=f"Hello {value}")]},
|
| 33 |
-
# this is a replacement for an edge
|
| 34 |
-
goto=goto,
|
| 35 |
-
)
|
| 36 |
-
|
| 37 |
-
|
| 38 |
-
def node_b(state: AgentState):
|
| 39 |
-
print("Called B")
|
| 40 |
-
return {"messages": [AIMessage(content="Hello B")]}
|
| 41 |
-
|
| 42 |
-
|
| 43 |
-
def node_c(state: AgentState):
|
| 44 |
-
print("Called C")
|
| 45 |
-
return {"messages": [AIMessage(content="Hello C")]}
|
| 46 |
-
|
| 47 |
-
|
| 48 |
-
builder = StateGraph(AgentState)
|
| 49 |
-
builder.add_edge(START, "node_a")
|
| 50 |
-
builder.add_node(node_a)
|
| 51 |
-
builder.add_node(node_b)
|
| 52 |
-
builder.add_node(node_c)
|
| 53 |
-
# NOTE: there are no edges between nodes A, B and C!
|
| 54 |
-
|
| 55 |
-
command_agent = builder.compile()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
src/agents/cpstat_agent.py
ADDED
|
@@ -0,0 +1,116 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
|
| 3 |
+
from langchain.agents import create_agent
|
| 4 |
+
from langchain_core.tools import BaseTool
|
| 5 |
+
from langchain_mcp_adapters.client import MultiServerMCPClient
|
| 6 |
+
from langchain_mcp_adapters.sessions import StreamableHttpConnection
|
| 7 |
+
from langgraph.graph.state import CompiledStateGraph
|
| 8 |
+
|
| 9 |
+
from agents.utils import filter_mcp_tools
|
| 10 |
+
from agents.lazy_agent import LazyLoadingAgent
|
| 11 |
+
from agents.middlewares import ConfigurableModelMiddleware
|
| 12 |
+
from agents.middlewares.summarization import SummarizationMiddleware
|
| 13 |
+
from agents.prompts.cpstat import SYSTEM_PROMPT
|
| 14 |
+
from core import get_model, settings
|
| 15 |
+
|
| 16 |
+
logger = logging.getLogger(__name__)
|
| 17 |
+
|
| 18 |
+
ALLOWED_TOOLS = {
|
| 19 |
+
# LeetCode - User Stats & Profile
|
| 20 |
+
"leetcode_get_rating",
|
| 21 |
+
"leetcode_get_profile",
|
| 22 |
+
"leetcode_get_details",
|
| 23 |
+
"leetcode_get_badges",
|
| 24 |
+
"leetcode_get_solved",
|
| 25 |
+
"leetcode_get_contest_ranking",
|
| 26 |
+
"leetcode_get_contest_history",
|
| 27 |
+
"leetcode_get_submissions",
|
| 28 |
+
"leetcode_get_ac_submissions",
|
| 29 |
+
"leetcode_get_calendar",
|
| 30 |
+
"leetcode_get_skill_stats",
|
| 31 |
+
"leetcode_get_languages",
|
| 32 |
+
"leetcode_get_progress",
|
| 33 |
+
"leetcode_get_contest_ranking_info",
|
| 34 |
+
|
| 35 |
+
# Codeforces - User Stats & Content
|
| 36 |
+
"codeforces_get_rating",
|
| 37 |
+
"codeforces_get_contest_history",
|
| 38 |
+
"codeforces_get_user_status",
|
| 39 |
+
"codeforces_get_user_blogs",
|
| 40 |
+
"codeforces_get_solved_problems",
|
| 41 |
+
"codeforces_get_blog_entry",
|
| 42 |
+
"codeforces_get_blog_comments",
|
| 43 |
+
|
| 44 |
+
# AtCoder
|
| 45 |
+
"atcoder_get_rating",
|
| 46 |
+
"atcoder_get_history",
|
| 47 |
+
|
| 48 |
+
# CodeChef
|
| 49 |
+
"codechef_get_rating",
|
| 50 |
+
|
| 51 |
+
# GeeksforGeeks
|
| 52 |
+
"gfg_get_rating",
|
| 53 |
+
"gfg_get_submissions",
|
| 54 |
+
"gfg_get_posts",
|
| 55 |
+
}
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
class CPStatAgent(LazyLoadingAgent):
|
| 59 |
+
"""CP Stat Agent with async initialization for contest and rating info."""
|
| 60 |
+
|
| 61 |
+
def __init__(self) -> None:
|
| 62 |
+
super().__init__()
|
| 63 |
+
self._mcp_tools: list[BaseTool] = []
|
| 64 |
+
self._mcp_client: MultiServerMCPClient | None = None
|
| 65 |
+
|
| 66 |
+
async def load(self) -> None:
|
| 67 |
+
"""Initialize the CP Stat agent by loading MCP tools from the Contest API."""
|
| 68 |
+
self._model = get_model(settings.DEFAULT_MODEL)
|
| 69 |
+
|
| 70 |
+
if not settings.CONTEST_API_URL:
|
| 71 |
+
logger.info("CONTEST_API_URL is not set, CP Stat agent will have no tools")
|
| 72 |
+
self._mcp_tools = []
|
| 73 |
+
self._graph = self._create_graph()
|
| 74 |
+
self._loaded = True
|
| 75 |
+
return
|
| 76 |
+
|
| 77 |
+
try:
|
| 78 |
+
connections = {
|
| 79 |
+
"cpstat": StreamableHttpConnection(
|
| 80 |
+
transport="streamable_http",
|
| 81 |
+
url=settings.CONTEST_API_URL,
|
| 82 |
+
)
|
| 83 |
+
}
|
| 84 |
+
|
| 85 |
+
self._mcp_client = MultiServerMCPClient(connections)
|
| 86 |
+
logger.info(f"CP Stat client initialized successfully with URL: {settings.CONTEST_API_URL}")
|
| 87 |
+
|
| 88 |
+
all_tools = await self._mcp_client.get_tools()
|
| 89 |
+
self._mcp_tools = filter_mcp_tools(all_tools, ALLOWED_TOOLS)
|
| 90 |
+
|
| 91 |
+
except Exception as e:
|
| 92 |
+
logger.error(f"Failed to initialize CP Stat agent: {e}")
|
| 93 |
+
self._mcp_tools = []
|
| 94 |
+
self._mcp_client = None
|
| 95 |
+
|
| 96 |
+
# Create and store the graph
|
| 97 |
+
self._graph = self._create_graph()
|
| 98 |
+
self._loaded = True
|
| 99 |
+
|
| 100 |
+
def _create_graph(self) -> CompiledStateGraph:
|
| 101 |
+
"""Create the CP Stat agent graph."""
|
| 102 |
+
return create_agent(
|
| 103 |
+
model=self._model,
|
| 104 |
+
tools=self._mcp_tools,
|
| 105 |
+
middleware=[
|
| 106 |
+
ConfigurableModelMiddleware(),
|
| 107 |
+
SummarizationMiddleware(model=self._model),
|
| 108 |
+
],
|
| 109 |
+
name="cpstat-agent",
|
| 110 |
+
system_prompt=SYSTEM_PROMPT,
|
| 111 |
+
debug=True,
|
| 112 |
+
)
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
# Create the agent instance
|
| 116 |
+
cpstat_agent = CPStatAgent()
|
src/agents/{github_mcp_agent/github_mcp_agent.py → github_mcp_agent.py}
RENAMED
|
@@ -1,7 +1,4 @@
|
|
| 1 |
-
"""GitHub MCP Agent - An agent that uses GitHub MCP tools for repository management."""
|
| 2 |
-
|
| 3 |
import logging
|
| 4 |
-
from datetime import datetime
|
| 5 |
|
| 6 |
from langchain.agents import create_agent
|
| 7 |
from langchain_core.tools import BaseTool
|
|
@@ -9,39 +6,57 @@ from langchain_mcp_adapters.client import MultiServerMCPClient
|
|
| 9 |
from langchain_mcp_adapters.sessions import StreamableHttpConnection
|
| 10 |
from langgraph.graph.state import CompiledStateGraph
|
| 11 |
|
|
|
|
| 12 |
from agents.lazy_agent import LazyLoadingAgent
|
|
|
|
|
|
|
| 13 |
from core import get_model, settings
|
|
|
|
| 14 |
|
| 15 |
logger = logging.getLogger(__name__)
|
| 16 |
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 30 |
|
| 31 |
-
Guidelines:
|
| 32 |
-
- Always be helpful and provide clear explanations of GitHub operations
|
| 33 |
-
- When creating or modifying content, ensure it follows best practices
|
| 34 |
-
- Be cautious with destructive operations (deletes, force pushes, etc.)
|
| 35 |
-
- Provide context about what you're doing and why
|
| 36 |
-
- Use appropriate commit messages and PR descriptions
|
| 37 |
-
- Respect repository permissions and access controls
|
| 38 |
|
| 39 |
-
NOTE: You have access to GitHub MCP tools that provide direct GitHub API access.
|
| 40 |
-
"""
|
| 41 |
|
| 42 |
|
| 43 |
class GitHubMCPAgent(LazyLoadingAgent):
|
| 44 |
-
"""GitHub MCP Agent with async initialization."""
|
| 45 |
|
| 46 |
def __init__(self) -> None:
|
| 47 |
super().__init__()
|
|
@@ -50,6 +65,7 @@ class GitHubMCPAgent(LazyLoadingAgent):
|
|
| 50 |
|
| 51 |
async def load(self) -> None:
|
| 52 |
"""Initialize the GitHub MCP agent by loading MCP tools."""
|
|
|
|
| 53 |
if not settings.GITHUB_PAT:
|
| 54 |
logger.info("GITHUB_PAT is not set, GitHub MCP agent will have no tools")
|
| 55 |
self._mcp_tools = []
|
|
@@ -73,9 +89,8 @@ class GitHubMCPAgent(LazyLoadingAgent):
|
|
| 73 |
self._mcp_client = MultiServerMCPClient(connections)
|
| 74 |
logger.info("MCP client initialized successfully")
|
| 75 |
|
| 76 |
-
|
| 77 |
-
self._mcp_tools =
|
| 78 |
-
logger.info(f"GitHub MCP agent initialized with {len(self._mcp_tools)} tools")
|
| 79 |
|
| 80 |
except Exception as e:
|
| 81 |
logger.error(f"Failed to initialize GitHub MCP agent: {e}")
|
|
@@ -88,13 +103,16 @@ class GitHubMCPAgent(LazyLoadingAgent):
|
|
| 88 |
|
| 89 |
def _create_graph(self) -> CompiledStateGraph:
|
| 90 |
"""Create the GitHub MCP agent graph."""
|
| 91 |
-
model = get_model(settings.DEFAULT_MODEL)
|
| 92 |
-
|
| 93 |
return create_agent(
|
| 94 |
-
model=
|
| 95 |
tools=self._mcp_tools,
|
|
|
|
|
|
|
|
|
|
|
|
|
| 96 |
name="github-mcp-agent",
|
| 97 |
-
system_prompt=
|
|
|
|
| 98 |
)
|
| 99 |
|
| 100 |
|
|
|
|
|
|
|
|
|
|
| 1 |
import logging
|
|
|
|
| 2 |
|
| 3 |
from langchain.agents import create_agent
|
| 4 |
from langchain_core.tools import BaseTool
|
|
|
|
| 6 |
from langchain_mcp_adapters.sessions import StreamableHttpConnection
|
| 7 |
from langgraph.graph.state import CompiledStateGraph
|
| 8 |
|
| 9 |
+
from agents.utils import filter_mcp_tools
|
| 10 |
from agents.lazy_agent import LazyLoadingAgent
|
| 11 |
+
from agents.middlewares import ConfigurableModelMiddleware
|
| 12 |
+
from agents.middlewares.summarization import SummarizationMiddleware
|
| 13 |
from core import get_model, settings
|
| 14 |
+
from agents.prompts.github import SYSTEM_PROMPT
|
| 15 |
|
| 16 |
logger = logging.getLogger(__name__)
|
| 17 |
|
| 18 |
+
ALLOWED_TOOLS = {
|
| 19 |
+
# User & Profile
|
| 20 |
+
"get_me", # Get authenticated user's profile
|
| 21 |
+
"get_teams", # Get teams the user belongs to
|
| 22 |
+
"get_team_members", # Get team member usernames
|
| 23 |
+
|
| 24 |
+
# Repository & Code
|
| 25 |
+
"search_repositories", # Find repositories by name/description
|
| 26 |
+
"get_file_contents", # Get file/directory contents
|
| 27 |
+
"search_code", # Search code across repositories
|
| 28 |
+
"list_branches", # List repository branches
|
| 29 |
+
|
| 30 |
+
# Activity & Contributions
|
| 31 |
+
"list_commits", # List commits in a repository
|
| 32 |
+
"get_commit", # Get commit details with diff
|
| 33 |
+
"list_pull_requests", # List PRs in a repository
|
| 34 |
+
"pull_request_read", # Get PR details, diff, reviews
|
| 35 |
+
"search_pull_requests",# Search PRs by author
|
| 36 |
+
|
| 37 |
+
# Issues
|
| 38 |
+
"list_issues", # List issues in a repository
|
| 39 |
+
"issue_read", # Get issue details/comments
|
| 40 |
+
"search_issues", # Search issues
|
| 41 |
+
|
| 42 |
+
# Releases & Tags
|
| 43 |
+
"list_releases", # List releases
|
| 44 |
+
"get_latest_release", # Get latest release
|
| 45 |
+
"get_release_by_tag", # Get release by tag
|
| 46 |
+
"list_tags", # List git tags
|
| 47 |
+
"get_tag", # Get tag details
|
| 48 |
+
|
| 49 |
+
# Discovery
|
| 50 |
+
"search_users", # Find GitHub users
|
| 51 |
+
"get_label", # Get repository label
|
| 52 |
+
"list_issue_types", # List issue types for org
|
| 53 |
+
}
|
| 54 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 55 |
|
|
|
|
|
|
|
| 56 |
|
| 57 |
|
| 58 |
class GitHubMCPAgent(LazyLoadingAgent):
|
| 59 |
+
"""GitHub MCP Agent with async initialization for portfolio assistant."""
|
| 60 |
|
| 61 |
def __init__(self) -> None:
|
| 62 |
super().__init__()
|
|
|
|
| 65 |
|
| 66 |
async def load(self) -> None:
|
| 67 |
"""Initialize the GitHub MCP agent by loading MCP tools."""
|
| 68 |
+
self._model = get_model(settings.DEFAULT_MODEL)
|
| 69 |
if not settings.GITHUB_PAT:
|
| 70 |
logger.info("GITHUB_PAT is not set, GitHub MCP agent will have no tools")
|
| 71 |
self._mcp_tools = []
|
|
|
|
| 89 |
self._mcp_client = MultiServerMCPClient(connections)
|
| 90 |
logger.info("MCP client initialized successfully")
|
| 91 |
|
| 92 |
+
all_tools = await self._mcp_client.get_tools()
|
| 93 |
+
self._mcp_tools = filter_mcp_tools(all_tools, ALLOWED_TOOLS)
|
|
|
|
| 94 |
|
| 95 |
except Exception as e:
|
| 96 |
logger.error(f"Failed to initialize GitHub MCP agent: {e}")
|
|
|
|
| 103 |
|
| 104 |
def _create_graph(self) -> CompiledStateGraph:
|
| 105 |
"""Create the GitHub MCP agent graph."""
|
|
|
|
|
|
|
| 106 |
return create_agent(
|
| 107 |
+
model=self._model,
|
| 108 |
tools=self._mcp_tools,
|
| 109 |
+
middleware=[
|
| 110 |
+
ConfigurableModelMiddleware(),
|
| 111 |
+
SummarizationMiddleware(model=self._model),
|
| 112 |
+
],
|
| 113 |
name="github-mcp-agent",
|
| 114 |
+
system_prompt=SYSTEM_PROMPT,
|
| 115 |
+
debug=True,
|
| 116 |
)
|
| 117 |
|
| 118 |
|
src/agents/interrupt_agent.py
DELETED
|
@@ -1,232 +0,0 @@
|
|
| 1 |
-
import logging
|
| 2 |
-
from datetime import datetime
|
| 3 |
-
from typing import Any
|
| 4 |
-
|
| 5 |
-
from langchain_core.language_models.base import LanguageModelInput
|
| 6 |
-
from langchain_core.language_models.chat_models import BaseChatModel
|
| 7 |
-
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage
|
| 8 |
-
from langchain_core.prompts import SystemMessagePromptTemplate
|
| 9 |
-
from langchain_core.runnables import Runnable, RunnableConfig, RunnableLambda, RunnableSerializable
|
| 10 |
-
from langgraph.graph import END, MessagesState, StateGraph
|
| 11 |
-
from langgraph.store.base import BaseStore
|
| 12 |
-
from langgraph.types import interrupt
|
| 13 |
-
from pydantic import BaseModel, Field
|
| 14 |
-
|
| 15 |
-
from core import get_model, settings
|
| 16 |
-
|
| 17 |
-
# Added logger
|
| 18 |
-
logger = logging.getLogger(__name__)
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
class AgentState(MessagesState, total=False):
|
| 22 |
-
"""`total=False` is PEP589 specs.
|
| 23 |
-
|
| 24 |
-
documentation: https://typing.readthedocs.io/en/latest/spec/typeddict.html#totality
|
| 25 |
-
"""
|
| 26 |
-
|
| 27 |
-
birthdate: datetime | None
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
def wrap_model(
|
| 31 |
-
model: BaseChatModel | Runnable[LanguageModelInput, Any], system_prompt: BaseMessage
|
| 32 |
-
) -> RunnableSerializable[AgentState, Any]:
|
| 33 |
-
preprocessor = RunnableLambda(
|
| 34 |
-
lambda state: [system_prompt] + state["messages"],
|
| 35 |
-
name="StateModifier",
|
| 36 |
-
)
|
| 37 |
-
return preprocessor | model
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
background_prompt = SystemMessagePromptTemplate.from_template("""
|
| 41 |
-
You are a helpful assistant that tells users there zodiac sign.
|
| 42 |
-
Provide a one sentence summary of the origin of zodiac signs.
|
| 43 |
-
Don't tell the user what their sign is, you are just demonstrating your knowledge on the topic.
|
| 44 |
-
""")
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
async def background(state: AgentState, config: RunnableConfig) -> AgentState:
|
| 48 |
-
"""This node is to demonstrate doing work before the interrupt"""
|
| 49 |
-
|
| 50 |
-
m = get_model(config["configurable"].get("model", settings.DEFAULT_MODEL))
|
| 51 |
-
model_runnable = wrap_model(m, background_prompt.format())
|
| 52 |
-
response = await model_runnable.ainvoke(state, config)
|
| 53 |
-
|
| 54 |
-
return {"messages": [AIMessage(content=response.content)]}
|
| 55 |
-
|
| 56 |
-
|
| 57 |
-
birthdate_extraction_prompt = SystemMessagePromptTemplate.from_template("""
|
| 58 |
-
You are an expert at extracting birthdates from conversational text.
|
| 59 |
-
|
| 60 |
-
Rules for extraction:
|
| 61 |
-
- Look for user messages that mention birthdates
|
| 62 |
-
- Consider various date formats (MM/DD/YYYY, YYYY-MM-DD, Month Day, Year)
|
| 63 |
-
- Validate that the date is reasonable (not in the future)
|
| 64 |
-
- If no clear birthdate was provided by the user, return None
|
| 65 |
-
""")
|
| 66 |
-
|
| 67 |
-
|
| 68 |
-
class BirthdateExtraction(BaseModel):
|
| 69 |
-
birthdate: str | None = Field(
|
| 70 |
-
description="The extracted birthdate in YYYY-MM-DD format. If no birthdate is found, this should be None."
|
| 71 |
-
)
|
| 72 |
-
reasoning: str = Field(
|
| 73 |
-
description="Explanation of how the birthdate was extracted or why no birthdate was found"
|
| 74 |
-
)
|
| 75 |
-
|
| 76 |
-
|
| 77 |
-
async def determine_birthdate(
|
| 78 |
-
state: AgentState, config: RunnableConfig, store: BaseStore
|
| 79 |
-
) -> AgentState:
|
| 80 |
-
"""This node examines the conversation history to determine user's birthdate, checking store first."""
|
| 81 |
-
|
| 82 |
-
# Attempt to get user_id for unique storage per user
|
| 83 |
-
user_id = config["configurable"].get("user_id")
|
| 84 |
-
logger.info(f"[determine_birthdate] Extracted user_id: {user_id}")
|
| 85 |
-
namespace = None
|
| 86 |
-
key = "birthdate"
|
| 87 |
-
birthdate = None # Initialize birthdate
|
| 88 |
-
|
| 89 |
-
if user_id:
|
| 90 |
-
# Use user_id in the namespace to ensure uniqueness per user
|
| 91 |
-
namespace = (user_id,)
|
| 92 |
-
|
| 93 |
-
# Check if we already have the birthdate in the store for this user
|
| 94 |
-
try:
|
| 95 |
-
result = await store.aget(namespace, key=key)
|
| 96 |
-
# Handle cases where store.aget might return Item directly or a list
|
| 97 |
-
user_data = None
|
| 98 |
-
if result: # Check if anything was returned
|
| 99 |
-
if isinstance(result, list):
|
| 100 |
-
if result: # Check if list is not empty
|
| 101 |
-
user_data = result[0]
|
| 102 |
-
else: # Assume it's the Item object directly
|
| 103 |
-
user_data = result
|
| 104 |
-
|
| 105 |
-
if user_data and user_data.value.get("birthdate"):
|
| 106 |
-
# Convert ISO format string back to datetime object
|
| 107 |
-
birthdate_str = user_data.value["birthdate"]
|
| 108 |
-
birthdate = datetime.fromisoformat(birthdate_str) if birthdate_str else None
|
| 109 |
-
# We already have the birthdate, return it
|
| 110 |
-
logger.info(
|
| 111 |
-
f"[determine_birthdate] Found birthdate in store for user {user_id}: {birthdate}"
|
| 112 |
-
)
|
| 113 |
-
return {
|
| 114 |
-
"birthdate": birthdate,
|
| 115 |
-
"messages": [],
|
| 116 |
-
}
|
| 117 |
-
except Exception as e:
|
| 118 |
-
# Log the error or handle cases where the store might be unavailable
|
| 119 |
-
logger.error(f"Error reading from store for namespace {namespace}, key {key}: {e}")
|
| 120 |
-
# Proceed with extraction if read fails
|
| 121 |
-
pass
|
| 122 |
-
else:
|
| 123 |
-
# If no user_id, we cannot reliably store/retrieve user-specific data.
|
| 124 |
-
# Consider logging this situation.
|
| 125 |
-
logger.warning(
|
| 126 |
-
"Warning: user_id not found in config. Skipping persistent birthdate storage/retrieval for this run."
|
| 127 |
-
)
|
| 128 |
-
|
| 129 |
-
# If birthdate wasn't retrieved from store, proceed with extraction
|
| 130 |
-
m = get_model(config["configurable"].get("model", settings.DEFAULT_MODEL))
|
| 131 |
-
model_runnable = wrap_model(
|
| 132 |
-
m.with_structured_output(BirthdateExtraction), birthdate_extraction_prompt.format()
|
| 133 |
-
).with_config(tags=["skip_stream"])
|
| 134 |
-
response: BirthdateExtraction = await model_runnable.ainvoke(state, config)
|
| 135 |
-
|
| 136 |
-
# If no birthdate found after extraction attempt, interrupt
|
| 137 |
-
if response.birthdate is None:
|
| 138 |
-
birthdate_input = interrupt(f"{response.reasoning}\nPlease tell me your birthdate?")
|
| 139 |
-
# Re-run extraction with the new input
|
| 140 |
-
state["messages"].append(HumanMessage(birthdate_input))
|
| 141 |
-
# Note: Recursive call might need careful handling of depth or state updates
|
| 142 |
-
return await determine_birthdate(state, config, store)
|
| 143 |
-
|
| 144 |
-
# Birthdate found - convert string to datetime
|
| 145 |
-
try:
|
| 146 |
-
birthdate = datetime.fromisoformat(response.birthdate)
|
| 147 |
-
except ValueError:
|
| 148 |
-
# If parsing fails, ask for clarification
|
| 149 |
-
birthdate_input = interrupt(
|
| 150 |
-
"I couldn't understand the date format. Please provide your birthdate in YYYY-MM-DD format."
|
| 151 |
-
)
|
| 152 |
-
# Re-run extraction with the new input
|
| 153 |
-
state["messages"].append(HumanMessage(birthdate_input))
|
| 154 |
-
# Note: Recursive call might need careful handling of depth or state updates
|
| 155 |
-
return await determine_birthdate(state, config, store)
|
| 156 |
-
|
| 157 |
-
# Store the newly extracted birthdate only if we have a user_id
|
| 158 |
-
if user_id and namespace:
|
| 159 |
-
# Convert datetime to ISO format string for JSON serialization
|
| 160 |
-
birthdate_str = birthdate.isoformat() if birthdate else None
|
| 161 |
-
try:
|
| 162 |
-
await store.aput(namespace, key, {"birthdate": birthdate_str})
|
| 163 |
-
except Exception as e:
|
| 164 |
-
# Log the error or handle cases where the store write might fail
|
| 165 |
-
logger.error(f"Error writing to store for namespace {namespace}, key {key}: {e}")
|
| 166 |
-
|
| 167 |
-
# Return the determined birthdate (either from store or extracted)
|
| 168 |
-
logger.info(f"[determine_birthdate] Returning birthdate {birthdate} for user {user_id}")
|
| 169 |
-
return {
|
| 170 |
-
"birthdate": birthdate,
|
| 171 |
-
"messages": [],
|
| 172 |
-
}
|
| 173 |
-
|
| 174 |
-
|
| 175 |
-
response_prompt = SystemMessagePromptTemplate.from_template("""
|
| 176 |
-
You are a helpful assistant.
|
| 177 |
-
|
| 178 |
-
Known information:
|
| 179 |
-
- The user's birthdate is {birthdate_str}
|
| 180 |
-
|
| 181 |
-
User's latest message: "{last_user_message}"
|
| 182 |
-
|
| 183 |
-
Based on the known information and the user's message, provide a helpful and relevant response.
|
| 184 |
-
If the user asked for their birthdate, confirm it.
|
| 185 |
-
If the user asked for their zodiac sign, calculate it and tell them.
|
| 186 |
-
Otherwise, respond conversationally based on their message.
|
| 187 |
-
""")
|
| 188 |
-
|
| 189 |
-
|
| 190 |
-
async def generate_response(state: AgentState, config: RunnableConfig) -> AgentState:
|
| 191 |
-
"""Generates the final response based on the user's query and the available birthdate."""
|
| 192 |
-
birthdate = state.get("birthdate")
|
| 193 |
-
if state.get("messages") and isinstance(state["messages"][-1], HumanMessage):
|
| 194 |
-
last_user_message = state["messages"][-1].content
|
| 195 |
-
else:
|
| 196 |
-
last_user_message = ""
|
| 197 |
-
|
| 198 |
-
if not birthdate:
|
| 199 |
-
# This should ideally not be reached if determine_birthdate worked correctly and possibly interrupted.
|
| 200 |
-
# Handle cases where birthdate might still be missing.
|
| 201 |
-
return {
|
| 202 |
-
"messages": [
|
| 203 |
-
AIMessage(
|
| 204 |
-
content="I couldn't determine your birthdate. Could you please provide it?"
|
| 205 |
-
)
|
| 206 |
-
]
|
| 207 |
-
}
|
| 208 |
-
|
| 209 |
-
birthdate_str = birthdate.strftime("%B %d, %Y") # Format for display
|
| 210 |
-
|
| 211 |
-
m = get_model(config["configurable"].get("model", settings.DEFAULT_MODEL))
|
| 212 |
-
model_runnable = wrap_model(
|
| 213 |
-
m, response_prompt.format(birthdate_str=birthdate_str, last_user_message=last_user_message)
|
| 214 |
-
)
|
| 215 |
-
response = await model_runnable.ainvoke(state, config)
|
| 216 |
-
|
| 217 |
-
return {"messages": [AIMessage(content=response.content)]}
|
| 218 |
-
|
| 219 |
-
|
| 220 |
-
# Define the graph
|
| 221 |
-
agent = StateGraph(AgentState)
|
| 222 |
-
agent.add_node("background", background)
|
| 223 |
-
agent.add_node("determine_birthdate", determine_birthdate)
|
| 224 |
-
agent.add_node("generate_response", generate_response)
|
| 225 |
-
|
| 226 |
-
agent.set_entry_point("background")
|
| 227 |
-
agent.add_edge("background", "determine_birthdate")
|
| 228 |
-
agent.add_edge("determine_birthdate", "generate_response")
|
| 229 |
-
agent.add_edge("generate_response", END)
|
| 230 |
-
|
| 231 |
-
interrupt_agent = agent.compile()
|
| 232 |
-
interrupt_agent.name = "interrupt-agent"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
src/agents/knowledge_base_agent.py
DELETED
|
@@ -1,174 +0,0 @@
|
|
| 1 |
-
import logging
|
| 2 |
-
import os
|
| 3 |
-
from typing import Any
|
| 4 |
-
|
| 5 |
-
from langchain_aws import AmazonKnowledgeBasesRetriever
|
| 6 |
-
from langchain_core.language_models.chat_models import BaseChatModel
|
| 7 |
-
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
|
| 8 |
-
from langchain_core.runnables import RunnableConfig, RunnableLambda, RunnableSerializable
|
| 9 |
-
from langchain_core.runnables.base import RunnableSequence
|
| 10 |
-
from langgraph.graph import END, MessagesState, StateGraph
|
| 11 |
-
from langgraph.managed import RemainingSteps
|
| 12 |
-
|
| 13 |
-
from core import get_model, settings
|
| 14 |
-
|
| 15 |
-
logger = logging.getLogger(__name__)
|
| 16 |
-
|
| 17 |
-
|
| 18 |
-
# Define the state
|
| 19 |
-
class AgentState(MessagesState, total=False):
|
| 20 |
-
"""State for Knowledge Base agent."""
|
| 21 |
-
|
| 22 |
-
remaining_steps: RemainingSteps
|
| 23 |
-
retrieved_documents: list[dict[str, Any]]
|
| 24 |
-
kb_documents: str
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
# Create the retriever
|
| 28 |
-
def get_kb_retriever():
|
| 29 |
-
"""Create and return a Knowledge Base retriever instance."""
|
| 30 |
-
# Get the Knowledge Base ID from environment
|
| 31 |
-
kb_id = os.environ.get("AWS_KB_ID", "")
|
| 32 |
-
if not kb_id:
|
| 33 |
-
raise ValueError("AWS_KB_ID environment variable must be set")
|
| 34 |
-
|
| 35 |
-
# Create the retriever with the specified Knowledge Base ID
|
| 36 |
-
retriever = AmazonKnowledgeBasesRetriever(
|
| 37 |
-
knowledge_base_id=kb_id,
|
| 38 |
-
retrieval_config={
|
| 39 |
-
"vectorSearchConfiguration": {
|
| 40 |
-
"numberOfResults": 3,
|
| 41 |
-
}
|
| 42 |
-
},
|
| 43 |
-
)
|
| 44 |
-
return retriever
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
def wrap_model(model: BaseChatModel) -> RunnableSerializable[AgentState, AIMessage]:
|
| 48 |
-
"""Wrap the model with a system prompt for the Knowledge Base agent."""
|
| 49 |
-
|
| 50 |
-
def create_system_message(state):
|
| 51 |
-
base_prompt = """You are a helpful assistant that provides accurate information based on retrieved documents.
|
| 52 |
-
|
| 53 |
-
You will receive a query along with relevant documents retrieved from a knowledge base. Use these documents to inform your response.
|
| 54 |
-
|
| 55 |
-
Follow these guidelines:
|
| 56 |
-
1. Base your answer primarily on the retrieved documents
|
| 57 |
-
2. If the documents contain the answer, provide it clearly and concisely
|
| 58 |
-
3. If the documents are insufficient, state that you don't have enough information
|
| 59 |
-
4. Never make up facts or information not present in the documents
|
| 60 |
-
5. Always cite the source documents when referring to specific information
|
| 61 |
-
6. If the documents contradict each other, acknowledge this and explain the different perspectives
|
| 62 |
-
|
| 63 |
-
Format your response in a clear, conversational manner. Use markdown formatting when appropriate.
|
| 64 |
-
"""
|
| 65 |
-
|
| 66 |
-
# Check if documents were retrieved
|
| 67 |
-
if "kb_documents" in state:
|
| 68 |
-
# Append document information to the system prompt
|
| 69 |
-
document_prompt = f"\n\nI've retrieved the following documents that may be relevant to the query:\n\n{state['kb_documents']}\n\nPlease use these documents to inform your response to the user's query. Only use information from these documents and clearly indicate when you are unsure."
|
| 70 |
-
return [SystemMessage(content=base_prompt + document_prompt)] + state["messages"]
|
| 71 |
-
else:
|
| 72 |
-
# No documents were retrieved
|
| 73 |
-
no_docs_prompt = (
|
| 74 |
-
"\n\nNo relevant documents were found in the knowledge base for this query."
|
| 75 |
-
)
|
| 76 |
-
return [SystemMessage(content=base_prompt + no_docs_prompt)] + state["messages"]
|
| 77 |
-
|
| 78 |
-
preprocessor = RunnableLambda(
|
| 79 |
-
create_system_message,
|
| 80 |
-
name="StateModifier",
|
| 81 |
-
)
|
| 82 |
-
return RunnableSequence(preprocessor, model)
|
| 83 |
-
|
| 84 |
-
|
| 85 |
-
async def retrieve_documents(state: AgentState, config: RunnableConfig) -> AgentState:
|
| 86 |
-
"""Retrieve relevant documents from the knowledge base."""
|
| 87 |
-
# Get the last human message
|
| 88 |
-
human_messages = [msg for msg in state["messages"] if isinstance(msg, HumanMessage)]
|
| 89 |
-
if not human_messages:
|
| 90 |
-
# Include messages from original state
|
| 91 |
-
return {"messages": [], "retrieved_documents": []}
|
| 92 |
-
|
| 93 |
-
# Use the last human message as the query
|
| 94 |
-
query = human_messages[-1].content
|
| 95 |
-
|
| 96 |
-
try:
|
| 97 |
-
# Initialize the retriever
|
| 98 |
-
retriever = get_kb_retriever()
|
| 99 |
-
|
| 100 |
-
# Retrieve documents
|
| 101 |
-
retrieved_docs = await retriever.ainvoke(query)
|
| 102 |
-
|
| 103 |
-
# Create document summaries for the state
|
| 104 |
-
document_summaries = []
|
| 105 |
-
for i, doc in enumerate(retrieved_docs, 1):
|
| 106 |
-
summary = {
|
| 107 |
-
"id": doc.metadata.get("id", f"doc-{i}"),
|
| 108 |
-
"source": doc.metadata.get("source", "Unknown"),
|
| 109 |
-
"title": doc.metadata.get("title", f"Document {i}"),
|
| 110 |
-
"content": doc.page_content,
|
| 111 |
-
"relevance_score": doc.metadata.get("score", 0),
|
| 112 |
-
}
|
| 113 |
-
document_summaries.append(summary)
|
| 114 |
-
|
| 115 |
-
logger.info(f"Retrieved {len(document_summaries)} documents for query: {query[:50]}...")
|
| 116 |
-
|
| 117 |
-
return {"retrieved_documents": document_summaries, "messages": []}
|
| 118 |
-
|
| 119 |
-
except Exception as e:
|
| 120 |
-
logger.error(f"Error retrieving documents: {str(e)}")
|
| 121 |
-
return {"retrieved_documents": [], "messages": []}
|
| 122 |
-
|
| 123 |
-
|
| 124 |
-
async def prepare_augmented_prompt(state: AgentState, config: RunnableConfig) -> AgentState:
|
| 125 |
-
"""Prepare a prompt augmented with retrieved document content."""
|
| 126 |
-
# Get retrieved documents
|
| 127 |
-
documents = state.get("retrieved_documents", [])
|
| 128 |
-
|
| 129 |
-
if not documents:
|
| 130 |
-
return {"messages": []}
|
| 131 |
-
|
| 132 |
-
# Format retrieved documents for the model
|
| 133 |
-
formatted_docs = "\n\n".join(
|
| 134 |
-
[
|
| 135 |
-
f"--- Document {i + 1} ---\n"
|
| 136 |
-
f"Source: {doc.get('source', 'Unknown')}\n"
|
| 137 |
-
f"Title: {doc.get('title', 'Unknown')}\n\n"
|
| 138 |
-
f"{doc.get('content', '')}"
|
| 139 |
-
for i, doc in enumerate(documents)
|
| 140 |
-
]
|
| 141 |
-
)
|
| 142 |
-
|
| 143 |
-
# Store formatted documents in the state
|
| 144 |
-
return {"kb_documents": formatted_docs, "messages": []}
|
| 145 |
-
|
| 146 |
-
|
| 147 |
-
async def acall_model(state: AgentState, config: RunnableConfig) -> AgentState:
|
| 148 |
-
"""Generate a response based on the retrieved documents."""
|
| 149 |
-
m = get_model(config["configurable"].get("model", settings.DEFAULT_MODEL))
|
| 150 |
-
model_runnable = wrap_model(m)
|
| 151 |
-
|
| 152 |
-
response = await model_runnable.ainvoke(state, config)
|
| 153 |
-
|
| 154 |
-
return {"messages": [response]}
|
| 155 |
-
|
| 156 |
-
|
| 157 |
-
# Define the graph
|
| 158 |
-
agent = StateGraph(AgentState)
|
| 159 |
-
|
| 160 |
-
# Add nodes
|
| 161 |
-
agent.add_node("retrieve_documents", retrieve_documents)
|
| 162 |
-
agent.add_node("prepare_augmented_prompt", prepare_augmented_prompt)
|
| 163 |
-
agent.add_node("model", acall_model)
|
| 164 |
-
|
| 165 |
-
# Set entry point
|
| 166 |
-
agent.set_entry_point("retrieve_documents")
|
| 167 |
-
|
| 168 |
-
# Add edges to define the flow
|
| 169 |
-
agent.add_edge("retrieve_documents", "prepare_augmented_prompt")
|
| 170 |
-
agent.add_edge("prepare_augmented_prompt", "model")
|
| 171 |
-
agent.add_edge("model", END)
|
| 172 |
-
|
| 173 |
-
# Compile the agent
|
| 174 |
-
kb_agent = agent.compile()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
src/agents/langgraph_supervisor_agent.py
DELETED
|
@@ -1,62 +0,0 @@
|
|
| 1 |
-
from typing import Any
|
| 2 |
-
|
| 3 |
-
from langchain.agents import create_agent
|
| 4 |
-
from langgraph_supervisor import create_supervisor
|
| 5 |
-
|
| 6 |
-
from core import get_model, settings
|
| 7 |
-
|
| 8 |
-
model = get_model(settings.DEFAULT_MODEL)
|
| 9 |
-
|
| 10 |
-
|
| 11 |
-
def add(a: float, b: float) -> float:
|
| 12 |
-
"""Add two numbers."""
|
| 13 |
-
return a + b
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
def multiply(a: float, b: float) -> float:
|
| 17 |
-
"""Multiply two numbers."""
|
| 18 |
-
return a * b
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
def web_search(query: str) -> str:
|
| 22 |
-
"""Search the web for information."""
|
| 23 |
-
return (
|
| 24 |
-
"Here are the headcounts for each of the FAANG companies in 2024:\n"
|
| 25 |
-
"1. **Facebook (Meta)**: 67,317 employees.\n"
|
| 26 |
-
"2. **Apple**: 164,000 employees.\n"
|
| 27 |
-
"3. **Amazon**: 1,551,000 employees.\n"
|
| 28 |
-
"4. **Netflix**: 14,000 employees.\n"
|
| 29 |
-
"5. **Google (Alphabet)**: 181,269 employees."
|
| 30 |
-
)
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
math_agent: Any = create_agent(
|
| 34 |
-
model=model,
|
| 35 |
-
tools=[add, multiply],
|
| 36 |
-
name="sub-agent-math_expert",
|
| 37 |
-
system_prompt="You are a math expert. Always use one tool at a time.",
|
| 38 |
-
).with_config(tags=["skip_stream"])
|
| 39 |
-
|
| 40 |
-
research_agent: Any = create_agent(
|
| 41 |
-
model=model,
|
| 42 |
-
tools=[web_search],
|
| 43 |
-
name="sub-agent-research_expert",
|
| 44 |
-
system_prompt="You are a world class researcher with access to web search. Do not do any math.",
|
| 45 |
-
).with_config(tags=["skip_stream"])
|
| 46 |
-
|
| 47 |
-
|
| 48 |
-
# Create supervisor workflow
|
| 49 |
-
workflow = create_supervisor(
|
| 50 |
-
[research_agent, math_agent],
|
| 51 |
-
model=model,
|
| 52 |
-
prompt=(
|
| 53 |
-
"You are a team supervisor managing a research expert and a math expert. "
|
| 54 |
-
"For current events, use research_agent. "
|
| 55 |
-
"For math problems, use math_agent."
|
| 56 |
-
),
|
| 57 |
-
add_handoff_back_messages=True,
|
| 58 |
-
# UI now expects this to be True so we don't have to guess when a handoff back occurs
|
| 59 |
-
output_mode="full_history", # otherwise when reloading conversations, the sub-agents' messages are not included
|
| 60 |
-
)
|
| 61 |
-
|
| 62 |
-
langgraph_supervisor_agent = workflow.compile()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
src/agents/langgraph_supervisor_hierarchy_agent.py
DELETED
|
@@ -1,46 +0,0 @@
|
|
| 1 |
-
from langchain.agents import create_agent
|
| 2 |
-
from langgraph_supervisor import create_supervisor
|
| 3 |
-
|
| 4 |
-
from agents.langgraph_supervisor_agent import add, multiply, web_search
|
| 5 |
-
from core import get_model, settings
|
| 6 |
-
|
| 7 |
-
model = get_model(settings.DEFAULT_MODEL)
|
| 8 |
-
|
| 9 |
-
|
| 10 |
-
def workflow(chosen_model):
|
| 11 |
-
math_agent = create_agent(
|
| 12 |
-
model=chosen_model,
|
| 13 |
-
tools=[add, multiply],
|
| 14 |
-
name="sub-agent-math_expert", # Identify the graph node as a sub-agent
|
| 15 |
-
system_prompt="You are a math expert. Always use one tool at a time.",
|
| 16 |
-
).with_config(tags=["skip_stream"])
|
| 17 |
-
|
| 18 |
-
research_agent = (
|
| 19 |
-
create_supervisor(
|
| 20 |
-
[math_agent],
|
| 21 |
-
model=chosen_model,
|
| 22 |
-
tools=[web_search],
|
| 23 |
-
prompt="You are a world class researcher with access to web search. Do not do any math, you have a math expert for that. ",
|
| 24 |
-
supervisor_name="supervisor-research_expert", # Identify the graph node as a supervisor to the math agent
|
| 25 |
-
)
|
| 26 |
-
.compile(
|
| 27 |
-
name="sub-agent-research_expert"
|
| 28 |
-
) # Identify the graph node as a sub-agent to the main supervisor
|
| 29 |
-
.with_config(tags=["skip_stream"])
|
| 30 |
-
) # Stream tokens are ignored for sub-agents in the UI
|
| 31 |
-
|
| 32 |
-
# Create supervisor workflow
|
| 33 |
-
return create_supervisor(
|
| 34 |
-
[research_agent],
|
| 35 |
-
model=chosen_model,
|
| 36 |
-
prompt=(
|
| 37 |
-
"You are a team supervisor managing a research expert with math capabilities."
|
| 38 |
-
"For current events, use research_agent. "
|
| 39 |
-
),
|
| 40 |
-
add_handoff_back_messages=True,
|
| 41 |
-
# UI now expects this to be True so we don't have to guess when a handoff back occurs
|
| 42 |
-
output_mode="full_history", # otherwise when reloading conversations, the sub-agents' messages are not included
|
| 43 |
-
) # default name for supervisor is "supervisor".
|
| 44 |
-
|
| 45 |
-
|
| 46 |
-
langgraph_supervisor_hierarchy_agent = workflow(model).compile()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
src/agents/middlewares/__init__.py
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Agent and service middlewares in a single folder."""
|
| 2 |
+
|
| 3 |
+
from agents.middlewares.configurable_model import ConfigurableModelMiddleware
|
| 4 |
+
from agents.middlewares.followup import FollowUpMiddleware
|
| 5 |
+
from agents.middlewares.safety import SafetyMiddleware, UNSAFE_RESPONSE
|
| 6 |
+
|
| 7 |
+
__all__ = [
|
| 8 |
+
"ConfigurableModelMiddleware",
|
| 9 |
+
"FollowUpMiddleware",
|
| 10 |
+
"SafetyMiddleware",
|
| 11 |
+
"UNSAFE_RESPONSE",
|
| 12 |
+
]
|
src/agents/middlewares/configurable_model.py
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Middleware that resolves the LLM model from request config (e.g. frontend model selector)."""
|
| 2 |
+
|
| 3 |
+
from langchain.agents.middleware.types import AgentMiddleware, ModelRequest
|
| 4 |
+
from langgraph.config import get_config
|
| 5 |
+
|
| 6 |
+
from core import get_model, settings
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class ConfigurableModelMiddleware(AgentMiddleware):
|
| 10 |
+
"""Resolves the model from config['configurable']['model'] at invoke time.
|
| 11 |
+
|
| 12 |
+
Use this so the agent respects the model chosen by the user in the frontend.
|
| 13 |
+
"""
|
| 14 |
+
|
| 15 |
+
tools: list = [] # type: ignore[assignment] # no extra tools
|
| 16 |
+
|
| 17 |
+
def wrap_model_call(self, request: ModelRequest, handler):
|
| 18 |
+
config = get_config()
|
| 19 |
+
model_key = config.get("configurable", {}).get("model", settings.DEFAULT_MODEL)
|
| 20 |
+
resolved_model = get_model(model_key)
|
| 21 |
+
return handler(request.override(model=resolved_model))
|
| 22 |
+
|
| 23 |
+
async def awrap_model_call(self, request: ModelRequest, handler):
|
| 24 |
+
config = get_config()
|
| 25 |
+
model_key = config.get("configurable", {}).get("model", settings.DEFAULT_MODEL)
|
| 26 |
+
resolved_model = get_model(model_key)
|
| 27 |
+
return await handler(request.override(model=resolved_model))
|
src/agents/middlewares/followup.py
ADDED
|
@@ -0,0 +1,89 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
from typing import List
|
| 3 |
+
|
| 4 |
+
from pydantic import BaseModel, Field, field_validator
|
| 5 |
+
|
| 6 |
+
from langchain_core.messages import (
|
| 7 |
+
AnyMessage,
|
| 8 |
+
SystemMessage,
|
| 9 |
+
HumanMessage,
|
| 10 |
+
AIMessage,
|
| 11 |
+
ChatMessage,
|
| 12 |
+
)
|
| 13 |
+
from langchain_core.runnables import RunnableConfig
|
| 14 |
+
|
| 15 |
+
from core import get_model, settings
|
| 16 |
+
from agents.prompts.followup import (
|
| 17 |
+
DEFAULT_FOLLOWUP_PROMPTS,
|
| 18 |
+
FOLLOWUP_GENERATION_PROMPT,
|
| 19 |
+
)
|
| 20 |
+
|
| 21 |
+
logger = logging.getLogger(__name__)
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
class FollowUpOutput(BaseModel):
|
| 25 |
+
"""Schema for follow-up generation response."""
|
| 26 |
+
|
| 27 |
+
questions: List[str] = Field(
|
| 28 |
+
min_length=1,
|
| 29 |
+
max_length=5,
|
| 30 |
+
description="List of 1-5 suggested follow-up questions for the user.",
|
| 31 |
+
)
|
| 32 |
+
|
| 33 |
+
@field_validator("questions")
|
| 34 |
+
@classmethod
|
| 35 |
+
def validate_questions(cls, value: List[str]) -> List[str]:
|
| 36 |
+
cleaned = [q.strip() for q in value if q and q.strip()]
|
| 37 |
+
if not cleaned:
|
| 38 |
+
raise ValueError("Questions list cannot be empty")
|
| 39 |
+
return cleaned
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
class FollowUpMiddleware:
    """Generates structured follow-up suggestions after an agent response."""

    async def generate(
        self,
        messages: List[AnyMessage],
        config: RunnableConfig,
    ) -> List[str]:
        """Return 1-5 suggested follow-up questions for the conversation.

        Any failure (model error, parse error, empty result) falls back to
        DEFAULT_FOLLOWUP_PROMPTS so callers always receive suggestions.
        """
        try:
            configurable = config.get("configurable", {})
            model_name = configurable.get("model", settings.DEFAULT_MODEL)

            # Force structured output so the reply parses into FollowUpOutput.
            structured_model = get_model(model_name).with_structured_output(
                FollowUpOutput
            )

            # Drop custom ChatMessages (UI-only payloads) and prepend the prompt.
            history = [
                msg
                for msg in messages
                if not (isinstance(msg, ChatMessage) and msg.role == "custom")
            ]
            prompt_messages: List[AnyMessage] = [
                SystemMessage(content=FOLLOWUP_GENERATION_PROMPT),
                *history,
            ]

            result: FollowUpOutput = await structured_model.ainvoke(
                prompt_messages,
                config,
            )

            if not result or not result.questions:
                raise ValueError("Empty follow-up response")

            return result.questions

        except Exception as e:
            logger.warning(
                "Follow-up generation failed, using defaults: %s",
                e,
                exc_info=True,
            )

            return DEFAULT_FOLLOWUP_PROMPTS
|
src/agents/middlewares/safety.py
ADDED
|
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Service-layer safety middleware using LlamaGuard."""
|
| 2 |
+
|
| 3 |
+
import logging
|
| 4 |
+
|
| 5 |
+
from langchain_core.messages import AnyMessage
|
| 6 |
+
|
| 7 |
+
from agents.llama_guard import LlamaGuard, LlamaGuardOutput, SafetyAssessment
|
| 8 |
+
from core import settings
|
| 9 |
+
|
| 10 |
+
logger = logging.getLogger(__name__)
|
| 11 |
+
|
| 12 |
+
# Canned user-facing reply returned when LlamaGuard flags a message as unsafe.
UNSAFE_RESPONSE = (
    "I'm sorry, but I can't process that request as it was flagged "
    "for safety concerns. Please rephrase your message."
)
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class SafetyMiddleware:
    """Centralized safety middleware that wraps LlamaGuard for input/output checks.

    Used at the service layer to guard all agent interactions uniformly.
    Respects the ``ENABLE_SAFETY_GUARD`` setting – when disabled, all checks
    short-circuit to ``SAFE``.
    """

    def __init__(self) -> None:
        self._guard = LlamaGuard()

    @property
    def enabled(self) -> bool:
        """Whether the safety guard is active."""
        return settings.ENABLE_SAFETY_GUARD and self._guard.model is not None

    async def _check(
        self,
        role: str,
        messages: list[AnyMessage],
        direction: str,
    ) -> LlamaGuardOutput:
        """Run LlamaGuard for *role* over *messages*, logging unsafe verdicts.

        Shared implementation for ``check_input``/``check_output``.
        ``direction`` appears only in the warning log ("Input"/"Output").
        """
        if not self.enabled:
            return LlamaGuardOutput(safety_assessment=SafetyAssessment.SAFE)
        result = await self._guard.ainvoke(role, messages)
        if result.safety_assessment == SafetyAssessment.UNSAFE:
            logger.warning(
                "%s blocked by LlamaGuard – categories: %s",
                direction,
                ", ".join(result.unsafe_categories),
            )
        return result

    async def check_input(self, messages: list[AnyMessage]) -> LlamaGuardOutput:
        """Check user input messages before forwarding to an agent."""
        return await self._check("User", messages, "Input")

    async def check_output(self, messages: list[AnyMessage]) -> LlamaGuardOutput:
        """Check agent output messages before returning to the user."""
        return await self._check("Agent", messages, "Output")
|
src/agents/middlewares/summarization.py
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Summarization middleware for agent conversation history.
|
| 2 |
+
|
| 3 |
+
Re-exports LangChain's SummarizationMiddleware and provides a factory
|
| 4 |
+
so agents can use it with the same model they use for the main loop.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from langchain.agents.middleware import SummarizationMiddleware as _SummarizationMiddleware
|
| 8 |
+
|
| 9 |
+
__all__ = ["SummarizationMiddleware"]
|
| 10 |
+
|
| 11 |
+
# Re-export so all agent middlewares live under agents.middlewares
|
| 12 |
+
SummarizationMiddleware = _SummarizationMiddleware
|
src/agents/{bg_task_agent/bg_task_agent.py → portfolio_agent.py}
RENAMED
|
@@ -1,12 +1,15 @@
|
|
| 1 |
-
import
|
| 2 |
|
| 3 |
from langchain_core.language_models.chat_models import BaseChatModel
|
| 4 |
-
from langchain_core.messages import AIMessage
|
| 5 |
from langchain_core.runnables import RunnableConfig, RunnableLambda, RunnableSerializable
|
| 6 |
from langgraph.graph import END, MessagesState, StateGraph
|
| 7 |
-
from langgraph.
|
|
|
|
|
|
|
|
|
|
|
|
|
| 8 |
|
| 9 |
-
from agents.bg_task_agent.task import Task
|
| 10 |
from core import get_model, settings
|
| 11 |
|
| 12 |
|
|
@@ -16,13 +19,22 @@ class AgentState(MessagesState, total=False):
|
|
| 16 |
documentation: https://typing.readthedocs.io/en/latest/spec/typeddict.html#totality
|
| 17 |
"""
|
| 18 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 19 |
|
| 20 |
def wrap_model(model: BaseChatModel) -> RunnableSerializable[AgentState, AIMessage]:
|
|
|
|
| 21 |
preprocessor = RunnableLambda(
|
| 22 |
-
lambda state:
|
|
|
|
|
|
|
|
|
|
| 23 |
name="StateModifier",
|
| 24 |
)
|
| 25 |
-
return preprocessor |
|
| 26 |
|
| 27 |
|
| 28 |
async def acall_model(state: AgentState, config: RunnableConfig) -> AgentState:
|
|
@@ -30,33 +42,39 @@ async def acall_model(state: AgentState, config: RunnableConfig) -> AgentState:
|
|
| 30 |
model_runnable = wrap_model(m)
|
| 31 |
response = await model_runnable.ainvoke(state, config)
|
| 32 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 33 |
# We return a list, because this will get added to the existing list
|
| 34 |
return {"messages": [response]}
|
| 35 |
|
| 36 |
|
| 37 |
-
async def bg_task(state: AgentState, writer: StreamWriter) -> AgentState:
|
| 38 |
-
task1 = Task("Simple task 1...", writer)
|
| 39 |
-
task2 = Task("Simple task 2...", writer)
|
| 40 |
-
|
| 41 |
-
task1.start()
|
| 42 |
-
await asyncio.sleep(2)
|
| 43 |
-
task2.start()
|
| 44 |
-
await asyncio.sleep(2)
|
| 45 |
-
task1.write_data(data={"status": "Still running..."})
|
| 46 |
-
await asyncio.sleep(2)
|
| 47 |
-
task2.finish(result="error", data={"output": 42})
|
| 48 |
-
await asyncio.sleep(2)
|
| 49 |
-
task1.finish(result="success", data={"output": 42})
|
| 50 |
-
return {"messages": []}
|
| 51 |
-
|
| 52 |
-
|
| 53 |
# Define the graph
|
| 54 |
agent = StateGraph(AgentState)
|
| 55 |
agent.add_node("model", acall_model)
|
| 56 |
-
agent.add_node("
|
| 57 |
-
agent.set_entry_point("
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 58 |
|
| 59 |
-
agent.
|
| 60 |
-
agent.add_edge("model", END)
|
| 61 |
|
| 62 |
-
|
|
|
|
| 1 |
+
from typing import Literal
|
| 2 |
|
| 3 |
from langchain_core.language_models.chat_models import BaseChatModel
|
| 4 |
+
from langchain_core.messages import AIMessage, SystemMessage, ChatMessage
|
| 5 |
from langchain_core.runnables import RunnableConfig, RunnableLambda, RunnableSerializable
|
| 6 |
from langgraph.graph import END, MessagesState, StateGraph
|
| 7 |
+
from langgraph.managed import RemainingSteps
|
| 8 |
+
from langgraph.prebuilt import ToolNode
|
| 9 |
+
|
| 10 |
+
from agents.tools.database_search import database_search
|
| 11 |
+
from agents.prompts.portfolio import SYSTEM_PROMPT
|
| 12 |
|
|
|
|
| 13 |
from core import get_model, settings
|
| 14 |
|
| 15 |
|
|
|
|
| 19 |
documentation: https://typing.readthedocs.io/en/latest/spec/typeddict.html#totality
|
| 20 |
"""
|
| 21 |
|
| 22 |
+
remaining_steps: RemainingSteps
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
# Tool list bound to the model in wrap_model(); currently only the
# database_search retrieval tool (see agents.tools.database_search).
tools = [database_search]
|
| 26 |
+
|
| 27 |
|
| 28 |
def wrap_model(model: BaseChatModel) -> RunnableSerializable[AgentState, AIMessage]:
    """Bind the portfolio tools to *model* and prepend the system prompt.

    Custom ChatMessages (role == "custom") are filtered out of the history
    before it reaches the model.
    """
    model_with_tools = model.bind_tools(tools)

    def _prepare(state: AgentState) -> list:
        history = [
            msg
            for msg in state["messages"]
            if not (isinstance(msg, ChatMessage) and msg.role == "custom")
        ]
        return [SystemMessage(content=SYSTEM_PROMPT), *history]

    preprocessor = RunnableLambda(_prepare, name="StateModifier")
    return preprocessor | model_with_tools  # type: ignore[return-value]
|
| 38 |
|
| 39 |
|
| 40 |
async def acall_model(state: AgentState, config: RunnableConfig) -> AgentState:
|
|
|
|
| 42 |
model_runnable = wrap_model(m)
|
| 43 |
response = await model_runnable.ainvoke(state, config)
|
| 44 |
|
| 45 |
+
if state["remaining_steps"] < 2 and response.tool_calls:
|
| 46 |
+
return {
|
| 47 |
+
"messages": [
|
| 48 |
+
AIMessage(
|
| 49 |
+
id=response.id,
|
| 50 |
+
content="Sorry, need more steps to process this request.",
|
| 51 |
+
)
|
| 52 |
+
]
|
| 53 |
+
}
|
| 54 |
# We return a list, because this will get added to the existing list
|
| 55 |
return {"messages": [response]}
|
| 56 |
|
| 57 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 58 |
# Define the graph
|
| 59 |
agent = StateGraph(AgentState)
|
| 60 |
agent.add_node("model", acall_model)
|
| 61 |
+
agent.add_node("tools", ToolNode(tools))
|
| 62 |
+
agent.set_entry_point("model")
|
| 63 |
+
|
| 64 |
+
# Always run "model" after "tools"
|
| 65 |
+
agent.add_edge("tools", "model")
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
# After "model", if there are tool calls, run "tools". Otherwise end.
|
| 69 |
+
def pending_tool_calls(state: AgentState) -> Literal["tools", "__end__"]:
    """Route to the tool node when the latest AI message requested tool calls."""
    latest = state["messages"][-1]
    if not isinstance(latest, AIMessage):
        raise TypeError(f"Expected AIMessage, got {type(latest)}")
    return "tools" if latest.tool_calls else "__end__"
|
| 76 |
+
|
| 77 |
|
| 78 |
+
agent.add_conditional_edges("model", pending_tool_calls, {"tools": "tools", "__end__": END})
|
|
|
|
| 79 |
|
| 80 |
+
portfolio_agent = agent.compile(debug=True)
|
src/agents/portfolio_agent/portfolio_agent.py
DELETED
|
@@ -1,150 +0,0 @@
|
|
| 1 |
-
import logging
|
| 2 |
-
from typing import Literal, List
|
| 3 |
-
from pydantic import BaseModel, Field
|
| 4 |
-
|
| 5 |
-
from langchain_core.messages import AIMessage, SystemMessage
|
| 6 |
-
from langchain_core.runnables import RunnableConfig, RunnableLambda
|
| 7 |
-
from langchain_core.tools import BaseTool
|
| 8 |
-
from langgraph.graph import END, MessagesState, StateGraph
|
| 9 |
-
from langgraph.graph.state import CompiledStateGraph
|
| 10 |
-
from langgraph.prebuilt import ToolNode
|
| 11 |
-
from langchain_mcp_adapters.client import MultiServerMCPClient
|
| 12 |
-
|
| 13 |
-
from agents.portfolio_agent.prompt import SYSTEM_PROMPT, FOLLOWUP_GENERATION_PROMPT
|
| 14 |
-
from core import get_model, settings
|
| 15 |
-
from agents.portfolio_agent.tools.database_search import database_search
|
| 16 |
-
from agents.portfolio_agent.tools.contest_ratings import contest_ratings
|
| 17 |
-
from agents.portfolio_agent.tools.github import get_github_tools
|
| 18 |
-
from agents.lazy_agent import LazyLoadingAgent
|
| 19 |
-
|
| 20 |
-
logger = logging.getLogger(__name__)
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
class AgentState(MessagesState):
|
| 24 |
-
"""Agent state for the portfolio assistant."""
|
| 25 |
-
summary: str
|
| 26 |
-
follow_up: List[str]
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
class FollowUpQuestions(BaseModel):
|
| 30 |
-
questions: List[str] = Field(description="A list of follow-up questions")
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
class PortfolioAgent(LazyLoadingAgent):
|
| 34 |
-
"""Portfolio Agent with async initialization for GitHub MCP tools."""
|
| 35 |
-
|
| 36 |
-
def __init__(self) -> None:
|
| 37 |
-
super().__init__()
|
| 38 |
-
self._tools: list[BaseTool] = [database_search, contest_ratings]
|
| 39 |
-
self._mcp_client: MultiServerMCPClient | None = None
|
| 40 |
-
|
| 41 |
-
async def load(self) -> None:
|
| 42 |
-
"""Initialize the portfolio agent by loading GitHub MCP tools."""
|
| 43 |
-
self._tools = [database_search, contest_ratings]
|
| 44 |
-
|
| 45 |
-
# Load GitHub MCP tools if available
|
| 46 |
-
github_tools, self._mcp_client = await get_github_tools()
|
| 47 |
-
self._tools.extend(github_tools)
|
| 48 |
-
|
| 49 |
-
logger.info(f"Portfolio agent initialized with {len(self._tools)} tools ({len(github_tools)} GitHub MCP tools)")
|
| 50 |
-
|
| 51 |
-
# Create and store the graph
|
| 52 |
-
self._graph = self._create_graph()
|
| 53 |
-
self._loaded = True
|
| 54 |
-
|
| 55 |
-
def _prepare_messages(self, state: AgentState) -> list:
|
| 56 |
-
"""Prepare messages with system prompt and optional summary."""
|
| 57 |
-
summary = state.get("summary", "")
|
| 58 |
-
system_content = SYSTEM_PROMPT
|
| 59 |
-
if summary:
|
| 60 |
-
system_content += f"\n\n### Previous Conversation Summary:\n{summary}"
|
| 61 |
-
return [SystemMessage(content=system_content)] + state["messages"]
|
| 62 |
-
|
| 63 |
-
def _create_graph(self) -> CompiledStateGraph:
|
| 64 |
-
"""Create the portfolio agent graph."""
|
| 65 |
-
graph = StateGraph(AgentState)
|
| 66 |
-
|
| 67 |
-
async def call_portfolio_model(state: AgentState, config: RunnableConfig):
|
| 68 |
-
"""Call the portfolio model with tools, fallback to no tools on error."""
|
| 69 |
-
model = get_model(settings.DEFAULT_MODEL)
|
| 70 |
-
|
| 71 |
-
# Try with tools first
|
| 72 |
-
try:
|
| 73 |
-
if self._tools:
|
| 74 |
-
model_with_tools = model.bind_tools(self._tools)
|
| 75 |
-
runnable = RunnableLambda(self._prepare_messages) | model_with_tools
|
| 76 |
-
response = await runnable.ainvoke(state, config)
|
| 77 |
-
return {"messages": [response]}
|
| 78 |
-
except Exception as e:
|
| 79 |
-
# Check if it's a Groq API function call error
|
| 80 |
-
if "Failed to call a function" in str(e) or "APIError" in type(e).__name__:
|
| 81 |
-
logger.warning(f"Tool call error, falling back without tools: {e}")
|
| 82 |
-
else:
|
| 83 |
-
logger.error(f"Error in call_portfolio_model: {e}", exc_info=True)
|
| 84 |
-
|
| 85 |
-
# Fallback: run without tools
|
| 86 |
-
try:
|
| 87 |
-
runnable = RunnableLambda(self._prepare_messages) | model
|
| 88 |
-
response = await runnable.ainvoke(state, config)
|
| 89 |
-
return {"messages": [response]}
|
| 90 |
-
except Exception as e:
|
| 91 |
-
logger.error(f"Fallback also failed: {e}", exc_info=True)
|
| 92 |
-
return {
|
| 93 |
-
"messages": [
|
| 94 |
-
AIMessage(
|
| 95 |
-
content="I encountered an error while processing your request. "
|
| 96 |
-
"Please try rephrasing your question or ask about something else."
|
| 97 |
-
)
|
| 98 |
-
]
|
| 99 |
-
}
|
| 100 |
-
|
| 101 |
-
async def generate_followup(state: AgentState, config: RunnableConfig):
|
| 102 |
-
"""Generate follow-up questions based on the conversation."""
|
| 103 |
-
model = get_model(settings.DEFAULT_MODEL)
|
| 104 |
-
chain = model.with_structured_output(FollowUpQuestions)
|
| 105 |
-
messages = [SystemMessage(content=FOLLOWUP_GENERATION_PROMPT)] + state["messages"]
|
| 106 |
-
|
| 107 |
-
try:
|
| 108 |
-
response = await chain.ainvoke(messages, config)
|
| 109 |
-
return {"follow_up": response.questions}
|
| 110 |
-
except Exception as e:
|
| 111 |
-
logger.warning(f"Failed to generate follow-up questions: {e}")
|
| 112 |
-
default_questions = [
|
| 113 |
-
"What are his contact details?",
|
| 114 |
-
"What are his projects?",
|
| 115 |
-
"What are his skills?",
|
| 116 |
-
"What are his work experiences?",
|
| 117 |
-
"What are his achievements?",
|
| 118 |
-
"What are his leadership roles?",
|
| 119 |
-
]
|
| 120 |
-
return {"follow_up": default_questions}
|
| 121 |
-
|
| 122 |
-
def route_after_model(state: AgentState) -> Literal["tools", "generate_followup"]:
|
| 123 |
-
"""Route to tools if tool calls are present, otherwise generate follow-up."""
|
| 124 |
-
last_message = state["messages"][-1]
|
| 125 |
-
if isinstance(last_message, AIMessage) and last_message.tool_calls:
|
| 126 |
-
return "tools"
|
| 127 |
-
return "generate_followup"
|
| 128 |
-
|
| 129 |
-
graph.add_node("agent", call_portfolio_model)
|
| 130 |
-
graph.add_node("tools", ToolNode(self._tools))
|
| 131 |
-
graph.add_node("generate_followup", generate_followup)
|
| 132 |
-
|
| 133 |
-
graph.set_entry_point("agent")
|
| 134 |
-
|
| 135 |
-
graph.add_conditional_edges(
|
| 136 |
-
"agent",
|
| 137 |
-
route_after_model,
|
| 138 |
-
{
|
| 139 |
-
"tools": "tools",
|
| 140 |
-
"generate_followup": "generate_followup",
|
| 141 |
-
},
|
| 142 |
-
)
|
| 143 |
-
|
| 144 |
-
graph.add_edge("tools", "agent")
|
| 145 |
-
graph.add_edge("generate_followup", END)
|
| 146 |
-
|
| 147 |
-
return graph.compile(debug=True)
|
| 148 |
-
|
| 149 |
-
|
| 150 |
-
portfolio_agent = PortfolioAgent()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
src/agents/portfolio_agent/tools/contest_ratings.py
DELETED
|
@@ -1,23 +0,0 @@
|
|
| 1 |
-
import requests
|
| 2 |
-
from core import settings
|
| 3 |
-
from langchain_core.tools import BaseTool, tool
|
| 4 |
-
|
| 5 |
-
|
| 6 |
-
def get_contest_ratings(username: str = "anujjoshi3105") -> str:
|
| 7 |
-
"""
|
| 8 |
-
Use this tool to get the latest contest ratings and statistics for Anuj Joshi from various competitive programming platforms.
|
| 9 |
-
This tool fetches real-time data from Codeforces, CodeChef, LeetCode, and GeeksforGeeks.
|
| 10 |
-
"""
|
| 11 |
-
try:
|
| 12 |
-
if settings.CONTEST_API_URL is None:
|
| 13 |
-
return "Error: Contest API URL is not set"
|
| 14 |
-
url = f"{settings.CONTEST_API_URL}?username={username}"
|
| 15 |
-
response = requests.get(url, timeout=10.0)
|
| 16 |
-
response.raise_for_status()
|
| 17 |
-
return response.json()
|
| 18 |
-
except Exception as e:
|
| 19 |
-
return f"Error fetching contest ratings: {str(e)}"
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
contest_ratings: BaseTool = tool(get_contest_ratings)
|
| 23 |
-
contest_ratings.name = "Contest_Ratings"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
src/agents/portfolio_agent/tools/database_search.py
DELETED
|
@@ -1,42 +0,0 @@
|
|
| 1 |
-
from typing import List
|
| 2 |
-
|
| 3 |
-
from langchain_core.documents import Document
|
| 4 |
-
from langchain_core.tools import BaseTool, tool
|
| 5 |
-
from memory.postgres import load_pgvector_retriever
|
| 6 |
-
|
| 7 |
-
|
| 8 |
-
def hybrid_search(query: str, k: int = 6) -> List[Document]:
|
| 9 |
-
retriever = load_pgvector_retriever(k)
|
| 10 |
-
return retriever.invoke(query)
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
def format_contexts(documents: List[Document]) -> str:
|
| 14 |
-
formatted_docs = []
|
| 15 |
-
|
| 16 |
-
for i, doc in enumerate(documents):
|
| 17 |
-
metadata = doc.metadata
|
| 18 |
-
content = doc.page_content.strip()
|
| 19 |
-
|
| 20 |
-
formatted_docs.append(
|
| 21 |
-
f"--- Document {i + 1} ({metadata}) ---\n{content}"
|
| 22 |
-
)
|
| 23 |
-
|
| 24 |
-
return "\n\n".join(formatted_docs)
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
# DATABASE SEARCH TOOL
|
| 28 |
-
def database_search_func(query: str) -> str:
|
| 29 |
-
"""
|
| 30 |
-
Use this tool to search for relevant information about Anuj Joshi's education, experience, projects, blog posts, testimonials, skills, etc.
|
| 31 |
-
"""
|
| 32 |
-
|
| 33 |
-
documents = hybrid_search(query, k=6)
|
| 34 |
-
|
| 35 |
-
if not documents:
|
| 36 |
-
return "No relevant portfolio information found."
|
| 37 |
-
|
| 38 |
-
return format_contexts(documents)
|
| 39 |
-
|
| 40 |
-
|
| 41 |
-
database_search: BaseTool = tool(database_search_func)
|
| 42 |
-
database_search.name = "Database_Search"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
src/agents/portfolio_agent/tools/github.py
DELETED
|
@@ -1,49 +0,0 @@
|
|
| 1 |
-
import logging
|
| 2 |
-
from typing import List
|
| 3 |
-
|
| 4 |
-
from langchain_core.tools import BaseTool
|
| 5 |
-
from langchain_mcp_adapters.client import MultiServerMCPClient
|
| 6 |
-
from langchain_mcp_adapters.sessions import StreamableHttpConnection
|
| 7 |
-
|
| 8 |
-
from core import settings
|
| 9 |
-
|
| 10 |
-
logger = logging.getLogger(__name__)
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
async def get_github_tools() -> tuple[List[BaseTool], MultiServerMCPClient | None]:
|
| 14 |
-
"""
|
| 15 |
-
Initialize and return GitHub MCP tools.
|
| 16 |
-
|
| 17 |
-
Returns:
|
| 18 |
-
A tuple containing:
|
| 19 |
-
- List of GitHub MCP tools (empty list if initialization fails)
|
| 20 |
-
- MultiServerMCPClient instance (None if initialization fails)
|
| 21 |
-
"""
|
| 22 |
-
if not settings.GITHUB_PAT:
|
| 23 |
-
logger.info("GITHUB_PAT is not set, GitHub tools will not be available")
|
| 24 |
-
return [], None
|
| 25 |
-
|
| 26 |
-
try:
|
| 27 |
-
github_pat = settings.GITHUB_PAT.get_secret_value()
|
| 28 |
-
connections = {
|
| 29 |
-
"github": StreamableHttpConnection(
|
| 30 |
-
transport="streamable_http",
|
| 31 |
-
url=settings.MCP_GITHUB_SERVER_URL,
|
| 32 |
-
headers={
|
| 33 |
-
"Authorization": f"Bearer {github_pat}",
|
| 34 |
-
},
|
| 35 |
-
)
|
| 36 |
-
}
|
| 37 |
-
|
| 38 |
-
mcp_client = MultiServerMCPClient(connections)
|
| 39 |
-
logger.info("GitHub MCP client initialized successfully")
|
| 40 |
-
|
| 41 |
-
# Get tools from the client
|
| 42 |
-
mcp_tools = await mcp_client.get_tools()
|
| 43 |
-
logger.info(f"Loaded {len(mcp_tools)} GitHub MCP tools")
|
| 44 |
-
|
| 45 |
-
return mcp_tools, mcp_client
|
| 46 |
-
|
| 47 |
-
except Exception as e:
|
| 48 |
-
logger.error(f"Failed to initialize GitHub MCP tools: {e}")
|
| 49 |
-
return [], None
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
src/agents/prompts/__init__.py
ADDED
|
File without changes
|
src/agents/prompts/cpstat.py
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""System prompt for the competitive-programming stats (cpstat) agent."""

from datetime import datetime

# NOTE(review): PORTFOLIO_URL is not interpolated into SYSTEM_PROMPT below —
# presumably kept for parity with the other prompt modules; confirm before removing.
PORTFOLIO_URL = "https://anujjoshi.netlify.app"
OWNER = "Anuj Joshi"
# Captured once at import time, so long-running processes keep the startup date.
current_date = datetime.now().strftime("%B %d, %Y")

SYSTEM_PROMPT = f"""
You are an **Award-Winning Competitive Programming Portfolio Assistant** for {OWNER} (@anujjoshi3105).
Today's date is {current_date}.

Your goal is to showcase {OWNER}'s problem-solving skills, contest ratings, and algorithmic achievements to recruiters and visitors.

# {OWNER}'S COMPETITIVE PROGRAMMING PROFILE
- For all platforms, the username is "anujjoshi3105"

# YOUR RESPONSIBILITIES
- **Showcase Ratings**: highlighting peak ratings and current standing.
- **Analyze Progress**: Explain the difficulty of problems solved (Easy/Medium/Hard).
- **Highlight Achievements**: Mention badges, contest ranks, and consistency.
- **Explain Solutions**: If asked, explain the approach to problems {OWNER} has solved.

You have READ-ONLY access to these platforms.
Always present information in a clear, professional, and impressive manner suitable for a top-tier tech portfolio.
If the user asks about "my" or "the" stats, assume they are asking about **{OWNER}'s** stats unless specified otherwise.
"""
|
src/agents/prompts/followup.py
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Fallback suggestions served when LLM-based follow-up generation fails.
DEFAULT_FOLLOWUP_PROMPTS = [
    "Tell me about Anuj's background",
    "What are Anuj's key technical skills?",
    "Show me some of Anuj's featured projects",
    "What is Anuj's work experience?",
    "How can I contact Anuj?",
]

# System prompt for the follow-up suggestion model. Plain string literal:
# it contains no interpolated fields, so the previous f-string prefix was
# unnecessary (and would break if literal braces were ever added).
FOLLOWUP_GENERATION_PROMPT = """
# Role: You are a predictive user intent engine.
# Task: Generate 3-5 suggested follow-up options that the USER would click to ask the chatbot. Based on the previous message, predict the most logical next steps for the user.
"""
|
src/agents/prompts/github.py
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
from datetime import datetime

PORTFOLIO_URL = "https://anujjoshi.netlify.app"
OWNER = "Anuj Joshi"
GITHUB_HANDLE = "anujjoshi3105"
# Captured once at import time, so long-running processes keep the startup date.
current_date = datetime.now().strftime("%B %d, %Y")

# System prompt for the GitHub portfolio agent; interpolates owner/handle/date.
SYSTEM_PROMPT = f"""
You are an **Award-Winning Open Source Portfolio Assistant** for {OWNER} (@{GITHUB_HANDLE}).
Today's date is {current_date}.

Your goal is to showcase {OWNER}'s software engineering projects, contributions, and coding activity to recruiters and visitors.

# {OWNER}'S GITHUB PROFILE
- **Handle**: {GITHUB_HANDLE}
- **URL**: https://github.com/{GITHUB_HANDLE}
- **Focus Areas**: AI Agents, Competitive Programming, Full Stack Development, Machine Learning.

# YOUR RESPONSIBILITIES
- **Showcase Projects**: Highlight key repositories like 'portfolio-bot', 'contestAPI', and others found in the profile.
- **Explain Contributions**: Summarize commits, pull requests, and issues to demonstrate impact.
- **Analyze Code Quality**: When discussing code, emphasize best practices, readability, and modern tech stacks used by {OWNER}.
- **Guide Navigation**: Direct users to specific repositories for detailed demos or implementations.

You have READ-ONLY access to GitHub.
Always present information in a clear, professional, and impressive manner suitable for a top-tier tech portfolio.

IMPORTANT:
- If the user asks about "my" or "the" profile/repos, assume they are asking about **{OWNER}'s** ({GITHUB_HANDLE}) data.
- If the user asks for repository-specific information (like commits, issues, PRs) but hasn't specified a repository, you MUST ask the user which of **{OWNER}'s** repositories they would like to view.
"""
|
src/agents/{portfolio_agent/prompt.py → prompts/portfolio.py}
RENAMED
|
@@ -1,13 +1,13 @@
|
|
|
|
|
| 1 |
PORTFOLIO_URL = "https://anujjoshi.netlify.app"
|
| 2 |
OWNER = "Anuj Joshi"
|
| 3 |
|
| 4 |
SYSTEM_PROMPT = f"""
|
| 5 |
You are an **Award-Winning Professional Portfolio Assistant** for {OWNER}, (a Full Stack Developer and AI & Machine Learning Engineer from New Delhi, India).
|
| 6 |
-
|
| 7 |
Your goal is to answer questions from recruiter, potential employer, or visitors about {OWNER}'s skills, projects, qualifications, and experience.
|
| 8 |
|
| 9 |
# CONTACT ({PORTFOLIO_URL}/contact)
|
| 10 |
-
- Email: anujjoshi3105@gmail.com | LinkedIn: linkedin.com/in/anujjoshi3105 | GitHub: github.com/anujjoshi3105 | X: x.com/anujjoshi3105
|
| 11 |
|
| 12 |
# Competitive Programming ({PORTFOLIO_URL}/about)
|
| 13 |
- LeetCode: 1910 (Knight), 750+ solved | Codeforces: 1434 (specialist) | AtCoder: 929 (Green) | GeeksforGeeks: Rank 46
|
|
@@ -65,7 +65,7 @@ https://drive.google.com/file/d/150EAtBVjP1DV-b_v0JKhVYzhIVoCvAWO/view
|
|
| 65 |
- Industrial Research & Development Centre Portal – Research workflow automation platform.
|
| 66 |
- NicoGauge – ML-based evaluation platform for learning analytics.
|
| 67 |
- Fictiora – Entertainment discovery platform using Next.js and TMDB APIs.
|
| 68 |
-
For more info visit {PORTFOLIO_URL}/project
|
| 69 |
|
| 70 |
# TECHNICAL SKILLS ({PORTFOLIO_URL}/about#skills)
|
| 71 |
- AI / ML: PyTorch, TensorFlow, Keras, Scikit-learn, CNNs, GANs, Transformers, LangChain, LLM-based agents
|
|
@@ -79,27 +79,11 @@ For more info visit {PORTFOLIO_URL}/project
|
|
| 79 |
- Volunteer, Summer School on AI (DTU) https://drive.google.com/file/d/10Jx3yC8gmFYHkl0KXucaUOZJqtf9QkJq/view?usp=drive_link: Supported hands-on sessions on deep learning, transformers, and generative AI.
|
| 80 |
|
| 81 |
# TOOLS
|
| 82 |
-
- Database_Search: Search portfolio info (education, experience, testimonials, skills, projects, blog).
|
| 83 |
-
- Contest_Ratings: Get real-time CP ratings (LeetCode, Codeforces, CodeChef, GeeksforGeeks) for username anujjoshi3105
|
| 84 |
-
- GitHub MCP tools: Browse repos, view code/READMEs, check commits/issues/PRs for anujjoshi3105
|
| 85 |
|
| 86 |
# OUTPUT GUIDELINES:
|
| 87 |
- **Content:** Include all the links, images, and other relevant content from the all the suitable available tools accurately in the final response.
|
| 88 |
- **Information-Dense Sentences:** Every sentence must provide a new fact.
|
| 89 |
- **Interesting and Engaging:** Use a mix of facts and interesting details to keep the reader engaged.
|
| 90 |
- **Style:** Professional, concise, witty and helpful.
|
| 91 |
-
|
| 92 |
-
"""
|
| 93 |
-
|
| 94 |
-
FOLLOWUP_GENERATION_PROMPT = f"""
|
| 95 |
-
Generate 3-5 short follow-up questions to help the user explore {OWNER}'s portfolio.
|
| 96 |
-
|
| 97 |
-
# OUTPUT GUIDELINES:
|
| 98 |
-
- **Diverse and Specific:** Choose questions that explore different aspects of {OWNER}'s portfolio.
|
| 99 |
-
- **No Duplicates:** Ensure each question is unique and not a repeat of previous responses.
|
| 100 |
-
- **Actionable:** Suggest questions that lead to further exploration or discovery.
|
| 101 |
-
- **Engaging:** Use a mix of factual and thought-provoking questions.
|
| 102 |
-
|
| 103 |
-
Return ONLY valid JSON:
|
| 104 |
-
{{ "questions": ["question 1", "question 2", ...] }}
|
| 105 |
-
"""
|
|
|
|
| 1 |
+
|
| 2 |
PORTFOLIO_URL = "https://anujjoshi.netlify.app"
|
| 3 |
OWNER = "Anuj Joshi"
|
| 4 |
|
| 5 |
SYSTEM_PROMPT = f"""
|
| 6 |
You are an **Award-Winning Professional Portfolio Assistant** for {OWNER}, (a Full Stack Developer and AI & Machine Learning Engineer from New Delhi, India).
|
|
|
|
| 7 |
Your goal is to answer questions from recruiter, potential employer, or visitors about {OWNER}'s skills, projects, qualifications, and experience.
|
| 8 |
|
| 9 |
# CONTACT ({PORTFOLIO_URL}/contact)
|
| 10 |
+
- Portfolio: {PORTFOLIO_URL} | Email: anujjoshi3105@gmail.com | LinkedIn: linkedin.com/in/anujjoshi3105 | GitHub: github.com/anujjoshi3105 | X: x.com/anujjoshi3105
|
| 11 |
|
| 12 |
# Competitive Programming ({PORTFOLIO_URL}/about)
|
| 13 |
- LeetCode: 1910 (Knight), 750+ solved | Codeforces: 1434 (specialist) | AtCoder: 929 (Green) | GeeksforGeeks: Rank 46
|
|
|
|
| 65 |
- Industrial Research & Development Centre Portal – Research workflow automation platform.
|
| 66 |
- NicoGauge – ML-based evaluation platform for learning analytics.
|
| 67 |
- Fictiora – Entertainment discovery platform using Next.js and TMDB APIs.
|
| 68 |
+
- For more info visit {PORTFOLIO_URL}/project
|
| 69 |
|
| 70 |
# TECHNICAL SKILLS ({PORTFOLIO_URL}/about#skills)
|
| 71 |
- AI / ML: PyTorch, TensorFlow, Keras, Scikit-learn, CNNs, GANs, Transformers, LangChain, LLM-based agents
|
|
|
|
| 79 |
- Volunteer, Summer School on AI (DTU) https://drive.google.com/file/d/10Jx3yC8gmFYHkl0KXucaUOZJqtf9QkJq/view?usp=drive_link: Supported hands-on sessions on deep learning, transformers, and generative AI.
|
| 80 |
|
| 81 |
# TOOLS
|
| 82 |
+
- Database_Search: Search portfolio info (education, experience, testimonials, skills, projects, blog). Cite {PORTFOLIO_URL}/blog
|
|
|
|
|
|
|
| 83 |
|
| 84 |
# OUTPUT GUIDELINES:
|
| 85 |
- **Content:** Include all the links, images, and other relevant content from the all the suitable available tools accurately in the final response.
|
| 86 |
- **Information-Dense Sentences:** Every sentence must provide a new fact.
|
| 87 |
- **Interesting and Engaging:** Use a mix of facts and interesting details to keep the reader engaged.
|
| 88 |
- **Style:** Professional, concise, witty and helpful.
|
| 89 |
+
"""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
src/agents/rag_assistant.py
DELETED
|
@@ -1,146 +0,0 @@
|
|
| 1 |
-
from datetime import datetime
|
| 2 |
-
from typing import Literal
|
| 3 |
-
|
| 4 |
-
from langchain_core.language_models.chat_models import BaseChatModel
|
| 5 |
-
from langchain_core.messages import AIMessage, SystemMessage
|
| 6 |
-
from langchain_core.runnables import (
|
| 7 |
-
RunnableConfig,
|
| 8 |
-
RunnableLambda,
|
| 9 |
-
RunnableSerializable,
|
| 10 |
-
)
|
| 11 |
-
from langgraph.graph import END, MessagesState, StateGraph
|
| 12 |
-
from langgraph.managed import RemainingSteps
|
| 13 |
-
from langgraph.prebuilt import ToolNode
|
| 14 |
-
|
| 15 |
-
from agents.llama_guard import LlamaGuard, LlamaGuardOutput, SafetyAssessment
|
| 16 |
-
from agents.tools import database_search
|
| 17 |
-
from core import get_model, settings
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
class AgentState(MessagesState, total=False):
|
| 21 |
-
"""`total=False` is PEP589 specs.
|
| 22 |
-
|
| 23 |
-
documentation: https://typing.readthedocs.io/en/latest/spec/typeddict.html#totality
|
| 24 |
-
"""
|
| 25 |
-
|
| 26 |
-
safety: LlamaGuardOutput
|
| 27 |
-
remaining_steps: RemainingSteps
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
tools = [database_search]
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
current_date = datetime.now().strftime("%B %d, %Y")
|
| 34 |
-
instructions = f"""
|
| 35 |
-
You are AcmeBot, a helpful and knowledgeable virtual assistant designed to support employees by retrieving
|
| 36 |
-
and answering questions based on AcmeTech's official Employee Handbook. Your primary role is to provide
|
| 37 |
-
accurate, concise, and friendly information about company policies, values, procedures, and employee resources.
|
| 38 |
-
Today's date is {current_date}.
|
| 39 |
-
|
| 40 |
-
NOTE: THE USER CAN'T SEE THE TOOL RESPONSE.
|
| 41 |
-
|
| 42 |
-
A few things to remember:
|
| 43 |
-
- If you have access to multiple databases, gather information from a diverse range of sources before crafting your response.
|
| 44 |
-
- Please include markdown-formatted links to any citations used in your response. Only include one
|
| 45 |
-
or two citations per response unless more are needed. ONLY USE LINKS RETURNED BY THE TOOLS.
|
| 46 |
-
- Only use information from the database. Do not use information from outside sources.
|
| 47 |
-
"""
|
| 48 |
-
|
| 49 |
-
|
| 50 |
-
def wrap_model(model: BaseChatModel) -> RunnableSerializable[AgentState, AIMessage]:
|
| 51 |
-
bound_model = model.bind_tools(tools)
|
| 52 |
-
preprocessor = RunnableLambda(
|
| 53 |
-
lambda state: [SystemMessage(content=instructions)] + state["messages"],
|
| 54 |
-
name="StateModifier",
|
| 55 |
-
)
|
| 56 |
-
return preprocessor | bound_model # type: ignore[return-value]
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
def format_safety_message(safety: LlamaGuardOutput) -> AIMessage:
|
| 60 |
-
content = (
|
| 61 |
-
f"This conversation was flagged for unsafe content: {', '.join(safety.unsafe_categories)}"
|
| 62 |
-
)
|
| 63 |
-
return AIMessage(content=content)
|
| 64 |
-
|
| 65 |
-
|
| 66 |
-
async def acall_model(state: AgentState, config: RunnableConfig) -> AgentState:
|
| 67 |
-
m = get_model(config["configurable"].get("model", settings.DEFAULT_MODEL))
|
| 68 |
-
model_runnable = wrap_model(m)
|
| 69 |
-
response = await model_runnable.ainvoke(state, config)
|
| 70 |
-
|
| 71 |
-
# Run llama guard check here to avoid returning the message if it's unsafe
|
| 72 |
-
llama_guard = LlamaGuard()
|
| 73 |
-
safety_output = await llama_guard.ainvoke("Agent", state["messages"] + [response])
|
| 74 |
-
if safety_output.safety_assessment == SafetyAssessment.UNSAFE:
|
| 75 |
-
return {
|
| 76 |
-
"messages": [format_safety_message(safety_output)],
|
| 77 |
-
"safety": safety_output,
|
| 78 |
-
}
|
| 79 |
-
|
| 80 |
-
if state["remaining_steps"] < 2 and response.tool_calls:
|
| 81 |
-
return {
|
| 82 |
-
"messages": [
|
| 83 |
-
AIMessage(
|
| 84 |
-
id=response.id,
|
| 85 |
-
content="Sorry, need more steps to process this request.",
|
| 86 |
-
)
|
| 87 |
-
]
|
| 88 |
-
}
|
| 89 |
-
# We return a list, because this will get added to the existing list
|
| 90 |
-
return {"messages": [response]}
|
| 91 |
-
|
| 92 |
-
|
| 93 |
-
async def llama_guard_input(state: AgentState, config: RunnableConfig) -> AgentState:
|
| 94 |
-
llama_guard = LlamaGuard()
|
| 95 |
-
safety_output = await llama_guard.ainvoke("User", state["messages"])
|
| 96 |
-
return {"safety": safety_output, "messages": []}
|
| 97 |
-
|
| 98 |
-
|
| 99 |
-
async def block_unsafe_content(state: AgentState, config: RunnableConfig) -> AgentState:
|
| 100 |
-
safety: LlamaGuardOutput = state["safety"]
|
| 101 |
-
return {"messages": [format_safety_message(safety)]}
|
| 102 |
-
|
| 103 |
-
|
| 104 |
-
# Define the graph
|
| 105 |
-
agent = StateGraph(AgentState)
|
| 106 |
-
agent.add_node("model", acall_model)
|
| 107 |
-
agent.add_node("tools", ToolNode(tools))
|
| 108 |
-
agent.add_node("guard_input", llama_guard_input)
|
| 109 |
-
agent.add_node("block_unsafe_content", block_unsafe_content)
|
| 110 |
-
agent.set_entry_point("guard_input")
|
| 111 |
-
|
| 112 |
-
|
| 113 |
-
# Check for unsafe input and block further processing if found
|
| 114 |
-
def check_safety(state: AgentState) -> Literal["unsafe", "safe"]:
|
| 115 |
-
safety: LlamaGuardOutput = state["safety"]
|
| 116 |
-
match safety.safety_assessment:
|
| 117 |
-
case SafetyAssessment.UNSAFE:
|
| 118 |
-
return "unsafe"
|
| 119 |
-
case _:
|
| 120 |
-
return "safe"
|
| 121 |
-
|
| 122 |
-
|
| 123 |
-
agent.add_conditional_edges(
|
| 124 |
-
"guard_input", check_safety, {"unsafe": "block_unsafe_content", "safe": "model"}
|
| 125 |
-
)
|
| 126 |
-
|
| 127 |
-
# Always END after blocking unsafe content
|
| 128 |
-
agent.add_edge("block_unsafe_content", END)
|
| 129 |
-
|
| 130 |
-
# Always run "model" after "tools"
|
| 131 |
-
agent.add_edge("tools", "model")
|
| 132 |
-
|
| 133 |
-
|
| 134 |
-
# After "model", if there are tool calls, run "tools". Otherwise END.
|
| 135 |
-
def pending_tool_calls(state: AgentState) -> Literal["tools", "done"]:
|
| 136 |
-
last_message = state["messages"][-1]
|
| 137 |
-
if not isinstance(last_message, AIMessage):
|
| 138 |
-
raise TypeError(f"Expected AIMessage, got {type(last_message)}")
|
| 139 |
-
if last_message.tool_calls:
|
| 140 |
-
return "tools"
|
| 141 |
-
return "done"
|
| 142 |
-
|
| 143 |
-
|
| 144 |
-
agent.add_conditional_edges("model", pending_tool_calls, {"tools": "tools", "done": END})
|
| 145 |
-
|
| 146 |
-
rag_assistant = agent.compile()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
src/agents/research_assistant.py
DELETED
|
@@ -1,148 +0,0 @@
|
|
| 1 |
-
from datetime import datetime
|
| 2 |
-
from typing import Literal
|
| 3 |
-
|
| 4 |
-
from langchain_community.tools import DuckDuckGoSearchResults, OpenWeatherMapQueryRun
|
| 5 |
-
from langchain_community.utilities import OpenWeatherMapAPIWrapper
|
| 6 |
-
from langchain_core.language_models.chat_models import BaseChatModel
|
| 7 |
-
from langchain_core.messages import AIMessage, SystemMessage
|
| 8 |
-
from langchain_core.runnables import RunnableConfig, RunnableLambda, RunnableSerializable
|
| 9 |
-
from langgraph.graph import END, MessagesState, StateGraph
|
| 10 |
-
from langgraph.managed import RemainingSteps
|
| 11 |
-
from langgraph.prebuilt import ToolNode
|
| 12 |
-
|
| 13 |
-
from agents.llama_guard import LlamaGuard, LlamaGuardOutput, SafetyAssessment
|
| 14 |
-
from agents.tools import calculator
|
| 15 |
-
from core import get_model, settings
|
| 16 |
-
|
| 17 |
-
|
| 18 |
-
class AgentState(MessagesState, total=False):
|
| 19 |
-
"""`total=False` is PEP589 specs.
|
| 20 |
-
|
| 21 |
-
documentation: https://typing.readthedocs.io/en/latest/spec/typeddict.html#totality
|
| 22 |
-
"""
|
| 23 |
-
|
| 24 |
-
safety: LlamaGuardOutput
|
| 25 |
-
remaining_steps: RemainingSteps
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
web_search = DuckDuckGoSearchResults(name="WebSearch")
|
| 29 |
-
tools = [web_search, calculator]
|
| 30 |
-
|
| 31 |
-
# Add weather tool if API key is set
|
| 32 |
-
# Register for an API key at https://openweathermap.org/api/
|
| 33 |
-
if settings.OPENWEATHERMAP_API_KEY:
|
| 34 |
-
wrapper = OpenWeatherMapAPIWrapper(
|
| 35 |
-
openweathermap_api_key=settings.OPENWEATHERMAP_API_KEY.get_secret_value()
|
| 36 |
-
)
|
| 37 |
-
tools.append(OpenWeatherMapQueryRun(name="Weather", api_wrapper=wrapper))
|
| 38 |
-
|
| 39 |
-
current_date = datetime.now().strftime("%B %d, %Y")
|
| 40 |
-
instructions = f"""
|
| 41 |
-
You are a helpful research assistant with the ability to search the web and use other tools.
|
| 42 |
-
Today's date is {current_date}.
|
| 43 |
-
|
| 44 |
-
NOTE: THE USER CAN'T SEE THE TOOL RESPONSE.
|
| 45 |
-
|
| 46 |
-
A few things to remember:
|
| 47 |
-
- Please include markdown-formatted links to any citations used in your response. Only include one
|
| 48 |
-
or two citations per response unless more are needed. ONLY USE LINKS RETURNED BY THE TOOLS.
|
| 49 |
-
- Use calculator tool with numexpr to answer math questions. The user does not understand numexpr,
|
| 50 |
-
so for the final response, use human readable format - e.g. "300 * 200", not "(300 \\times 200)".
|
| 51 |
-
"""
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
def wrap_model(model: BaseChatModel) -> RunnableSerializable[AgentState, AIMessage]:
|
| 55 |
-
bound_model = model.bind_tools(tools)
|
| 56 |
-
preprocessor = RunnableLambda(
|
| 57 |
-
lambda state: [SystemMessage(content=instructions)] + state["messages"],
|
| 58 |
-
name="StateModifier",
|
| 59 |
-
)
|
| 60 |
-
return preprocessor | bound_model # type: ignore[return-value]
|
| 61 |
-
|
| 62 |
-
|
| 63 |
-
def format_safety_message(safety: LlamaGuardOutput) -> AIMessage:
|
| 64 |
-
content = (
|
| 65 |
-
f"This conversation was flagged for unsafe content: {', '.join(safety.unsafe_categories)}"
|
| 66 |
-
)
|
| 67 |
-
return AIMessage(content=content)
|
| 68 |
-
|
| 69 |
-
|
| 70 |
-
async def acall_model(state: AgentState, config: RunnableConfig) -> AgentState:
|
| 71 |
-
m = get_model(config["configurable"].get("model", settings.DEFAULT_MODEL))
|
| 72 |
-
model_runnable = wrap_model(m)
|
| 73 |
-
response = await model_runnable.ainvoke(state, config)
|
| 74 |
-
|
| 75 |
-
# Run llama guard check here to avoid returning the message if it's unsafe
|
| 76 |
-
llama_guard = LlamaGuard()
|
| 77 |
-
safety_output = await llama_guard.ainvoke("Agent", state["messages"] + [response])
|
| 78 |
-
if safety_output.safety_assessment == SafetyAssessment.UNSAFE:
|
| 79 |
-
return {"messages": [format_safety_message(safety_output)], "safety": safety_output}
|
| 80 |
-
|
| 81 |
-
if state["remaining_steps"] < 2 and response.tool_calls:
|
| 82 |
-
return {
|
| 83 |
-
"messages": [
|
| 84 |
-
AIMessage(
|
| 85 |
-
id=response.id,
|
| 86 |
-
content="Sorry, need more steps to process this request.",
|
| 87 |
-
)
|
| 88 |
-
]
|
| 89 |
-
}
|
| 90 |
-
# We return a list, because this will get added to the existing list
|
| 91 |
-
return {"messages": [response]}
|
| 92 |
-
|
| 93 |
-
|
| 94 |
-
async def llama_guard_input(state: AgentState, config: RunnableConfig) -> AgentState:
|
| 95 |
-
llama_guard = LlamaGuard()
|
| 96 |
-
safety_output = await llama_guard.ainvoke("User", state["messages"])
|
| 97 |
-
return {"safety": safety_output, "messages": []}
|
| 98 |
-
|
| 99 |
-
|
| 100 |
-
async def block_unsafe_content(state: AgentState, config: RunnableConfig) -> AgentState:
|
| 101 |
-
safety: LlamaGuardOutput = state["safety"]
|
| 102 |
-
return {"messages": [format_safety_message(safety)]}
|
| 103 |
-
|
| 104 |
-
|
| 105 |
-
# Define the graph
|
| 106 |
-
agent = StateGraph(AgentState)
|
| 107 |
-
agent.add_node("model", acall_model)
|
| 108 |
-
agent.add_node("tools", ToolNode(tools))
|
| 109 |
-
agent.add_node("guard_input", llama_guard_input)
|
| 110 |
-
agent.add_node("block_unsafe_content", block_unsafe_content)
|
| 111 |
-
agent.set_entry_point("guard_input")
|
| 112 |
-
|
| 113 |
-
|
| 114 |
-
# Check for unsafe input and block further processing if found
|
| 115 |
-
def check_safety(state: AgentState) -> Literal["unsafe", "safe"]:
|
| 116 |
-
safety: LlamaGuardOutput = state["safety"]
|
| 117 |
-
match safety.safety_assessment:
|
| 118 |
-
case SafetyAssessment.UNSAFE:
|
| 119 |
-
return "unsafe"
|
| 120 |
-
case _:
|
| 121 |
-
return "safe"
|
| 122 |
-
|
| 123 |
-
|
| 124 |
-
agent.add_conditional_edges(
|
| 125 |
-
"guard_input", check_safety, {"unsafe": "block_unsafe_content", "safe": "model"}
|
| 126 |
-
)
|
| 127 |
-
|
| 128 |
-
# Always END after blocking unsafe content
|
| 129 |
-
agent.add_edge("block_unsafe_content", END)
|
| 130 |
-
|
| 131 |
-
# Always run "model" after "tools"
|
| 132 |
-
agent.add_edge("tools", "model")
|
| 133 |
-
|
| 134 |
-
|
| 135 |
-
# After "model", if there are tool calls, run "tools". Otherwise END.
|
| 136 |
-
def pending_tool_calls(state: AgentState) -> Literal["tools", "done"]:
|
| 137 |
-
last_message = state["messages"][-1]
|
| 138 |
-
if not isinstance(last_message, AIMessage):
|
| 139 |
-
raise TypeError(f"Expected AIMessage, got {type(last_message)}")
|
| 140 |
-
if last_message.tool_calls:
|
| 141 |
-
return "tools"
|
| 142 |
-
return "done"
|
| 143 |
-
|
| 144 |
-
|
| 145 |
-
agent.add_conditional_edges("model", pending_tool_calls, {"tools": "tools", "done": END})
|
| 146 |
-
|
| 147 |
-
|
| 148 |
-
research_assistant = agent.compile()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
src/agents/tools.py
DELETED
|
@@ -1,56 +0,0 @@
|
|
| 1 |
-
import re
|
| 2 |
-
import math
|
| 3 |
-
import numexpr
|
| 4 |
-
|
| 5 |
-
from memory.postgres import load_pgvector_retriever
|
| 6 |
-
from langchain_core.tools import BaseTool, tool
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
def calculator_func(expression: str) -> str:
|
| 10 |
-
"""Calculates a math expression using numexpr.
|
| 11 |
-
|
| 12 |
-
Useful for when you need to answer questions about math using numexpr.
|
| 13 |
-
This tool is only for math questions and nothing else. Only input
|
| 14 |
-
math expressions.
|
| 15 |
-
|
| 16 |
-
Args:
|
| 17 |
-
expression (str): A valid numexpr formatted math expression.
|
| 18 |
-
|
| 19 |
-
Returns:
|
| 20 |
-
str: The result of the math expression.
|
| 21 |
-
"""
|
| 22 |
-
|
| 23 |
-
try:
|
| 24 |
-
local_dict = {"pi": math.pi, "e": math.e}
|
| 25 |
-
output = str(
|
| 26 |
-
numexpr.evaluate(
|
| 27 |
-
expression.strip(),
|
| 28 |
-
global_dict={}, # restrict access to globals
|
| 29 |
-
local_dict=local_dict, # add common mathematical functions
|
| 30 |
-
)
|
| 31 |
-
)
|
| 32 |
-
return re.sub(r"^\[|\]$", "", output)
|
| 33 |
-
except Exception as e:
|
| 34 |
-
raise ValueError(
|
| 35 |
-
f'calculator("{expression}") raised error: {e}.'
|
| 36 |
-
" Please try again with a valid numerical expression"
|
| 37 |
-
)
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
calculator: BaseTool = tool(calculator_func)
|
| 41 |
-
calculator.name = "Calculator"
|
| 42 |
-
|
| 43 |
-
def format_contexts(docs):
|
| 44 |
-
return "\n\n".join(doc.page_content for doc in docs)
|
| 45 |
-
|
| 46 |
-
def database_search_func(query: str) -> str:
|
| 47 |
-
"""Searches the vector DB for information in the portfolio."""
|
| 48 |
-
retriever = load_pgvector_retriever()
|
| 49 |
-
documents = retriever.invoke(query)
|
| 50 |
-
context_str = format_contexts(documents)
|
| 51 |
-
|
| 52 |
-
return context_str
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
database_search: BaseTool = tool(database_search_func)
|
| 56 |
-
database_search.name = "Database_Search"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
src/agents/tools/database_search.py
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
from pydantic import BaseModel, Field
|
| 3 |
+
|
| 4 |
+
from langchain_core.documents import Document
|
| 5 |
+
from langchain_core.tools import BaseTool, tool
|
| 6 |
+
from memory.postgres import load_pgvector_retriever
|
| 7 |
+
from service.utils import convert_tool_response_to_toon
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class DatabaseSearchInput(BaseModel):
|
| 11 |
+
"""Input for database search."""
|
| 12 |
+
query: str = Field(description="The natural language search query to look up information from Anuj's portfolio.")
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
@tool("Database_Search", args_schema=DatabaseSearchInput)
|
| 16 |
+
def database_search(query: str) -> str:
|
| 17 |
+
"""Search for relevant information about Anuj Joshi's education, experience, projects, blog posts, testimonials, and skills.
|
| 18 |
+
|
| 19 |
+
This tool should be used whenever the user asks about Anuj's background, work, or specific accomplishments.
|
| 20 |
+
"""
|
| 21 |
+
|
| 22 |
+
retriever = load_pgvector_retriever(3)
|
| 23 |
+
documents = retriever.invoke(query)
|
| 24 |
+
|
| 25 |
+
if not documents:
|
| 26 |
+
return "No relevant portfolio information found."
|
| 27 |
+
|
| 28 |
+
formatted_docs = []
|
| 29 |
+
|
| 30 |
+
for i, doc in enumerate(documents):
|
| 31 |
+
docs = {"content": doc.page_content, "metadata": doc.metadata}
|
| 32 |
+
formatted_docs.append(convert_tool_response_to_toon(json.dumps(docs)))
|
| 33 |
+
|
| 34 |
+
return "\n\n".join(formatted_docs)
|
src/agents/utils.py
CHANGED
|
@@ -1,17 +1,15 @@
|
|
| 1 |
-
from
|
|
|
|
| 2 |
|
| 3 |
-
from langchain_core.messages import ChatMessage
|
| 4 |
-
from langgraph.types import StreamWriter
|
| 5 |
-
from pydantic import BaseModel, Field
|
| 6 |
|
|
|
|
|
|
|
|
|
|
| 7 |
|
| 8 |
-
|
| 9 |
-
|
|
|
|
|
|
|
|
|
|
| 10 |
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
def to_langchain(self) -> ChatMessage:
|
| 14 |
-
return ChatMessage(content=[self.data], role="custom")
|
| 15 |
-
|
| 16 |
-
def dispatch(self, writer: StreamWriter) -> None:
|
| 17 |
-
writer(self.to_langchain())
|
|
|
|
| 1 |
+
from langchain_core.tools import BaseTool
|
| 2 |
+
from typing import Iterable, Set, List
|
| 3 |
|
|
|
|
|
|
|
|
|
|
| 4 |
|
| 5 |
+
def filter_mcp_tools(tools: Iterable[BaseTool], allowed: Set[str]) -> List[BaseTool]:
|
| 6 |
+
"""Keep only allowed MCP tools and remove namespace (cpstat.)."""
|
| 7 |
+
filtered = []
|
| 8 |
|
| 9 |
+
for t in tools:
|
| 10 |
+
short_name = t.name.rsplit(".", 1)[-1]
|
| 11 |
+
if short_name in allowed:
|
| 12 |
+
t.name = short_name
|
| 13 |
+
filtered.append(t)
|
| 14 |
|
| 15 |
+
return filtered
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
src/core/llm.py
CHANGED
|
@@ -113,14 +113,7 @@ def get_model(model_name: AllModelEnum, /) -> ModelT:
|
|
| 113 |
if model_name in VertexAIModelName:
|
| 114 |
return ChatVertexAI(model=api_model_name, temperature=0.5, streaming=True)
|
| 115 |
if model_name in GroqModelName:
|
| 116 |
-
|
| 117 |
-
guard_models = {
|
| 118 |
-
GroqModelName.LLAMA_GUARD_4_12B,
|
| 119 |
-
GroqModelName.LLAMA_PROMPT_GUARD_2_22M,
|
| 120 |
-
GroqModelName.LLAMA_PROMPT_GUARD_2_86M,
|
| 121 |
-
GroqModelName.OPENAI_GPT_OSS_SAFEGUARD_20B,
|
| 122 |
-
}
|
| 123 |
-
if model_name in guard_models:
|
| 124 |
return ChatGroq(model=api_model_name, temperature=0.0) # type: ignore[call-arg]
|
| 125 |
return ChatGroq(model=api_model_name, temperature=0.5) # type: ignore[call-arg]
|
| 126 |
if model_name in AWSModelName:
|
|
|
|
| 113 |
if model_name in VertexAIModelName:
|
| 114 |
return ChatVertexAI(model=api_model_name, temperature=0.5, streaming=True)
|
| 115 |
if model_name in GroqModelName:
|
| 116 |
+
if model_name == GroqModelName.LLAMA_GUARD_4_12B:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 117 |
return ChatGroq(model=api_model_name, temperature=0.0) # type: ignore[call-arg]
|
| 118 |
return ChatGroq(model=api_model_name, temperature=0.5) # type: ignore[call-arg]
|
| 119 |
if model_name in AWSModelName:
|
src/core/settings.py
CHANGED
|
@@ -85,7 +85,6 @@ class Settings(BaseSettings):
|
|
| 85 |
AUTH_SECRET: SecretStr | None = None
|
| 86 |
CORS_ORIGINS: Annotated[Any, BeforeValidator(lambda x: x.split(",") if isinstance(x, str) else x)] = [
|
| 87 |
"http://localhost:3000",
|
| 88 |
-
"http://localhost:8081",
|
| 89 |
"http://localhost:5173",
|
| 90 |
]
|
| 91 |
|
|
@@ -97,6 +96,7 @@ class Settings(BaseSettings):
|
|
| 97 |
GROQ_API_KEY: SecretStr | None = None
|
| 98 |
USE_AWS_BEDROCK: bool = False
|
| 99 |
OLLAMA_MODEL: str | None = None
|
|
|
|
| 100 |
OLLAMA_BASE_URL: str | None = None
|
| 101 |
USE_FAKE_MODEL: bool = False
|
| 102 |
OPENROUTER_API_KEY: str | None = None
|
|
@@ -108,7 +108,6 @@ class Settings(BaseSettings):
|
|
| 108 |
# Embedding Settings
|
| 109 |
DEFAULT_EMBEDDING_MODEL: AllEmbeddingModelEnum | None = None # type: ignore[assignment]
|
| 110 |
AVAILABLE_EMBEDDING_MODELS: set[AllEmbeddingModelEnum] = set() # type: ignore[assignment]
|
| 111 |
-
OLLAMA_EMBEDDING_MODEL: str | None = None
|
| 112 |
|
| 113 |
# Set openai compatible api, mainly used for proof of concept
|
| 114 |
COMPATIBLE_MODEL: str | None = None
|
|
@@ -135,6 +134,9 @@ class Settings(BaseSettings):
|
|
| 135 |
LANGFUSE_PUBLIC_KEY: SecretStr | None = None
|
| 136 |
LANGFUSE_SECRET_KEY: SecretStr | None = None
|
| 137 |
|
|
|
|
|
|
|
|
|
|
| 138 |
# Database Configuration
|
| 139 |
DATABASE_TYPE: DatabaseType = (
|
| 140 |
DatabaseType.SQLITE
|
|
@@ -147,7 +149,7 @@ class Settings(BaseSettings):
|
|
| 147 |
POSTGRES_HOST: str | None = None
|
| 148 |
POSTGRES_PORT: int | None = None
|
| 149 |
POSTGRES_DB: str | None = None
|
| 150 |
-
POSTGRES_APPLICATION_NAME: str = "
|
| 151 |
POSTGRES_MIN_CONNECTIONS_PER_POOL: int = 1
|
| 152 |
POSTGRES_MAX_CONNECTIONS_PER_POOL: int = 1
|
| 153 |
VECTOR_STORE_COLLECTION_NAME: str = "vector_store"
|
|
|
|
| 85 |
AUTH_SECRET: SecretStr | None = None
|
| 86 |
CORS_ORIGINS: Annotated[Any, BeforeValidator(lambda x: x.split(",") if isinstance(x, str) else x)] = [
|
| 87 |
"http://localhost:3000",
|
|
|
|
| 88 |
"http://localhost:5173",
|
| 89 |
]
|
| 90 |
|
|
|
|
| 96 |
GROQ_API_KEY: SecretStr | None = None
|
| 97 |
USE_AWS_BEDROCK: bool = False
|
| 98 |
OLLAMA_MODEL: str | None = None
|
| 99 |
+
OLLAMA_EMBEDDING_MODEL: str | None = None
|
| 100 |
OLLAMA_BASE_URL: str | None = None
|
| 101 |
USE_FAKE_MODEL: bool = False
|
| 102 |
OPENROUTER_API_KEY: str | None = None
|
|
|
|
| 108 |
# Embedding Settings
|
| 109 |
DEFAULT_EMBEDDING_MODEL: AllEmbeddingModelEnum | None = None # type: ignore[assignment]
|
| 110 |
AVAILABLE_EMBEDDING_MODELS: set[AllEmbeddingModelEnum] = set() # type: ignore[assignment]
|
|
|
|
| 111 |
|
| 112 |
# Set openai compatible api, mainly used for proof of concept
|
| 113 |
COMPATIBLE_MODEL: str | None = None
|
|
|
|
| 134 |
LANGFUSE_PUBLIC_KEY: SecretStr | None = None
|
| 135 |
LANGFUSE_SECRET_KEY: SecretStr | None = None
|
| 136 |
|
| 137 |
+
# Safety Guard Configuration
|
| 138 |
+
ENABLE_SAFETY_GUARD: bool = True
|
| 139 |
+
|
| 140 |
# Database Configuration
|
| 141 |
DATABASE_TYPE: DatabaseType = (
|
| 142 |
DatabaseType.SQLITE
|
|
|
|
| 149 |
POSTGRES_HOST: str | None = None
|
| 150 |
POSTGRES_PORT: int | None = None
|
| 151 |
POSTGRES_DB: str | None = None
|
| 152 |
+
POSTGRES_APPLICATION_NAME: str = "chatbot"
|
| 153 |
POSTGRES_MIN_CONNECTIONS_PER_POOL: int = 1
|
| 154 |
POSTGRES_MAX_CONNECTIONS_PER_POOL: int = 1
|
| 155 |
VECTOR_STORE_COLLECTION_NAME: str = "vector_store"
|
src/memory/postgres.py
CHANGED
|
@@ -18,7 +18,6 @@ def validate_postgres_config() -> None:
|
|
| 18 |
Validate that all required PostgreSQL configuration is present.
|
| 19 |
Raises ValueError if any required configuration is missing.
|
| 20 |
"""
|
| 21 |
-
|
| 22 |
required_vars = [
|
| 23 |
"POSTGRES_USER",
|
| 24 |
"POSTGRES_PASSWORD",
|
|
@@ -48,7 +47,7 @@ def get_postgres_connection_string() -> str:
|
|
| 48 |
f"postgresql://{settings.POSTGRES_USER}:"
|
| 49 |
f"{settings.POSTGRES_PASSWORD.get_secret_value()}@"
|
| 50 |
f"{settings.POSTGRES_HOST}:{settings.POSTGRES_PORT}/"
|
| 51 |
-
f"{settings.POSTGRES_DB}
|
| 52 |
)
|
| 53 |
|
| 54 |
|
|
|
|
| 18 |
Validate that all required PostgreSQL configuration is present.
|
| 19 |
Raises ValueError if any required configuration is missing.
|
| 20 |
"""
|
|
|
|
| 21 |
required_vars = [
|
| 22 |
"POSTGRES_USER",
|
| 23 |
"POSTGRES_PASSWORD",
|
|
|
|
| 47 |
f"postgresql://{settings.POSTGRES_USER}:"
|
| 48 |
f"{settings.POSTGRES_PASSWORD.get_secret_value()}@"
|
| 49 |
f"{settings.POSTGRES_HOST}:{settings.POSTGRES_PORT}/"
|
| 50 |
+
f"{settings.POSTGRES_DB}"
|
| 51 |
)
|
| 52 |
|
| 53 |
|
src/schema/__init__.py
CHANGED
|
@@ -8,6 +8,9 @@ from schema.schema import (
|
|
| 8 |
FeedbackResponse,
|
| 9 |
ServiceMetadata,
|
| 10 |
StreamInput,
|
|
|
|
|
|
|
|
|
|
| 11 |
UserInput,
|
| 12 |
)
|
| 13 |
|
|
@@ -22,4 +25,7 @@ __all__ = [
|
|
| 22 |
"FeedbackResponse",
|
| 23 |
"ChatHistoryInput",
|
| 24 |
"ChatHistory",
|
|
|
|
|
|
|
|
|
|
| 25 |
]
|
|
|
|
| 8 |
FeedbackResponse,
|
| 9 |
ServiceMetadata,
|
| 10 |
StreamInput,
|
| 11 |
+
ThreadList,
|
| 12 |
+
ThreadListInput,
|
| 13 |
+
ThreadSummary,
|
| 14 |
UserInput,
|
| 15 |
)
|
| 16 |
|
|
|
|
| 25 |
"FeedbackResponse",
|
| 26 |
"ChatHistoryInput",
|
| 27 |
"ChatHistory",
|
| 28 |
+
"ThreadSummary",
|
| 29 |
+
"ThreadListInput",
|
| 30 |
+
"ThreadList",
|
| 31 |
]
|
src/schema/schema.py
CHANGED
|
@@ -17,6 +17,10 @@ class AgentInfo(BaseModel):
|
|
| 17 |
description="Description of the agent.",
|
| 18 |
examples=["A research assistant for generating research papers."],
|
| 19 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
| 20 |
|
| 21 |
|
| 22 |
class ServiceMetadata(BaseModel):
|
|
@@ -165,6 +169,10 @@ class FeedbackResponse(BaseModel):
|
|
| 165 |
class ChatHistoryInput(BaseModel):
|
| 166 |
"""Input for retrieving chat history."""
|
| 167 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 168 |
thread_id: str = Field(
|
| 169 |
description="Thread ID to persist and continue a multi-turn conversation.",
|
| 170 |
examples=["847c6285-8fc9-4560-a83f-4e6285809254"],
|
|
@@ -173,3 +181,36 @@ class ChatHistoryInput(BaseModel):
|
|
| 173 |
|
| 174 |
class ChatHistory(BaseModel):
|
| 175 |
messages: list[ChatMessage]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 17 |
description="Description of the agent.",
|
| 18 |
examples=["A research assistant for generating research papers."],
|
| 19 |
)
|
| 20 |
+
prompts: list[str] = Field(
|
| 21 |
+
description="List of suggested prompts for the agent.",
|
| 22 |
+
default=[],
|
| 23 |
+
)
|
| 24 |
|
| 25 |
|
| 26 |
class ServiceMetadata(BaseModel):
|
|
|
|
| 169 |
class ChatHistoryInput(BaseModel):
|
| 170 |
"""Input for retrieving chat history."""
|
| 171 |
|
| 172 |
+
user_id: str = Field(
|
| 173 |
+
description="User ID to scope history to the current user.",
|
| 174 |
+
examples=["847c6285-8fc9-4560-a83f-4e6285809254"],
|
| 175 |
+
)
|
| 176 |
thread_id: str = Field(
|
| 177 |
description="Thread ID to persist and continue a multi-turn conversation.",
|
| 178 |
examples=["847c6285-8fc9-4560-a83f-4e6285809254"],
|
|
|
|
| 181 |
|
| 182 |
class ChatHistory(BaseModel):
|
| 183 |
messages: list[ChatMessage]
|
| 184 |
+
|
| 185 |
+
|
| 186 |
+
class ThreadSummary(BaseModel):
|
| 187 |
+
"""Summary of a conversation thread for listing."""
|
| 188 |
+
|
| 189 |
+
thread_id: str = Field(description="Thread ID (logical id, without user prefix).")
|
| 190 |
+
updated_at: str | None = Field(
|
| 191 |
+
default=None,
|
| 192 |
+
description="ISO 8601 timestamp of last update.",
|
| 193 |
+
)
|
| 194 |
+
preview: str | None = Field(
|
| 195 |
+
default=None,
|
| 196 |
+
description="Preview of the first or last message.",
|
| 197 |
+
)
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
class ThreadListInput(BaseModel):
|
| 201 |
+
"""Input for listing threads for a user."""
|
| 202 |
+
|
| 203 |
+
user_id: str = Field(
|
| 204 |
+
description="User ID to list threads for.",
|
| 205 |
+
examples=["847c6285-8fc9-4560-a83f-4e6285809254"],
|
| 206 |
+
)
|
| 207 |
+
limit: int = Field(default=20, ge=1, le=100, description="Max number of threads to return.")
|
| 208 |
+
offset: int = Field(default=0, ge=0, description="Number of threads to skip.")
|
| 209 |
+
search: str | None = Field(default=None, description="Filter threads by thread_id or preview (case-insensitive).")
|
| 210 |
+
|
| 211 |
+
|
| 212 |
+
class ThreadList(BaseModel):
|
| 213 |
+
"""List of threads for a user."""
|
| 214 |
+
|
| 215 |
+
threads: list[ThreadSummary] = Field(default_factory=list)
|
| 216 |
+
total: int | None = Field(default=None, description="Total matching threads before pagination.")
|
src/schema/task_data.py
DELETED
|
@@ -1,74 +0,0 @@
|
|
| 1 |
-
from typing import Any, Literal
|
| 2 |
-
|
| 3 |
-
from pydantic import BaseModel, Field
|
| 4 |
-
|
| 5 |
-
|
| 6 |
-
class TaskData(BaseModel):
|
| 7 |
-
name: str | None = Field(
|
| 8 |
-
description="Name of the task.", default=None, examples=["Check input safety"]
|
| 9 |
-
)
|
| 10 |
-
run_id: str = Field(
|
| 11 |
-
description="ID of the task run to pair state updates to.",
|
| 12 |
-
default="",
|
| 13 |
-
examples=["847c6285-8fc9-4560-a83f-4e6285809254"],
|
| 14 |
-
)
|
| 15 |
-
state: Literal["new", "running", "complete"] | None = Field(
|
| 16 |
-
description="Current state of given task instance.",
|
| 17 |
-
default=None,
|
| 18 |
-
examples=["running"],
|
| 19 |
-
)
|
| 20 |
-
result: Literal["success", "error"] | None = Field(
|
| 21 |
-
description="Result of given task instance.",
|
| 22 |
-
default=None,
|
| 23 |
-
examples=["running"],
|
| 24 |
-
)
|
| 25 |
-
data: dict[str, Any] = Field(
|
| 26 |
-
description="Additional data generated by the task.",
|
| 27 |
-
default={},
|
| 28 |
-
)
|
| 29 |
-
|
| 30 |
-
def completed(self) -> bool:
|
| 31 |
-
return self.state == "complete"
|
| 32 |
-
|
| 33 |
-
def completed_with_error(self) -> bool:
|
| 34 |
-
return self.state == "complete" and self.result == "error"
|
| 35 |
-
|
| 36 |
-
|
| 37 |
-
class TaskDataStatus:
    """Streamlit status widget that renders incremental TaskData updates."""

    def __init__(self) -> None:
        # Imported lazily so this module stays importable outside a Streamlit app.
        import streamlit as st

        # Expandable status container; label and state are updated as tasks progress.
        self.status = st.status("")
        # Latest TaskData per run_id, used to derive the aggregate widget state.
        self.current_task_data: dict[str, TaskData] = {}

    def add_and_draw_task_data(self, task_data: TaskData) -> None:
        """Render one task update and refresh the aggregate status widget state."""
        status = self.status
        # The :blue[...] / :green[...] / :red[...] tags are Streamlit markdown colors.
        status_str = f"Task **{task_data.name}** "
        match task_data.state:
            case "new":
                status_str += "has :blue[started]. Input:"
            case "running":
                status_str += "wrote:"
            case "complete":
                if task_data.result == "success":
                    status_str += ":green[completed successfully]. Output:"
                else:
                    status_str += ":red[ended with error]. Output:"
        status.write(status_str)
        status.write(task_data.data)
        status.write("---")
        if task_data.run_id not in self.current_task_data:
            # Status label always shows the last newly started task
            status.update(label=f"""Task: {task_data.name}""")
        self.current_task_data[task_data.run_id] = task_data
        if all(entry.completed() for entry in self.current_task_data.values()):
            # Status is "error" if any task has errored
            if any(entry.completed_with_error() for entry in self.current_task_data.values()):
                state = "error"
            # Status is "complete" if all tasks have completed successfully
            else:
                state = "complete"
        # Status is "running" until all tasks have completed
        else:
            state = "running"
        status.update(state=state)  # type: ignore[arg-type]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
src/scripts/create_chroma_db.py
DELETED
|
@@ -1,83 +0,0 @@
|
|
| 1 |
-
import os
|
| 2 |
-
import shutil
|
| 3 |
-
|
| 4 |
-
from dotenv import load_dotenv
|
| 5 |
-
from langchain.text_splitter import RecursiveCharacterTextSplitter
|
| 6 |
-
from langchain_chroma import Chroma
|
| 7 |
-
from langchain_community.document_loaders import Docx2txtLoader, PyPDFLoader
|
| 8 |
-
from langchain_openai import OpenAIEmbeddings
|
| 9 |
-
|
| 10 |
-
# Load environment variables from the .env file
|
| 11 |
-
load_dotenv()
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
def create_chroma_db(
    folder_path: str,
    db_name: str = "./chroma_db",
    delete_chroma_db: bool = True,
    chunk_size: int = 2000,
    overlap: int = 500,
):
    """Build (or rebuild) a Chroma vector store from the documents in a folder.

    Args:
        folder_path: Directory containing .pdf / .docx files to index.
        db_name: Directory the Chroma database is persisted to.
        delete_chroma_db: When True, wipe any existing database at db_name first.
        chunk_size: Character length of each text chunk.
        overlap: Character overlap between consecutive chunks.

    Returns:
        The populated Chroma vector store.
    """
    embeddings = OpenAIEmbeddings(api_key=os.environ["OPENAI_API_KEY"])

    # Initialize Chroma vector store
    if delete_chroma_db and os.path.exists(db_name):
        shutil.rmtree(db_name)
        print(f"Deleted existing database at {db_name}")

    # Fix: persist to the same path the deletion check above uses. The old
    # persist_directory=f"./{db_name}" diverged from db_name for absolute paths,
    # so the delete step and the write step could target different directories.
    chroma = Chroma(
        embedding_function=embeddings,
        persist_directory=db_name,
    )

    # Initialize text splitter
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=overlap)

    # Iterate over files in the folder
    for filename in os.listdir(folder_path):
        file_path = os.path.join(folder_path, filename)

        # Load document based on file extension
        # Add more loaders if required, i.e. JSONLoader, TxtLoader, etc.
        if filename.endswith(".pdf"):
            loader = PyPDFLoader(file_path)
        elif filename.endswith(".docx"):
            loader = Docx2txtLoader(file_path)
        else:
            continue  # Skip unsupported file types

        # Load and split document into chunks
        document = loader.load()
        chunks = text_splitter.split_documents(document)

        # Add chunks to Chroma vector store
        for chunk in chunks:
            chunk_id = chroma.add_documents([chunk])
            if chunk_id:
                print(f"Chunk added with ID: {chunk_id}")
            else:
                print("Failed to add chunk")

        # Fix: the old f-string had no placeholder; report which file finished.
        print(f"Document {filename} added to database.")

    print(f"Vector database created and saved in {db_name}.")
    return chroma
|
| 65 |
-
|
| 66 |
-
|
| 67 |
-
# Smoke-test entry point: build the database from ./data and run one sample query.
if __name__ == "__main__":
    # Path to the folder containing the documents
    folder_path = "./data"

    # Create the Chroma database
    chroma = create_chroma_db(folder_path=folder_path)

    # Create retriever from the Chroma database (top-3 nearest chunks)
    retriever = chroma.as_retriever(search_kwargs={"k": 3})

    # Perform a similarity search
    query = "What's my company's mission and values"
    similar_docs = retriever.invoke(query)

    # Display results
    for i, doc in enumerate(similar_docs, start=1):
        print(f"\n🔹 Result {i}:\n{doc.page_content}\nTags: {doc.metadata.get('source', [])}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
src/service/agent_service.py
ADDED
|
@@ -0,0 +1,562 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import asyncio
|
| 2 |
+
import inspect
|
| 3 |
+
import json
|
| 4 |
+
import logging
|
| 5 |
+
import re
|
| 6 |
+
from collections.abc import AsyncGenerator
|
| 7 |
+
from datetime import datetime, timezone
|
| 8 |
+
from typing import Any
|
| 9 |
+
from uuid import UUID
|
| 10 |
+
|
| 11 |
+
from fastapi import HTTPException
|
| 12 |
+
from langchain_core.messages import AIMessage, AIMessageChunk, AnyMessage, HumanMessage, ToolMessage
|
| 13 |
+
from langchain_core.runnables import RunnableConfig
|
| 14 |
+
from langfuse.langchain import CallbackHandler
|
| 15 |
+
from langgraph.types import Command, Interrupt
|
| 16 |
+
from langsmith import Client as LangsmithClient
|
| 17 |
+
from langsmith import uuid7
|
| 18 |
+
|
| 19 |
+
from agents import DEFAULT_AGENT, AgentGraph, get_agent
|
| 20 |
+
from agents.middlewares import FollowUpMiddleware, SafetyMiddleware, UNSAFE_RESPONSE
|
| 21 |
+
from agents.llama_guard import SafetyAssessment
|
| 22 |
+
from core import settings
|
| 23 |
+
from schema import (
|
| 24 |
+
ChatHistory,
|
| 25 |
+
ChatHistoryInput,
|
| 26 |
+
ChatMessage,
|
| 27 |
+
Feedback,
|
| 28 |
+
FeedbackResponse,
|
| 29 |
+
StreamInput,
|
| 30 |
+
ThreadList,
|
| 31 |
+
ThreadSummary,
|
| 32 |
+
UserInput,
|
| 33 |
+
)
|
| 34 |
+
from service.utils import (
|
| 35 |
+
convert_message_content_to_string,
|
| 36 |
+
langchain_to_chat_message,
|
| 37 |
+
remove_tool_calls,
|
| 38 |
+
)
|
| 39 |
+
|
| 40 |
+
# Module-level logger for this service module.
logger = logging.getLogger(__name__)

# Shared middleware singletons, reused across all requests:
# input/output safety screening and follow-up question generation.
safety = SafetyMiddleware()
followup = FollowUpMiddleware()
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
async def _handle_input(
    user_input: UserInput, agent: AgentGraph, agent_id: str
) -> tuple[dict[str, Any], UUID]:
    """
    Parse user input and handle any required interrupt resumption.

    Builds the RunnableConfig (user-scoped thread id, optional model override,
    tracing callbacks, agent_config passthrough) and decides whether the message
    starts a new turn or resumes a pending interrupt.

    Returns kwargs for agent invocation and the run_id.

    Raises:
        HTTPException: 422 when agent_config contains reserved keys.
    """
    run_id = uuid7()
    thread_id = user_input.thread_id or str(uuid7())
    user_id = user_input.user_id or str(uuid7())

    # Namespace checkpoint by user so threads are user-scoped (Plan A).
    # Uses the shared helper so the key format stays consistent with get_history/list_threads.
    checkpoint_thread_id = _checkpoint_thread_id(user_id, thread_id)
    configurable = {"thread_id": checkpoint_thread_id, "user_id": user_id}
    if user_input.model is not None:
        configurable["model"] = user_input.model

    callbacks: list[Any] = []
    if settings.LANGFUSE_TRACING:
        # Initialize Langfuse CallbackHandler for Langchain (tracing)
        callbacks.append(CallbackHandler())

    # Normalize once: the metadata spread below must never see None
    # (the old code spread user_input.agent_config unconditionally, which
    # raises TypeError when agent_config is None).
    agent_config = user_input.agent_config or {}
    if agent_config:
        # Check for reserved keys (including 'model' even if not in configurable)
        reserved_keys = {"thread_id", "user_id", "model"}
        if overlap := reserved_keys & agent_config.keys():
            raise HTTPException(
                status_code=422,
                detail=f"agent_config contains reserved keys: {overlap}",
            )
        configurable.update(agent_config)

    config = RunnableConfig(
        configurable=configurable,
        metadata={
            "thread_id": thread_id,
            "user_id": user_id,
            "agent_id": agent_id,
            "model": user_input.model,
            **agent_config,
        },
        run_id=run_id,
        callbacks=callbacks,
    )

    # Check for interrupts that need to be resumed
    state = await agent.aget_state(config=config)
    interrupted_tasks = [
        task for task in state.tasks if hasattr(task, "interrupts") and task.interrupts
    ]

    input: Command | dict[str, Any]
    if interrupted_tasks:
        # assume user input is response to resume agent execution from interrupt
        input = Command(resume=user_input.message)
    else:
        input = {"messages": [HumanMessage(content=user_input.message)]}

    kwargs = {
        "input": input,
        "config": config,
    }

    return kwargs, run_id
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
def _create_ai_message(parts: dict) -> AIMessage:
    """Assemble an AIMessage from streamed parts, dropping keys its constructor rejects."""
    accepted = set(inspect.signature(AIMessage).parameters)
    return AIMessage(**{name: value for name, value in parts.items() if name in accepted})
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
async def invoke_agent(user_input: UserInput, agent_id: str = DEFAULT_AGENT) -> ChatMessage:
    """
    Run an agent to completion on one user message and return the final reply.

    Applies input/output safety guards and attaches follow-up suggestions
    to the returned message's custom_data.

    Raises:
        HTTPException: 400 when the input fails the safety filter,
            422 from input parsing, 500 on unexpected errors.
    """
    agent: AgentGraph = get_agent(agent_id)
    kwargs, run_id = await _handle_input(user_input, agent, agent_id)

    # ── Input safety guard ──────────────────────────────────────────
    input_check = await safety.check_input([HumanMessage(content=user_input.message)])
    if input_check.safety_assessment == SafetyAssessment.UNSAFE:
        raise HTTPException(
            status_code=400,
            detail=f"Request blocked by safety filter: {', '.join(input_check.unsafe_categories)}",
        )

    try:
        response_events: list[tuple[str, Any]] = await agent.ainvoke(**kwargs, stream_mode=["updates", "values"])  # type: ignore # fmt: skip
        response_type, response = response_events[-1]
        output: ChatMessage

        if response_type == "values":
            # Normal response, the agent completed successfully
            output = langchain_to_chat_message(response["messages"][-1])

            # ── Output safety guard ─────────────────────────────────
            output_check = await safety.check_output(response["messages"])
            if output_check.safety_assessment == SafetyAssessment.UNSAFE:
                output = langchain_to_chat_message(AIMessage(content=UNSAFE_RESPONSE))
        elif response_type == "updates" and "__interrupt__" in response:
            # The last thing to occur was an interrupt
            # Return the value of the first interrupt as an AIMessage
            output = langchain_to_chat_message(
                AIMessage(content=response["__interrupt__"][0].value)
            )
        else:
            raise ValueError(f"Unexpected response type: {response_type}")

        output.run_id = str(run_id)

        # ── Follow-up suggestions ──────────────────────────────────
        # Fix: in the interrupt branch `response` is the updates dict
        # ({"__interrupt__": [...]}) with no "messages" key, so indexing it
        # unconditionally raised KeyError and turned every interrupt into a 500.
        messages = response.get("messages", [])
        questions = await followup.generate(messages, kwargs["config"]) if messages else []
        output.custom_data = {"follow_up": questions}

        return output
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"An exception occurred: {e}")
        raise HTTPException(status_code=500, detail="Unexpected error")
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
async def message_generator(
    user_input: StreamInput, agent_id: str = DEFAULT_AGENT
) -> AsyncGenerator[str, None]:
    """
    Stream an agent run as Server-Sent Events.

    Yields "message", "token", and "error" SSE frames, then a follow-up
    suggestion message, and always terminates with "data: [DONE]".
    Input is screened by the safety middleware before the agent runs.
    """
    agent: AgentGraph = get_agent(agent_id)
    kwargs, run_id = await _handle_input(user_input, agent, agent_id)

    # ── Input safety guard ──────────────────────────────────────────────
    input_check = await safety.check_input([HumanMessage(content=user_input.message)])
    if input_check.safety_assessment == SafetyAssessment.UNSAFE:
        detail = f"Request blocked by safety filter: {', '.join(input_check.unsafe_categories)}"
        yield f"data: {json.dumps({'type': 'error', 'content': detail})}\n\n"
        yield "data: [DONE]\n\n"
        return

    try:
        # Process streamed events from the graph and yield messages over the SSE stream.
        async for stream_event in agent.astream(
            **kwargs, stream_mode=["updates", "messages", "custom"], subgraphs=True
        ):
            if not isinstance(stream_event, tuple):
                continue
            # Handle different stream event structures based on subgraphs
            if len(stream_event) == 3:
                # With subgraphs=True: (node_path, stream_mode, event)
                _, stream_mode, event = stream_event
            else:
                # Without subgraphs: (stream_mode, event)
                stream_mode, event = stream_event
            new_messages = []
            if stream_mode == "updates":
                for node, updates in event.items():
                    if node == "__interrupt__":
                        interrupt: Interrupt
                        for interrupt in updates:
                            new_messages.append(AIMessage(content=interrupt.value))
                        continue
                    updates = updates or {}
                    update_messages = updates.get("messages", [])
                    # special cases for using langgraph-supervisor library
                    if "supervisor" in node or "sub-agent" in node:
                        # the only tools that come from the actual agent are the handoff and handback tools
                        # NOTE(review): update_messages[-1] raises IndexError when a
                        # supervisor/sub-agent node emits an empty messages list — confirm
                        # whether that can happen, and guard if so.
                        if isinstance(update_messages[-1], ToolMessage):
                            if "sub-agent" in node and len(update_messages) > 1:
                                # If this is a sub-agent, we want to keep the last 2 messages - the handback tool, and it's result
                                update_messages = update_messages[-2:]
                            else:
                                # If this is a supervisor, we want to keep the last message only - the handoff result. The tool comes from the 'agent' node.
                                update_messages = [update_messages[-1]]
                        else:
                            update_messages = []
                    new_messages.extend(update_messages)

            if stream_mode == "custom":
                new_messages = [event]

            # Reassemble (key, value) fragments emitted by custom streams into
            # complete AIMessages while passing whole messages straight through.
            processed_messages = []
            current_message: dict[str, Any] = {}
            for message in new_messages:
                if isinstance(message, tuple):
                    key, value = message
                    # Store parts in temporary dict
                    current_message[key] = value
                else:
                    # Add complete message if we have one in progress
                    if current_message:
                        processed_messages.append(_create_ai_message(current_message))
                        current_message = {}
                    processed_messages.append(message)

            # Add any remaining message parts
            if current_message:
                processed_messages.append(_create_ai_message(current_message))

            for message in processed_messages:
                try:
                    chat_message = langchain_to_chat_message(message)
                    chat_message.run_id = str(run_id)
                except Exception as e:
                    logger.error(f"Error parsing message: {e}")
                    yield f"data: {json.dumps({'type': 'error', 'content': 'Unexpected error'})}\n\n"
                    continue
                # LangGraph re-sends the input message, which feels weird, so drop it
                if chat_message.type == "human" and chat_message.content == user_input.message:
                    continue
                yield f"data: {json.dumps({'type': 'message', 'content': chat_message.model_dump()})}\n\n"

            if stream_mode == "messages":
                if not user_input.stream_tokens:
                    continue
                msg, metadata = event
                # Nodes tagged "skip_stream" opt out of token-level streaming.
                if "skip_stream" in metadata.get("tags", []):
                    continue
                if not isinstance(msg, AIMessageChunk):
                    continue
                # content = remove_tool_calls(msg.content)
                content = msg.content
                if content:
                    yield f"data: {json.dumps({'type': 'token', 'content': convert_message_content_to_string(content)})}\n\n"
    except Exception as e:
        logger.error(f"Error in message generator: {e}")
        yield f"data: {json.dumps({'type': 'error', 'content': str(e)})}\n\n"

    # ── Follow-up suggestions (after stream completes) ─────────────
    try:
        state = await agent.aget_state(config=kwargs["config"])
        messages = state.values.get("messages", [])
        questions = await followup.generate(messages, kwargs["config"])
        followup_msg = ChatMessage(
            type="custom",
            content="",
            custom_data={"follow_up": questions},
        )
        followup_msg.run_id = str(run_id)
        yield f"data: {json.dumps({'type': 'message', 'content': followup_msg.model_dump()})}\n\n"
    except Exception as e:
        # Best-effort: a failed follow-up never breaks the stream.
        logger.warning(f"Follow-up generation failed in stream: {e}")
    finally:
        yield "data: [DONE]\n\n"
|
| 288 |
+
|
| 289 |
+
|
| 290 |
+
async def submit_feedback(feedback: Feedback) -> FeedbackResponse:
    """Record user feedback for a run in LangSmith and acknowledge it."""
    LangsmithClient().create_feedback(
        run_id=feedback.run_id,
        key=feedback.key,
        score=feedback.score,
        **(feedback.kwargs or {}),
    )
    return FeedbackResponse()
|
| 300 |
+
|
| 301 |
+
|
| 302 |
+
def _checkpoint_thread_id(user_id: str, thread_id: str) -> str:
|
| 303 |
+
"""Composite key so checkpoints are user-scoped."""
|
| 304 |
+
return f"{user_id}:{thread_id}"
|
| 305 |
+
|
| 306 |
+
|
| 307 |
+
async def get_history(input: ChatHistoryInput) -> ChatHistory:
    """
    Return the chat history of one user-scoped thread as ChatMessage objects.

    Raises:
        HTTPException: 422 when user_id is missing/blank, 500 on unexpected errors.
    """
    if not (input.user_id or "").strip():
        raise HTTPException(
            status_code=422,
            detail="user_id is required and must be non-empty",
        )
    # TODO: Hard-coding DEFAULT_AGENT here is wonky
    agent: AgentGraph = get_agent(DEFAULT_AGENT)
    checkpoint_thread_id = _checkpoint_thread_id(input.user_id, input.thread_id)
    try:
        state_snapshot = await agent.aget_state(
            config=RunnableConfig(
                configurable={
                    "thread_id": checkpoint_thread_id,
                    "user_id": input.user_id,
                },
                metadata={
                    "thread_id": input.thread_id,
                    "user_id": input.user_id,
                    "agent_id": DEFAULT_AGENT,
                },
            )
        )
        # Fix: a brand-new/unknown thread has no "messages" key in its state, so
        # indexing state_snapshot.values["messages"] raised KeyError and surfaced
        # as a 500. An empty history is the correct answer for such threads.
        messages: list[AnyMessage] = state_snapshot.values.get("messages", [])
        chat_messages: list[ChatMessage] = [langchain_to_chat_message(m) for m in messages]
        return ChatHistory(messages=chat_messages)
    except Exception as e:
        logger.error(f"An exception occurred: {e}")
        raise HTTPException(status_code=500, detail="Unexpected error")
|
| 336 |
+
|
| 337 |
+
|
| 338 |
+
def _iso_ts(dt: datetime | str | None) -> str | None:
|
| 339 |
+
"""Format datetime as ISO 8601 or return None."""
|
| 340 |
+
if dt is None:
|
| 341 |
+
return None
|
| 342 |
+
if isinstance(dt, str):
|
| 343 |
+
return dt
|
| 344 |
+
if dt.tzinfo is None:
|
| 345 |
+
dt = dt.replace(tzinfo=timezone.utc)
|
| 346 |
+
return dt.isoformat()
|
| 347 |
+
|
| 348 |
+
|
| 349 |
+
async def list_threads(
|
| 350 |
+
user_id: str,
|
| 351 |
+
*,
|
| 352 |
+
limit: int = 20,
|
| 353 |
+
offset: int = 0,
|
| 354 |
+
search: str | None = None,
|
| 355 |
+
) -> ThreadList:
|
| 356 |
+
"""
|
| 357 |
+
List thread IDs for a user by querying the checkpointer storage with prefix user_id:.
|
| 358 |
+
Returns logical thread_ids (without the user prefix), with updated_at when available.
|
| 359 |
+
Supports pagination (offset/limit) and optional search filter.
|
| 360 |
+
"""
|
| 361 |
+
if not (user_id or "").strip():
|
| 362 |
+
return ThreadList(threads=[], total=0)
|
| 363 |
+
agent: AgentGraph = get_agent(DEFAULT_AGENT)
|
| 364 |
+
checkpointer = getattr(agent, "checkpointer", None)
|
| 365 |
+
if checkpointer is None:
|
| 366 |
+
return ThreadList(threads=[], total=0)
|
| 367 |
+
prefix = f"{user_id}:"
|
| 368 |
+
# List of (logical_id, updated_at_iso | None)
|
| 369 |
+
rows: list[tuple[str, str | None]] = []
|
| 370 |
+
try:
|
| 371 |
+
# MongoDB: aggregation for thread_id + max timestamp
|
| 372 |
+
if hasattr(checkpointer, "checkpoint_collection"):
|
| 373 |
+
coll = checkpointer.checkpoint_collection
|
| 374 |
+
try:
|
| 375 |
+
pipeline = [
|
| 376 |
+
{"$match": {"thread_id": {"$regex": f"^{re.escape(user_id)}:"}}},
|
| 377 |
+
{
|
| 378 |
+
"$group": {
|
| 379 |
+
"_id": "$thread_id",
|
| 380 |
+
"ts": {"$max": {"$ifNull": ["$ts", "$updated_at", "$created_at"]}},
|
| 381 |
+
}
|
| 382 |
+
},
|
| 383 |
+
]
|
| 384 |
+
async for doc in coll.aggregate(pipeline):
|
| 385 |
+
tid = doc.get("_id")
|
| 386 |
+
if not isinstance(tid, str) or not tid.startswith(prefix):
|
| 387 |
+
continue
|
| 388 |
+
logical = tid[len(prefix) :]
|
| 389 |
+
ts = doc.get("ts")
|
| 390 |
+
rows.append((logical, _iso_ts(ts) if ts else None))
|
| 391 |
+
except Exception as mongo_err:
|
| 392 |
+
logger.debug(
|
| 393 |
+
"MongoDB aggregation failed, listing by thread_id only: %s",
|
| 394 |
+
mongo_err,
|
| 395 |
+
)
|
| 396 |
+
raw_ids = await coll.distinct(
|
| 397 |
+
"thread_id",
|
| 398 |
+
{"thread_id": {"$regex": f"^{re.escape(user_id)}:"}},
|
| 399 |
+
)
|
| 400 |
+
for tid in raw_ids:
|
| 401 |
+
if isinstance(tid, str) and tid.startswith(prefix):
|
| 402 |
+
rows.append((tid[len(prefix) :], None))
|
| 403 |
+
# Postgres: GROUP BY thread_id, MAX(ts)
|
| 404 |
+
elif hasattr(checkpointer, "pool") or hasattr(checkpointer, "conn"):
|
| 405 |
+
# AsyncPostgresSaver often uses .pool, but can have .conn
|
| 406 |
+
pool = getattr(checkpointer, "pool", getattr(checkpointer, "conn", None))
|
| 407 |
+
# If it's a pool, we need an async connection
|
| 408 |
+
if hasattr(pool, "connection"):
|
| 409 |
+
conn_ctx = pool.connection()
|
| 410 |
+
else:
|
| 411 |
+
conn_ctx = pool # Assume it's already a connection or manages its own
|
| 412 |
+
|
| 413 |
+
async with conn_ctx as conn:
|
| 414 |
+
async with conn.cursor() as cur:
|
| 415 |
+
try:
|
| 416 |
+
# Try various common timestamp column names
|
| 417 |
+
await cur.execute(
|
| 418 |
+
"""
|
| 419 |
+
SELECT thread_id, MAX(COALESCE(ts, updated_at, created_at)) AS ts
|
| 420 |
+
FROM checkpoints
|
| 421 |
+
WHERE thread_id LIKE %s
|
| 422 |
+
GROUP BY thread_id
|
| 423 |
+
""",
|
| 424 |
+
(prefix + "%",),
|
| 425 |
+
)
|
| 426 |
+
except Exception as pg_err:
|
| 427 |
+
logger.debug(
|
| 428 |
+
"Postgres MAX(ts) failed, trying metadata: %s",
|
| 429 |
+
pg_err,
|
| 430 |
+
)
|
| 431 |
+
try:
|
| 432 |
+
await cur.execute(
|
| 433 |
+
"""
|
| 434 |
+
SELECT thread_id, MAX(created_at) AS ts
|
| 435 |
+
FROM checkpoint_metadata
|
| 436 |
+
WHERE thread_id LIKE %s
|
| 437 |
+
GROUP BY thread_id
|
| 438 |
+
""",
|
| 439 |
+
(prefix + "%",),
|
| 440 |
+
)
|
| 441 |
+
except Exception:
|
| 442 |
+
await cur.execute(
|
| 443 |
+
"SELECT DISTINCT thread_id FROM checkpoints WHERE thread_id LIKE %s",
|
| 444 |
+
(prefix + "%",),
|
| 445 |
+
)
|
| 446 |
+
for row in await cur.fetchall():
|
| 447 |
+
raw = (
|
| 448 |
+
row.get("thread_id")
|
| 449 |
+
if isinstance(row, dict)
|
| 450 |
+
else (row[0] if row else None)
|
| 451 |
+
)
|
| 452 |
+
if isinstance(raw, str) and raw.startswith(prefix):
|
| 453 |
+
rows.append((raw[len(prefix) :], None))
|
| 454 |
+
else:
|
| 455 |
+
for row in await cur.fetchall():
|
| 456 |
+
raw = row.get("thread_id") if isinstance(row, dict) else row[0]
|
| 457 |
+
ts_val = row.get("ts") if isinstance(row, dict) else row[1]
|
| 458 |
+
if isinstance(raw, str) and raw.startswith(prefix):
|
| 459 |
+
rows.append((raw[len(prefix) :], _iso_ts(ts_val)))
|
| 460 |
+
else:
|
| 461 |
+
for row in await cur.fetchall():
|
| 462 |
+
raw = (
|
| 463 |
+
row.get("thread_id")
|
| 464 |
+
if isinstance(row, dict)
|
| 465 |
+
else (row[0] if row else None)
|
| 466 |
+
)
|
| 467 |
+
ts_val = (
|
| 468 |
+
row.get("ts")
|
| 469 |
+
if isinstance(row, dict)
|
| 470 |
+
else (
|
| 471 |
+
row[1]
|
| 472 |
+
if isinstance(row, (list, tuple)) and len(row) > 1
|
| 473 |
+
else None
|
| 474 |
+
)
|
| 475 |
+
)
|
| 476 |
+
if isinstance(raw, str) and raw.startswith(prefix):
|
| 477 |
+
rows.append((raw[len(prefix) :], _iso_ts(ts_val)))
|
| 478 |
+
# SQLite: GROUP BY thread_id, MAX(ts)
|
| 479 |
+
elif hasattr(checkpointer, "conn"):
|
| 480 |
+
conn = checkpointer.conn
|
| 481 |
+
try:
|
| 482 |
+
cursor = await conn.execute(
|
| 483 |
+
"""
|
| 484 |
+
SELECT thread_id, MAX(COALESCE(ts, updated_at, created_at)) AS ts
|
| 485 |
+
FROM checkpoints
|
| 486 |
+
WHERE thread_id LIKE ?
|
| 487 |
+
GROUP BY thread_id
|
| 488 |
+
""",
|
| 489 |
+
(prefix + "%",),
|
| 490 |
+
)
|
| 491 |
+
for row in await cursor.fetchall():
|
| 492 |
+
raw = row[0] if isinstance(row, (list, tuple)) else row
|
| 493 |
+
ts_val = row[1] if isinstance(row, (list, tuple)) and len(row) > 1 else None
|
| 494 |
+
if isinstance(raw, str) and raw.startswith(prefix):
|
| 495 |
+
rows.append((raw[len(prefix) :], _iso_ts(ts_val)))
|
| 496 |
+
except Exception as sqlite_err:
|
| 497 |
+
logger.debug("SQLite MAX(ts) failed, listing by thread_id only: %s", sqlite_err)
|
| 498 |
+
cursor = await conn.execute(
|
| 499 |
+
"SELECT DISTINCT thread_id FROM checkpoints WHERE thread_id LIKE ?",
|
| 500 |
+
(prefix + "%",),
|
| 501 |
+
)
|
| 502 |
+
for row in await cursor.fetchall():
|
| 503 |
+
raw = row[0] if isinstance(row, (list, tuple)) else row
|
| 504 |
+
if isinstance(raw, str) and raw.startswith(prefix):
|
| 505 |
+
rows.append((raw[len(prefix) :], None))
|
| 506 |
+
else:
|
| 507 |
+
logger.warning("Unknown checkpointer type; cannot list threads by prefix")
|
| 508 |
+
except Exception as e:
|
| 509 |
+
logger.error(f"Error listing threads for user: {e}")
|
| 510 |
+
raise HTTPException(status_code=500, detail="Failed to list threads") from e
|
| 511 |
+
|
| 512 |
+
# Sort by updated_at desc (None last), then by thread_id
|
| 513 |
+
def _sort_key(item: tuple[str, str | None]) -> tuple[bool, str, str]:
|
| 514 |
+
logical, ts = item
|
| 515 |
+
# desc sort: flip characters in timestamp if it exists
|
| 516 |
+
return (ts is None, (ts or "")[::-1], logical)
|
| 517 |
+
|
| 518 |
+
rows.sort(key=_sort_key)
|
| 519 |
+
|
| 520 |
+
# Filter by search
|
| 521 |
+
search_clean = (search or "").strip().lower()
|
| 522 |
+
if search_clean:
|
| 523 |
+
rows = [(logical, ts) for logical, ts in rows if search_clean in logical.lower()]
|
| 524 |
+
|
| 525 |
+
total = len(rows)
|
| 526 |
+
rows = rows[offset : offset + limit]
|
| 527 |
+
|
| 528 |
+
# Fetch additional details (preview, precise timestamp) for the requested page in parallel
|
| 529 |
+
async def get_thread_summary(logical_id: str, existing_ts: str | None) -> ThreadSummary:
    """Build a ThreadSummary for one logical thread id.

    Fetches the graph state for the namespaced thread, derives a short
    preview from the last message, and backfills the timestamp from state
    metadata when the checkpoint query did not supply one. Never raises:
    any failure degrades to a summary without a preview.
    """
    try:
        # thread_id is namespaced as "<user_id>:<logical_id>" — this must
        # match the format used on the write path or state lookups miss.
        config = RunnableConfig(
            configurable={
                "thread_id": f"{user_id}:{logical_id}",
                "user_id": user_id,
            }
        )
        state = await agent.aget_state(config)
        preview = None
        ts = existing_ts

        if state.values and "messages" in state.values:
            msgs = state.values["messages"]
            if msgs:
                # Get the last non-custom message for better preview
                last_msg = msgs[-1]
                preview = convert_message_content_to_string(last_msg.content)
                # Cap previews at 120 chars to keep list payloads small
                # (117 chars + "..." == 120).
                if preview and len(preview) > 120:
                    preview = preview[:117] + "..."

        # If we don't have a timestamp, try to get it from state metadata
        # NOTE(review): assumes metadata "ts"/"created_at" is a value
        # _iso_ts can normalize — confirm against checkpointer internals.
        if not ts and state.metadata:
            m_ts = state.metadata.get("ts") or state.metadata.get("created_at")
            if m_ts:
                ts = _iso_ts(m_ts)

        return ThreadSummary(thread_id=logical_id, updated_at=ts, preview=preview)
    except Exception as e:
        # Best-effort: a broken thread should not fail the whole listing.
        logger.warning(f"Failed to fetch state for thread {logical_id}: {e}")
        return ThreadSummary(thread_id=logical_id, updated_at=existing_ts, preview=None)
|
| 560 |
+
|
| 561 |
+
summaries = await asyncio.gather(*[get_thread_summary(logical, ts) for logical, ts in rows])
|
| 562 |
+
return ThreadList(threads=list(summaries), total=total)
|
src/service/dependencies.py
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Annotated
|
| 2 |
+
|
| 3 |
+
from fastapi import Depends, HTTPException, status
|
| 4 |
+
from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer
|
| 5 |
+
|
| 6 |
+
from core import settings
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def verify_bearer(
    http_auth: Annotated[
        HTTPAuthorizationCredentials | None,
        Depends(
            HTTPBearer(
                description="Please provide AUTH_SECRET api key", auto_error=False
            )
        ),
    ],
) -> None:
    """FastAPI dependency enforcing bearer-token auth.

    No-op when AUTH_SECRET is unset (auth disabled). Otherwise rejects
    requests whose bearer credentials do not match with HTTP 401.

    Raises:
        HTTPException: 401 when credentials are missing or wrong.
    """
    import secrets  # stdlib; local import keeps the module surface unchanged

    if not settings.AUTH_SECRET:
        return
    auth_secret = settings.AUTH_SECRET.get_secret_value()
    # compare_digest: constant-time comparison so the secret cannot be
    # recovered character-by-character via response-timing differences.
    if not http_auth or not secrets.compare_digest(http_auth.credentials, auth_secret):
        raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED)
|
src/service/router.py
ADDED
|
@@ -0,0 +1,130 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Any
|
| 2 |
+
|
| 3 |
+
from fastapi import APIRouter, Depends, status
|
| 4 |
+
from fastapi.responses import StreamingResponse
|
| 5 |
+
|
| 6 |
+
from agents import DEFAULT_AGENT, get_all_agent_info
|
| 7 |
+
from core import settings
|
| 8 |
+
from schema import (
|
| 9 |
+
ChatHistory,
|
| 10 |
+
ChatHistoryInput,
|
| 11 |
+
ChatMessage,
|
| 12 |
+
Feedback,
|
| 13 |
+
FeedbackResponse,
|
| 14 |
+
ServiceMetadata,
|
| 15 |
+
StreamInput,
|
| 16 |
+
ThreadList,
|
| 17 |
+
ThreadListInput,
|
| 18 |
+
UserInput,
|
| 19 |
+
)
|
| 20 |
+
from service import agent_service
|
| 21 |
+
from service.dependencies import verify_bearer
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
router = APIRouter(dependencies=[Depends(verify_bearer)])
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def _sse_response_example() -> dict[int | str, Any]:
    """OpenAPI response documentation for the SSE streaming endpoints."""
    example = (
        "data: {'type': 'token', 'content': 'Hello'}\n\n"
        "data: {'type': 'token', 'content': ' World'}\n\n"
        "data: [DONE]\n\n"
    )
    body = {
        "description": "Server Sent Event Response",
        "content": {
            "text/event-stream": {
                "example": example,
                "schema": {"type": "string"},
            }
        },
    }
    return {status.HTTP_200_OK: body}
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
@router.get("/info")
|
| 42 |
+
async def info() -> ServiceMetadata:
|
| 43 |
+
models = list(settings.AVAILABLE_MODELS)
|
| 44 |
+
models.sort()
|
| 45 |
+
return ServiceMetadata(
|
| 46 |
+
agents=get_all_agent_info(),
|
| 47 |
+
models=models,
|
| 48 |
+
default_agent=DEFAULT_AGENT,
|
| 49 |
+
default_model=settings.DEFAULT_MODEL,
|
| 50 |
+
)
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
@router.post("/{agent_id}/invoke", operation_id="invoke_with_agent_id")
|
| 54 |
+
@router.post("/invoke")
|
| 55 |
+
async def invoke(
|
| 56 |
+
user_input: UserInput, agent_id: str = DEFAULT_AGENT
|
| 57 |
+
) -> ChatMessage:
|
| 58 |
+
"""
|
| 59 |
+
Invoke an agent with user input to retrieve a final response.
|
| 60 |
+
|
| 61 |
+
If agent_id is not provided, the default agent will be used.
|
| 62 |
+
Use thread_id to persist and continue a multi-turn conversation. run_id kwarg
|
| 63 |
+
is also attached to messages for recording feedback.
|
| 64 |
+
Use user_id to persist and continue a conversation across multiple threads.
|
| 65 |
+
"""
|
| 66 |
+
return await agent_service.invoke_agent(user_input, agent_id)
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
@router.post(
|
| 70 |
+
"/{agent_id}/stream",
|
| 71 |
+
response_class=StreamingResponse,
|
| 72 |
+
responses=_sse_response_example(),
|
| 73 |
+
operation_id="stream_with_agent_id",
|
| 74 |
+
)
|
| 75 |
+
@router.post(
|
| 76 |
+
"/stream", response_class=StreamingResponse, responses=_sse_response_example()
|
| 77 |
+
)
|
| 78 |
+
async def stream(
|
| 79 |
+
user_input: StreamInput, agent_id: str = DEFAULT_AGENT
|
| 80 |
+
) -> StreamingResponse:
|
| 81 |
+
"""
|
| 82 |
+
Stream an agent's response to a user input, including intermediate messages and tokens.
|
| 83 |
+
|
| 84 |
+
If agent_id is not provided, the default agent will be used.
|
| 85 |
+
Use thread_id to persist and continue a multi-turn conversation. run_id kwarg
|
| 86 |
+
is also attached to all messages for recording feedback.
|
| 87 |
+
Use user_id to persist and continue a conversation across multiple threads.
|
| 88 |
+
|
| 89 |
+
Set `stream_tokens=false` to return intermediate messages but not token-by-token.
|
| 90 |
+
"""
|
| 91 |
+
return StreamingResponse(
|
| 92 |
+
agent_service.message_generator(user_input, agent_id),
|
| 93 |
+
media_type="text/event-stream",
|
| 94 |
+
)
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
@router.post("/feedback")
|
| 98 |
+
async def feedback(feedback: Feedback) -> FeedbackResponse:
|
| 99 |
+
"""
|
| 100 |
+
Record feedback for a run to LangSmith.
|
| 101 |
+
|
| 102 |
+
This is a simple wrapper for the LangSmith create_feedback API, so the
|
| 103 |
+
credentials can be stored and managed in the service rather than the client.
|
| 104 |
+
See: https://api.smith.langchain.com/redoc#tag/feedback/operation/create_feedback_api_v1_feedback_post
|
| 105 |
+
"""
|
| 106 |
+
return await agent_service.submit_feedback(feedback)
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
@router.post("/history")
|
| 110 |
+
async def history(input: ChatHistoryInput) -> ChatHistory:
|
| 111 |
+
"""
|
| 112 |
+
Get chat history for a thread. Requires user_id and thread_id.
|
| 113 |
+
Returns only messages for the given user's thread.
|
| 114 |
+
"""
|
| 115 |
+
return await agent_service.get_history(input)
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
@router.post("/history/threads")
|
| 119 |
+
async def history_threads(input: ThreadListInput) -> ThreadList:
|
| 120 |
+
"""
|
| 121 |
+
List conversation threads for a user. Requires user_id.
|
| 122 |
+
Returns thread IDs (and optional metadata) scoped to that user.
|
| 123 |
+
Supports limit, offset, and optional search filter.
|
| 124 |
+
"""
|
| 125 |
+
return await agent_service.list_threads(
|
| 126 |
+
input.user_id,
|
| 127 |
+
limit=input.limit,
|
| 128 |
+
offset=input.offset,
|
| 129 |
+
search=input.search,
|
| 130 |
+
)
|
src/service/service.py
CHANGED
|
@@ -1,46 +1,18 @@
|
|
| 1 |
-
import inspect
|
| 2 |
-
import json
|
| 3 |
import logging
|
| 4 |
import warnings
|
| 5 |
from collections.abc import AsyncGenerator
|
| 6 |
from contextlib import asynccontextmanager
|
| 7 |
-
from typing import Annotated, Any
|
| 8 |
-
from langsmith import uuid7
|
| 9 |
-
from uuid import UUID
|
| 10 |
|
| 11 |
-
from fastapi import
|
| 12 |
-
from fastapi.responses import StreamingResponse
|
| 13 |
-
from fastapi.routing import APIRoute
|
| 14 |
from fastapi.middleware.cors import CORSMiddleware
|
| 15 |
-
from fastapi.
|
| 16 |
from langchain_core._api import LangChainBetaWarning
|
| 17 |
-
from
|
| 18 |
-
from langchain_core.runnables import RunnableConfig
|
| 19 |
-
from langfuse import Langfuse # type: ignore[import-untyped]
|
| 20 |
-
from langfuse.langchain import (
|
| 21 |
-
CallbackHandler, # type: ignore[import-untyped]
|
| 22 |
-
)
|
| 23 |
-
from langgraph.types import Command, Interrupt
|
| 24 |
-
from langsmith import Client as LangsmithClient
|
| 25 |
|
| 26 |
-
from agents import
|
| 27 |
from core import settings
|
| 28 |
from memory import initialize_database, initialize_store
|
| 29 |
-
from
|
| 30 |
-
ChatHistory,
|
| 31 |
-
ChatHistoryInput,
|
| 32 |
-
ChatMessage,
|
| 33 |
-
Feedback,
|
| 34 |
-
FeedbackResponse,
|
| 35 |
-
ServiceMetadata,
|
| 36 |
-
StreamInput,
|
| 37 |
-
UserInput,
|
| 38 |
-
)
|
| 39 |
-
from service.utils import (
|
| 40 |
-
convert_message_content_to_string,
|
| 41 |
-
langchain_to_chat_message,
|
| 42 |
-
remove_tool_calls,
|
| 43 |
-
)
|
| 44 |
|
| 45 |
warnings.filterwarnings("ignore", category=LangChainBetaWarning)
|
| 46 |
logger = logging.getLogger(__name__)
|
|
@@ -51,19 +23,6 @@ def custom_generate_unique_id(route: APIRoute) -> str:
|
|
| 51 |
return route.name
|
| 52 |
|
| 53 |
|
| 54 |
-
def verify_bearer(
|
| 55 |
-
http_auth: Annotated[
|
| 56 |
-
HTTPAuthorizationCredentials | None,
|
| 57 |
-
Depends(HTTPBearer(description="Please provide AUTH_SECRET api key.", auto_error=False)),
|
| 58 |
-
],
|
| 59 |
-
) -> None:
|
| 60 |
-
if not settings.AUTH_SECRET:
|
| 61 |
-
return
|
| 62 |
-
auth_secret = settings.AUTH_SECRET.get_secret_value()
|
| 63 |
-
if not http_auth or http_auth.credentials != auth_secret:
|
| 64 |
-
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED)
|
| 65 |
-
|
| 66 |
-
|
| 67 |
@asynccontextmanager
|
| 68 |
async def lifespan(app: FastAPI) -> AsyncGenerator[None, None]:
|
| 69 |
"""
|
|
@@ -101,330 +60,19 @@ async def lifespan(app: FastAPI) -> AsyncGenerator[None, None]:
|
|
| 101 |
raise
|
| 102 |
|
| 103 |
|
| 104 |
-
app = FastAPI(
|
|
|
|
|
|
|
|
|
|
|
|
|
| 105 |
app.add_middleware(
|
| 106 |
CORSMiddleware,
|
| 107 |
allow_origins=settings.CORS_ORIGINS,
|
| 108 |
allow_credentials=True,
|
| 109 |
allow_methods=["*"],
|
| 110 |
allow_headers=["*"],
|
|
|
|
| 111 |
)
|
| 112 |
-
router = APIRouter(dependencies=[Depends(verify_bearer)])
|
| 113 |
-
|
| 114 |
-
|
| 115 |
-
@router.get("/info")
|
| 116 |
-
async def info() -> ServiceMetadata:
|
| 117 |
-
models = list(settings.AVAILABLE_MODELS)
|
| 118 |
-
models.sort()
|
| 119 |
-
return ServiceMetadata(
|
| 120 |
-
agents=get_all_agent_info(),
|
| 121 |
-
models=models,
|
| 122 |
-
default_agent=DEFAULT_AGENT,
|
| 123 |
-
default_model=settings.DEFAULT_MODEL,
|
| 124 |
-
)
|
| 125 |
-
|
| 126 |
-
|
| 127 |
-
async def _handle_input(user_input: UserInput, agent: AgentGraph) -> tuple[dict[str, Any], UUID]:
|
| 128 |
-
"""
|
| 129 |
-
Parse user input and handle any required interrupt resumption.
|
| 130 |
-
Returns kwargs for agent invocation and the run_id.
|
| 131 |
-
"""
|
| 132 |
-
run_id = uuid7()
|
| 133 |
-
thread_id = user_input.thread_id or str(uuid7())
|
| 134 |
-
user_id = user_input.user_id or str(uuid7())
|
| 135 |
-
|
| 136 |
-
configurable = {"thread_id": thread_id, "user_id": user_id}
|
| 137 |
-
if user_input.model is not None:
|
| 138 |
-
configurable["model"] = user_input.model
|
| 139 |
-
|
| 140 |
-
callbacks: list[Any] = []
|
| 141 |
-
if settings.LANGFUSE_TRACING:
|
| 142 |
-
# Initialize Langfuse CallbackHandler for Langchain (tracing)
|
| 143 |
-
langfuse_handler = CallbackHandler()
|
| 144 |
-
|
| 145 |
-
callbacks.append(langfuse_handler)
|
| 146 |
-
|
| 147 |
-
if user_input.agent_config:
|
| 148 |
-
# Check for reserved keys (including 'model' even if not in configurable)
|
| 149 |
-
reserved_keys = {"thread_id", "user_id", "model"}
|
| 150 |
-
if overlap := reserved_keys & user_input.agent_config.keys():
|
| 151 |
-
raise HTTPException(
|
| 152 |
-
status_code=422,
|
| 153 |
-
detail=f"agent_config contains reserved keys: {overlap}",
|
| 154 |
-
)
|
| 155 |
-
configurable.update(user_input.agent_config)
|
| 156 |
-
|
| 157 |
-
config = RunnableConfig(
|
| 158 |
-
configurable=configurable,
|
| 159 |
-
run_id=run_id,
|
| 160 |
-
callbacks=callbacks,
|
| 161 |
-
)
|
| 162 |
-
|
| 163 |
-
# Check for interrupts that need to be resumed
|
| 164 |
-
state = await agent.aget_state(config=config)
|
| 165 |
-
interrupted_tasks = [
|
| 166 |
-
task for task in state.tasks if hasattr(task, "interrupts") and task.interrupts
|
| 167 |
-
]
|
| 168 |
-
|
| 169 |
-
input: Command | dict[str, Any]
|
| 170 |
-
if interrupted_tasks:
|
| 171 |
-
# assume user input is response to resume agent execution from interrupt
|
| 172 |
-
input = Command(resume=user_input.message)
|
| 173 |
-
else:
|
| 174 |
-
input = {"messages": [HumanMessage(content=user_input.message)]}
|
| 175 |
-
|
| 176 |
-
kwargs = {
|
| 177 |
-
"input": input,
|
| 178 |
-
"config": config,
|
| 179 |
-
}
|
| 180 |
-
|
| 181 |
-
return kwargs, run_id
|
| 182 |
-
|
| 183 |
-
|
| 184 |
-
@router.post("/{agent_id}/invoke", operation_id="invoke_with_agent_id")
|
| 185 |
-
@router.post("/invoke")
|
| 186 |
-
async def invoke(user_input: UserInput, agent_id: str = DEFAULT_AGENT) -> ChatMessage:
|
| 187 |
-
"""
|
| 188 |
-
Invoke an agent with user input to retrieve a final response.
|
| 189 |
-
|
| 190 |
-
If agent_id is not provided, the default agent will be used.
|
| 191 |
-
Use thread_id to persist and continue a multi-turn conversation. run_id kwarg
|
| 192 |
-
is also attached to messages for recording feedback.
|
| 193 |
-
Use user_id to persist and continue a conversation across multiple threads.
|
| 194 |
-
"""
|
| 195 |
-
# NOTE: Currently this only returns the last message or interrupt.
|
| 196 |
-
# In the case of an agent outputting multiple AIMessages (such as the background step
|
| 197 |
-
# in interrupt-agent, or a tool step in research-assistant), it's omitted. Arguably,
|
| 198 |
-
# you'd want to include it. You could update the API to return a list of ChatMessages
|
| 199 |
-
# in that case.
|
| 200 |
-
agent: AgentGraph = get_agent(agent_id)
|
| 201 |
-
kwargs, run_id = await _handle_input(user_input, agent)
|
| 202 |
-
|
| 203 |
-
try:
|
| 204 |
-
response_events: list[tuple[str, Any]] = await agent.ainvoke(**kwargs, stream_mode=["updates", "values"]) # type: ignore # fmt: skip
|
| 205 |
-
response_type, response = response_events[-1]
|
| 206 |
-
if response_type == "values":
|
| 207 |
-
# Normal response, the agent completed successfully
|
| 208 |
-
output = langchain_to_chat_message(response["messages"][-1])
|
| 209 |
-
elif response_type == "updates" and "__interrupt__" in response:
|
| 210 |
-
# The last thing to occur was an interrupt
|
| 211 |
-
# Return the value of the first interrupt as an AIMessage
|
| 212 |
-
output = langchain_to_chat_message(
|
| 213 |
-
AIMessage(content=response["__interrupt__"][0].value)
|
| 214 |
-
)
|
| 215 |
-
else:
|
| 216 |
-
raise ValueError(f"Unexpected response type: {response_type}")
|
| 217 |
-
|
| 218 |
-
output.run_id = str(run_id)
|
| 219 |
-
return output
|
| 220 |
-
except Exception as e:
|
| 221 |
-
logger.error(f"An exception occurred: {e}")
|
| 222 |
-
raise HTTPException(status_code=500, detail="Unexpected error")
|
| 223 |
-
|
| 224 |
-
|
| 225 |
-
async def message_generator(
|
| 226 |
-
user_input: StreamInput, agent_id: str = DEFAULT_AGENT
|
| 227 |
-
) -> AsyncGenerator[str, None]:
|
| 228 |
-
"""
|
| 229 |
-
Generate a stream of messages from the agent.
|
| 230 |
-
|
| 231 |
-
This is the workhorse method for the /stream endpoint.
|
| 232 |
-
"""
|
| 233 |
-
agent: AgentGraph = get_agent(agent_id)
|
| 234 |
-
kwargs, run_id = await _handle_input(user_input, agent)
|
| 235 |
-
|
| 236 |
-
try:
|
| 237 |
-
# Process streamed events from the graph and yield messages over the SSE stream.
|
| 238 |
-
async for stream_event in agent.astream(
|
| 239 |
-
**kwargs, stream_mode=["updates", "messages", "custom"], subgraphs=True
|
| 240 |
-
):
|
| 241 |
-
if not isinstance(stream_event, tuple):
|
| 242 |
-
continue
|
| 243 |
-
# Handle different stream event structures based on subgraphs
|
| 244 |
-
if len(stream_event) == 3:
|
| 245 |
-
# With subgraphs=True: (node_path, stream_mode, event)
|
| 246 |
-
_, stream_mode, event = stream_event
|
| 247 |
-
else:
|
| 248 |
-
# Without subgraphs: (stream_mode, event)
|
| 249 |
-
stream_mode, event = stream_event
|
| 250 |
-
new_messages = []
|
| 251 |
-
if stream_mode == "updates":
|
| 252 |
-
for node, updates in event.items():
|
| 253 |
-
# A simple approach to handle agent interrupts.
|
| 254 |
-
# In a more sophisticated implementation, we could add
|
| 255 |
-
# some structured ChatMessage type to return the interrupt value.
|
| 256 |
-
if node == "__interrupt__":
|
| 257 |
-
interrupt: Interrupt
|
| 258 |
-
for interrupt in updates:
|
| 259 |
-
new_messages.append(AIMessage(content=interrupt.value))
|
| 260 |
-
continue
|
| 261 |
-
updates = updates or {}
|
| 262 |
-
update_messages = updates.get("messages", [])
|
| 263 |
-
# special cases for using langgraph-supervisor library
|
| 264 |
-
if "supervisor" in node or "sub-agent" in node:
|
| 265 |
-
# the only tools that come from the actual agent are the handoff and handback tools
|
| 266 |
-
if isinstance(update_messages[-1], ToolMessage):
|
| 267 |
-
if "sub-agent" in node and len(update_messages) > 1:
|
| 268 |
-
# If this is a sub-agent, we want to keep the last 2 messages - the handback tool, and it's result
|
| 269 |
-
update_messages = update_messages[-2:]
|
| 270 |
-
else:
|
| 271 |
-
# If this is a supervisor, we want to keep the last message only - the handoff result. The tool comes from the 'agent' node.
|
| 272 |
-
update_messages = [update_messages[-1]]
|
| 273 |
-
else:
|
| 274 |
-
update_messages = []
|
| 275 |
-
new_messages.extend(update_messages)
|
| 276 |
-
|
| 277 |
-
if stream_mode == "custom":
|
| 278 |
-
new_messages = [event]
|
| 279 |
-
|
| 280 |
-
# Send update events for non-message updates (like follow_up)
|
| 281 |
-
if stream_mode == "updates" and hasattr(event, "items"):
|
| 282 |
-
for node, updates in event.items():
|
| 283 |
-
if updates:
|
| 284 |
-
other_updates = {k: v for k, v in updates.items() if k != "messages"}
|
| 285 |
-
if other_updates:
|
| 286 |
-
yield f"data: {json.dumps({'type': 'update', 'node': node, 'updates': other_updates})}\n\n"
|
| 287 |
-
|
| 288 |
-
# LangGraph streaming may emit tuples: (field_name, field_value)
|
| 289 |
-
# e.g. ('content', <str>), ('tool_calls', [ToolCall,...]), ('additional_kwargs', {...}), etc.
|
| 290 |
-
# We accumulate only supported fields into `parts` and skip unsupported metadata.
|
| 291 |
-
# More info at: https://langchain-ai.github.io/langgraph/cloud/how-tos/stream_messages/
|
| 292 |
-
processed_messages = []
|
| 293 |
-
current_message: dict[str, Any] = {}
|
| 294 |
-
for message in new_messages:
|
| 295 |
-
if isinstance(message, tuple):
|
| 296 |
-
key, value = message
|
| 297 |
-
# Store parts in temporary dict
|
| 298 |
-
current_message[key] = value
|
| 299 |
-
else:
|
| 300 |
-
# Add complete message if we have one in progress
|
| 301 |
-
if current_message:
|
| 302 |
-
processed_messages.append(_create_ai_message(current_message))
|
| 303 |
-
current_message = {}
|
| 304 |
-
processed_messages.append(message)
|
| 305 |
-
|
| 306 |
-
# Add any remaining message parts
|
| 307 |
-
if current_message:
|
| 308 |
-
processed_messages.append(_create_ai_message(current_message))
|
| 309 |
-
|
| 310 |
-
for message in processed_messages:
|
| 311 |
-
try:
|
| 312 |
-
chat_message = langchain_to_chat_message(message)
|
| 313 |
-
chat_message.run_id = str(run_id)
|
| 314 |
-
except Exception as e:
|
| 315 |
-
logger.error(f"Error parsing message: {e}")
|
| 316 |
-
yield f"data: {json.dumps({'type': 'error', 'content': 'Unexpected error'})}\n\n"
|
| 317 |
-
continue
|
| 318 |
-
# LangGraph re-sends the input message, which feels weird, so drop it
|
| 319 |
-
if chat_message.type == "human" and chat_message.content == user_input.message:
|
| 320 |
-
continue
|
| 321 |
-
yield f"data: {json.dumps({'type': 'message', 'content': chat_message.model_dump()})}\n\n"
|
| 322 |
-
|
| 323 |
-
if stream_mode == "messages":
|
| 324 |
-
if not user_input.stream_tokens:
|
| 325 |
-
continue
|
| 326 |
-
msg, metadata = event
|
| 327 |
-
if "skip_stream" in metadata.get("tags", []):
|
| 328 |
-
continue
|
| 329 |
-
# For some reason, astream("messages") causes non-LLM nodes to send extra messages.
|
| 330 |
-
# Drop them.
|
| 331 |
-
if not isinstance(msg, AIMessageChunk):
|
| 332 |
-
continue
|
| 333 |
-
content = remove_tool_calls(msg.content)
|
| 334 |
-
if content:
|
| 335 |
-
# Empty content in the context of OpenAI usually means
|
| 336 |
-
# that the model is asking for a tool to be invoked.
|
| 337 |
-
# So we only print non-empty content.
|
| 338 |
-
yield f"data: {json.dumps({'type': 'token', 'content': convert_message_content_to_string(content)})}\n\n"
|
| 339 |
-
except Exception as e:
|
| 340 |
-
logger.error(f"Error in message generator: {e}")
|
| 341 |
-
yield f"data: {json.dumps({'type': 'error', 'content': 'Internal server error'})}\n\n"
|
| 342 |
-
finally:
|
| 343 |
-
yield "data: [DONE]\n\n"
|
| 344 |
-
|
| 345 |
-
|
| 346 |
-
def _create_ai_message(parts: dict) -> AIMessage:
|
| 347 |
-
sig = inspect.signature(AIMessage)
|
| 348 |
-
valid_keys = set(sig.parameters)
|
| 349 |
-
filtered = {k: v for k, v in parts.items() if k in valid_keys}
|
| 350 |
-
return AIMessage(**filtered)
|
| 351 |
-
|
| 352 |
-
|
| 353 |
-
def _sse_response_example() -> dict[int | str, Any]:
|
| 354 |
-
return {
|
| 355 |
-
status.HTTP_200_OK: {
|
| 356 |
-
"description": "Server Sent Event Response",
|
| 357 |
-
"content": {
|
| 358 |
-
"text/event-stream": {
|
| 359 |
-
"example": "data: {'type': 'token', 'content': 'Hello'}\n\ndata: {'type': 'token', 'content': ' World'}\n\ndata: [DONE]\n\n",
|
| 360 |
-
"schema": {"type": "string"},
|
| 361 |
-
}
|
| 362 |
-
},
|
| 363 |
-
}
|
| 364 |
-
}
|
| 365 |
-
|
| 366 |
-
|
| 367 |
-
@router.post(
|
| 368 |
-
"/{agent_id}/stream",
|
| 369 |
-
response_class=StreamingResponse,
|
| 370 |
-
responses=_sse_response_example(),
|
| 371 |
-
operation_id="stream_with_agent_id",
|
| 372 |
-
)
|
| 373 |
-
@router.post("/stream", response_class=StreamingResponse, responses=_sse_response_example())
|
| 374 |
-
async def stream(user_input: StreamInput, agent_id: str = DEFAULT_AGENT) -> StreamingResponse:
|
| 375 |
-
"""
|
| 376 |
-
Stream an agent's response to a user input, including intermediate messages and tokens.
|
| 377 |
-
|
| 378 |
-
If agent_id is not provided, the default agent will be used.
|
| 379 |
-
Use thread_id to persist and continue a multi-turn conversation. run_id kwarg
|
| 380 |
-
is also attached to all messages for recording feedback.
|
| 381 |
-
Use user_id to persist and continue a conversation across multiple threads.
|
| 382 |
-
|
| 383 |
-
Set `stream_tokens=false` to return intermediate messages but not token-by-token.
|
| 384 |
-
"""
|
| 385 |
-
return StreamingResponse(
|
| 386 |
-
message_generator(user_input, agent_id),
|
| 387 |
-
media_type="text/event-stream",
|
| 388 |
-
)
|
| 389 |
-
|
| 390 |
-
|
| 391 |
-
@router.post("/feedback")
|
| 392 |
-
async def feedback(feedback: Feedback) -> FeedbackResponse:
|
| 393 |
-
"""
|
| 394 |
-
Record feedback for a run to LangSmith.
|
| 395 |
-
|
| 396 |
-
This is a simple wrapper for the LangSmith create_feedback API, so the
|
| 397 |
-
credentials can be stored and managed in the service rather than the client.
|
| 398 |
-
See: https://api.smith.langchain.com/redoc#tag/feedback/operation/create_feedback_api_v1_feedback_post
|
| 399 |
-
"""
|
| 400 |
-
client = LangsmithClient()
|
| 401 |
-
kwargs = feedback.kwargs or {}
|
| 402 |
-
client.create_feedback(
|
| 403 |
-
run_id=feedback.run_id,
|
| 404 |
-
key=feedback.key,
|
| 405 |
-
score=feedback.score,
|
| 406 |
-
**kwargs,
|
| 407 |
-
)
|
| 408 |
-
return FeedbackResponse()
|
| 409 |
-
|
| 410 |
-
|
| 411 |
-
@router.post("/history")
|
| 412 |
-
async def history(input: ChatHistoryInput) -> ChatHistory:
|
| 413 |
-
"""
|
| 414 |
-
Get chat history.
|
| 415 |
-
"""
|
| 416 |
-
# TODO: Hard-coding DEFAULT_AGENT here is wonky
|
| 417 |
-
agent: AgentGraph = get_agent(DEFAULT_AGENT)
|
| 418 |
-
try:
|
| 419 |
-
state_snapshot = await agent.aget_state(
|
| 420 |
-
config=RunnableConfig(configurable={"thread_id": input.thread_id})
|
| 421 |
-
)
|
| 422 |
-
messages: list[AnyMessage] = state_snapshot.values["messages"]
|
| 423 |
-
chat_messages: list[ChatMessage] = [langchain_to_chat_message(m) for m in messages]
|
| 424 |
-
return ChatHistory(messages=chat_messages)
|
| 425 |
-
except Exception as e:
|
| 426 |
-
logger.error(f"An exception occurred: {e}")
|
| 427 |
-
raise HTTPException(status_code=500, detail="Unexpected error")
|
| 428 |
|
| 429 |
|
| 430 |
@app.get("/health")
|
|
@@ -436,7 +84,9 @@ async def health_check():
|
|
| 436 |
if settings.LANGFUSE_TRACING:
|
| 437 |
try:
|
| 438 |
langfuse = Langfuse()
|
| 439 |
-
health_status["langfuse"] =
|
|
|
|
|
|
|
| 440 |
except Exception as e:
|
| 441 |
logger.error(f"Langfuse connection error: {e}")
|
| 442 |
health_status["langfuse"] = "disconnected"
|
|
|
|
|
|
|
|
|
|
| 1 |
import logging
|
| 2 |
import warnings
|
| 3 |
from collections.abc import AsyncGenerator
|
| 4 |
from contextlib import asynccontextmanager
|
|
|
|
|
|
|
|
|
|
| 5 |
|
| 6 |
+
from fastapi import FastAPI
|
|
|
|
|
|
|
| 7 |
from fastapi.middleware.cors import CORSMiddleware
|
| 8 |
+
from fastapi.routing import APIRoute
|
| 9 |
from langchain_core._api import LangChainBetaWarning
|
| 10 |
+
from langfuse import Langfuse
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 11 |
|
| 12 |
+
from agents import get_agent, get_all_agent_info, load_agent
|
| 13 |
from core import settings
|
| 14 |
from memory import initialize_database, initialize_store
|
| 15 |
+
from service.router import router
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 16 |
|
| 17 |
warnings.filterwarnings("ignore", category=LangChainBetaWarning)
|
| 18 |
logger = logging.getLogger(__name__)
|
|
|
|
| 23 |
return route.name
|
| 24 |
|
| 25 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 26 |
@asynccontextmanager
|
| 27 |
async def lifespan(app: FastAPI) -> AsyncGenerator[None, None]:
|
| 28 |
"""
|
|
|
|
| 60 |
raise
|
| 61 |
|
| 62 |
|
| 63 |
+
app = FastAPI(
|
| 64 |
+
lifespan=lifespan, generate_unique_id_function=custom_generate_unique_id
|
| 65 |
+
)
|
| 66 |
+
|
| 67 |
+
# Configure CORS
|
| 68 |
app.add_middleware(
|
| 69 |
CORSMiddleware,
|
| 70 |
allow_origins=settings.CORS_ORIGINS,
|
| 71 |
allow_credentials=True,
|
| 72 |
allow_methods=["*"],
|
| 73 |
allow_headers=["*"],
|
| 74 |
+
expose_headers=["*"],
|
| 75 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 76 |
|
| 77 |
|
| 78 |
@app.get("/health")
|
|
|
|
| 84 |
if settings.LANGFUSE_TRACING:
|
| 85 |
try:
|
| 86 |
langfuse = Langfuse()
|
| 87 |
+
health_status["langfuse"] = (
|
| 88 |
+
"connected" if langfuse.auth_check() else "disconnected"
|
| 89 |
+
)
|
| 90 |
except Exception as e:
|
| 91 |
logger.error(f"Langfuse connection error: {e}")
|
| 92 |
health_status["langfuse"] = "disconnected"
|
src/service/utils.py
CHANGED
|
@@ -1,3 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
| 1 |
from langchain_core.messages import (
|
| 2 |
AIMessage,
|
| 3 |
BaseMessage,
|
|
@@ -11,6 +14,19 @@ from langchain_core.messages import (
|
|
| 11 |
from schema import ChatMessage
|
| 12 |
|
| 13 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 14 |
def convert_message_content_to_string(content: str | list[str | dict]) -> str:
|
| 15 |
if isinstance(content, str):
|
| 16 |
return content
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
|
| 3 |
+
import toons
|
| 4 |
from langchain_core.messages import (
|
| 5 |
AIMessage,
|
| 6 |
BaseMessage,
|
|
|
|
| 14 |
from schema import ChatMessage
|
| 15 |
|
| 16 |
|
| 17 |
+
def convert_tool_response_to_toon(content: str) -> str:
    """Render a JSON tool response in TOON format to reduce token usage
    (30–60% fewer tokens than JSON); non-JSON input passes through untouched.

    https://pypi.org/project/toons/
    """
    if not content or not isinstance(content, str):
        return content
    try:
        parsed = json.loads(content)
    except (json.JSONDecodeError, TypeError):
        # Not JSON — hand the payload back verbatim.
        return content
    return toons.dumps(parsed)
|
| 29 |
+
|
| 30 |
def convert_message_content_to_string(content: str | list[str | dict]) -> str:
|
| 31 |
if isinstance(content, str):
|
| 32 |
return content
|
uv.lock
CHANGED
|
@@ -8,12 +8,11 @@ resolution-markers = [
|
|
| 8 |
]
|
| 9 |
|
| 10 |
[[package]]
|
| 11 |
-
name = "
|
| 12 |
version = "0.1.0"
|
| 13 |
source = { virtual = "." }
|
| 14 |
dependencies = [
|
| 15 |
{ name = "ddgs" },
|
| 16 |
-
{ name = "docx2txt" },
|
| 17 |
{ name = "duckduckgo-search" },
|
| 18 |
{ name = "fastapi" },
|
| 19 |
{ name = "grpcio" },
|
|
@@ -36,22 +35,14 @@ dependencies = [
|
|
| 36 |
{ name = "langgraph-checkpoint-mongodb" },
|
| 37 |
{ name = "langgraph-checkpoint-postgres" },
|
| 38 |
{ name = "langgraph-checkpoint-sqlite" },
|
| 39 |
-
{ name = "langgraph-supervisor" },
|
| 40 |
{ name = "langsmith" },
|
| 41 |
-
{ name = "numexpr" },
|
| 42 |
-
{ name = "numpy" },
|
| 43 |
-
{ name = "onnxruntime" },
|
| 44 |
-
{ name = "pandas" },
|
| 45 |
{ name = "psycopg", extra = ["binary", "pool"] },
|
| 46 |
-
{ name = "pyarrow" },
|
| 47 |
{ name = "pydantic" },
|
| 48 |
{ name = "pydantic-settings" },
|
| 49 |
-
{ name = "pyowm" },
|
| 50 |
-
{ name = "pypdf" },
|
| 51 |
{ name = "python-dotenv" },
|
| 52 |
{ name = "setuptools" },
|
| 53 |
-
{ name = "streamlit" },
|
| 54 |
{ name = "tiktoken" },
|
|
|
|
| 55 |
{ name = "uvicorn" },
|
| 56 |
]
|
| 57 |
|
|
@@ -60,7 +51,6 @@ client = [
|
|
| 60 |
{ name = "httpx" },
|
| 61 |
{ name = "pydantic" },
|
| 62 |
{ name = "python-dotenv" },
|
| 63 |
-
{ name = "streamlit" },
|
| 64 |
]
|
| 65 |
dev = [
|
| 66 |
{ name = "langgraph-cli", extra = ["inmem"] },
|
|
@@ -76,7 +66,6 @@ dev = [
|
|
| 76 |
[package.metadata]
|
| 77 |
requires-dist = [
|
| 78 |
{ name = "ddgs", specifier = ">=9.9.1" },
|
| 79 |
-
{ name = "docx2txt", specifier = "~=0.8" },
|
| 80 |
{ name = "duckduckgo-search", specifier = ">=7.3.0" },
|
| 81 |
{ name = "fastapi", specifier = "~=0.115.5" },
|
| 82 |
{ name = "grpcio", specifier = ">=1.68.0" },
|
|
@@ -99,22 +88,14 @@ requires-dist = [
|
|
| 99 |
{ name = "langgraph-checkpoint-mongodb", specifier = "~=0.1.3" },
|
| 100 |
{ name = "langgraph-checkpoint-postgres", specifier = "~=2.0.13" },
|
| 101 |
{ name = "langgraph-checkpoint-sqlite", specifier = "~=2.0.1" },
|
| 102 |
-
{ name = "langgraph-supervisor", specifier = "~=0.0.31" },
|
| 103 |
{ name = "langsmith", specifier = "~=0.4.0" },
|
| 104 |
-
{ name = "numexpr", specifier = "~=2.10.1" },
|
| 105 |
-
{ name = "numpy", specifier = "~=2.3.4" },
|
| 106 |
-
{ name = "onnxruntime", specifier = "~=1.21.1" },
|
| 107 |
-
{ name = "pandas", specifier = "~=2.2.3" },
|
| 108 |
{ name = "psycopg", extras = ["binary", "pool"], specifier = "~=3.2.4" },
|
| 109 |
-
{ name = "pyarrow", specifier = ">=18.1.0" },
|
| 110 |
{ name = "pydantic", specifier = "~=2.10.1" },
|
| 111 |
{ name = "pydantic-settings", specifier = "~=2.12.0" },
|
| 112 |
-
{ name = "pyowm", specifier = "~=3.3.0" },
|
| 113 |
-
{ name = "pypdf", specifier = "~=5.3.0" },
|
| 114 |
{ name = "python-dotenv", specifier = "~=1.0.1" },
|
| 115 |
{ name = "setuptools", specifier = "~=75.6.0" },
|
| 116 |
-
{ name = "streamlit", specifier = "~=1.52.0" },
|
| 117 |
{ name = "tiktoken", specifier = ">=0.8.0" },
|
|
|
|
| 118 |
{ name = "uvicorn", specifier = "~=0.32.1" },
|
| 119 |
]
|
| 120 |
|
|
@@ -123,7 +104,6 @@ client = [
|
|
| 123 |
{ name = "httpx", specifier = "~=0.28.0" },
|
| 124 |
{ name = "pydantic", specifier = "~=2.10.1" },
|
| 125 |
{ name = "python-dotenv", specifier = "~=1.0.1" },
|
| 126 |
-
{ name = "streamlit", specifier = "~=1.52.0" },
|
| 127 |
]
|
| 128 |
dev = [
|
| 129 |
{ name = "langgraph-cli", extras = ["inmem"] },
|
|
@@ -238,22 +218,6 @@ wheels = [
|
|
| 238 |
{ url = "https://files.pythonhosted.org/packages/f5/10/6c25ed6de94c49f88a91fa5018cb4c0f3625f31d5be9f771ebe5cc7cd506/aiosqlite-0.21.0-py3-none-any.whl", hash = "sha256:2549cf4057f95f53dcba16f2b64e8e2791d7e1adedb13197dd8ed77bb226d7d0", size = 15792, upload-time = "2025-02-03T07:30:13.6Z" },
|
| 239 |
]
|
| 240 |
|
| 241 |
-
[[package]]
|
| 242 |
-
name = "altair"
|
| 243 |
-
version = "5.5.0"
|
| 244 |
-
source = { registry = "https://pypi.org/simple" }
|
| 245 |
-
dependencies = [
|
| 246 |
-
{ name = "jinja2" },
|
| 247 |
-
{ name = "jsonschema" },
|
| 248 |
-
{ name = "narwhals" },
|
| 249 |
-
{ name = "packaging" },
|
| 250 |
-
{ name = "typing-extensions" },
|
| 251 |
-
]
|
| 252 |
-
sdist = { url = "https://files.pythonhosted.org/packages/16/b1/f2969c7bdb8ad8bbdda031687defdce2c19afba2aa2c8e1d2a17f78376d8/altair-5.5.0.tar.gz", hash = "sha256:d960ebe6178c56de3855a68c47b516be38640b73fb3b5111c2a9ca90546dd73d", size = 705305, upload-time = "2024-11-23T23:39:58.542Z" }
|
| 253 |
-
wheels = [
|
| 254 |
-
{ url = "https://files.pythonhosted.org/packages/aa/f3/0b6ced594e51cc95d8c1fc1640d3623770d01e4969d29c0bd09945fafefa/altair-5.5.0-py3-none-any.whl", hash = "sha256:91a310b926508d560fe0148d02a194f38b824122641ef528113d029fcd129f8c", size = 731200, upload-time = "2024-11-23T23:39:56.4Z" },
|
| 255 |
-
]
|
| 256 |
-
|
| 257 |
[[package]]
|
| 258 |
name = "annotated-types"
|
| 259 |
version = "0.7.0"
|
|
@@ -346,15 +310,6 @@ wheels = [
|
|
| 346 |
{ url = "https://files.pythonhosted.org/packages/df/73/b6e24bd22e6720ca8ee9a85a0c4a2971af8497d8f3193fa05390cbd46e09/backoff-2.2.1-py3-none-any.whl", hash = "sha256:63579f9a0628e06278f7e47b7d7d5b6ce20dc65c5e96a6f3ca99a6adca0396e8", size = 15148, upload-time = "2022-10-05T19:19:30.546Z" },
|
| 347 |
]
|
| 348 |
|
| 349 |
-
[[package]]
|
| 350 |
-
name = "blinker"
|
| 351 |
-
version = "1.9.0"
|
| 352 |
-
source = { registry = "https://pypi.org/simple" }
|
| 353 |
-
sdist = { url = "https://files.pythonhosted.org/packages/21/28/9b3f50ce0e048515135495f198351908d99540d69bfdc8c1d15b73dc55ce/blinker-1.9.0.tar.gz", hash = "sha256:b4ce2265a7abece45e7cc896e98dbebe6cead56bcf805a3d23136d145f5445bf", size = 22460, upload-time = "2024-11-08T17:25:47.436Z" }
|
| 354 |
-
wheels = [
|
| 355 |
-
{ url = "https://files.pythonhosted.org/packages/10/cb/f2ad4230dc2eb1a74edf38f1a38b9b52277f75bef262d8908e60d957e13c/blinker-1.9.0-py3-none-any.whl", hash = "sha256:ba0efaa9080b619ff2f3459d1d500c57bddea4a6b424b60a91141db6fd2f08bc", size = 8458, upload-time = "2024-11-08T17:25:46.184Z" },
|
| 356 |
-
]
|
| 357 |
-
|
| 358 |
[[package]]
|
| 359 |
name = "blockbuster"
|
| 360 |
version = "1.5.25"
|
|
@@ -651,18 +606,6 @@ wheels = [
|
|
| 651 |
{ url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" },
|
| 652 |
]
|
| 653 |
|
| 654 |
-
[[package]]
|
| 655 |
-
name = "coloredlogs"
|
| 656 |
-
version = "15.0.1"
|
| 657 |
-
source = { registry = "https://pypi.org/simple" }
|
| 658 |
-
dependencies = [
|
| 659 |
-
{ name = "humanfriendly" },
|
| 660 |
-
]
|
| 661 |
-
sdist = { url = "https://files.pythonhosted.org/packages/cc/c7/eed8f27100517e8c0e6b923d5f0845d0cb99763da6fdee00478f91db7325/coloredlogs-15.0.1.tar.gz", hash = "sha256:7c991aa71a4577af2f82600d8f8f3a89f936baeaf9b50a9c197da014e5bf16b0", size = 278520, upload-time = "2021-06-11T10:22:45.202Z" }
|
| 662 |
-
wheels = [
|
| 663 |
-
{ url = "https://files.pythonhosted.org/packages/a7/06/3d6badcf13db419e25b07041d9c7b4a2c331d3f4e7134445ec5df57714cd/coloredlogs-15.0.1-py2.py3-none-any.whl", hash = "sha256:612ee75c546f53e92e70049c9dbfcc18c935a2b9a53b66085ce9ef6a6e5c0934", size = 46018, upload-time = "2021-06-11T10:22:42.561Z" },
|
| 664 |
-
]
|
| 665 |
-
|
| 666 |
[[package]]
|
| 667 |
name = "coverage"
|
| 668 |
version = "7.11.3"
|
|
@@ -835,15 +778,6 @@ wheels = [
|
|
| 835 |
{ url = "https://files.pythonhosted.org/packages/55/e2/2537ebcff11c1ee1ff17d8d0b6f4db75873e3b0fb32c2d4a2ee31ecb310a/docstring_parser-0.17.0-py3-none-any.whl", hash = "sha256:cf2569abd23dce8099b300f9b4fa8191e9582dda731fd533daf54c4551658708", size = 36896, upload-time = "2025-07-21T07:35:00.684Z" },
|
| 836 |
]
|
| 837 |
|
| 838 |
-
[[package]]
|
| 839 |
-
name = "docx2txt"
|
| 840 |
-
version = "0.9"
|
| 841 |
-
source = { registry = "https://pypi.org/simple" }
|
| 842 |
-
sdist = { url = "https://files.pythonhosted.org/packages/ea/07/4486a038624e885e227fe79111914c01f55aa70a51920ff1a7f2bd216d10/docx2txt-0.9.tar.gz", hash = "sha256:18013f6229b14909028b19aa7bf4f8f3d6e4632d7b089ab29f7f0a4d1f660e28", size = 3613, upload-time = "2025-03-24T20:59:25.21Z" }
|
| 843 |
-
wheels = [
|
| 844 |
-
{ url = "https://files.pythonhosted.org/packages/d6/51/756e71bec48ece0ecc2a10e921ef2756e197dcb7e478f2b43673b6683902/docx2txt-0.9-py3-none-any.whl", hash = "sha256:e3718c0653fd6f2fcf4b51b02a61452ad1c38a4c163bcf0a6fd9486cd38f529a", size = 4025, upload-time = "2025-03-24T20:59:24.394Z" },
|
| 845 |
-
]
|
| 846 |
-
|
| 847 |
[[package]]
|
| 848 |
name = "duckduckgo-search"
|
| 849 |
version = "8.1.1"
|
|
@@ -899,15 +833,6 @@ wheels = [
|
|
| 899 |
{ url = "https://files.pythonhosted.org/packages/18/79/1b8fa1bb3568781e84c9200f951c735f3f157429f44be0495da55894d620/filetype-1.2.0-py2.py3-none-any.whl", hash = "sha256:7ce71b6880181241cf7ac8697a2f1eb6a8bd9b429f7ad6d27b8db9ba5f1c2d25", size = 19970, upload-time = "2022-11-02T17:34:01.425Z" },
|
| 900 |
]
|
| 901 |
|
| 902 |
-
[[package]]
|
| 903 |
-
name = "flatbuffers"
|
| 904 |
-
version = "25.9.23"
|
| 905 |
-
source = { registry = "https://pypi.org/simple" }
|
| 906 |
-
sdist = { url = "https://files.pythonhosted.org/packages/9d/1f/3ee70b0a55137442038f2a33469cc5fddd7e0ad2abf83d7497c18a2b6923/flatbuffers-25.9.23.tar.gz", hash = "sha256:676f9fa62750bb50cf531b42a0a2a118ad8f7f797a511eda12881c016f093b12", size = 22067, upload-time = "2025-09-24T05:25:30.106Z" }
|
| 907 |
-
wheels = [
|
| 908 |
-
{ url = "https://files.pythonhosted.org/packages/ee/1b/00a78aa2e8fbd63f9af08c9c19e6deb3d5d66b4dda677a0f61654680ee89/flatbuffers-25.9.23-py2.py3-none-any.whl", hash = "sha256:255538574d6cb6d0a79a17ec8bc0d30985913b87513a01cce8bcdb6b4c44d0e2", size = 30869, upload-time = "2025-09-24T05:25:28.912Z" },
|
| 909 |
-
]
|
| 910 |
-
|
| 911 |
[[package]]
|
| 912 |
name = "forbiddenfruit"
|
| 913 |
version = "0.1.4"
|
|
@@ -987,39 +912,6 @@ wheels = [
|
|
| 987 |
{ url = "https://files.pythonhosted.org/packages/9a/9a/e35b4a917281c0b8419d4207f4334c8e8c5dbf4f3f5f9ada73958d937dcc/frozenlist-1.8.0-py3-none-any.whl", hash = "sha256:0c18a16eab41e82c295618a77502e17b195883241c563b00f0aa5106fc4eaa0d", size = 13409, upload-time = "2025-10-06T05:38:16.721Z" },
|
| 988 |
]
|
| 989 |
|
| 990 |
-
[[package]]
|
| 991 |
-
name = "geojson"
|
| 992 |
-
version = "2.5.0"
|
| 993 |
-
source = { registry = "https://pypi.org/simple" }
|
| 994 |
-
sdist = { url = "https://files.pythonhosted.org/packages/b6/8d/c42d3fe6f9b5e5bd6a55d9f03813d674d65d853cb12e6bc56f154a2ceca0/geojson-2.5.0.tar.gz", hash = "sha256:6e4bb7ace4226a45d9c8c8b1348b3fc43540658359f93c3f7e03efa9f15f658a", size = 23086, upload-time = "2019-08-09T20:32:37.448Z" }
|
| 995 |
-
wheels = [
|
| 996 |
-
{ url = "https://files.pythonhosted.org/packages/e4/8d/9e28e9af95739e6d2d2f8d4bef0b3432da40b7c3588fbad4298c1be09e48/geojson-2.5.0-py2.py3-none-any.whl", hash = "sha256:ccbd13368dd728f4e4f13ffe6aaf725b6e802c692ba0dde628be475040c534ba", size = 14839, upload-time = "2019-08-09T20:32:15.536Z" },
|
| 997 |
-
]
|
| 998 |
-
|
| 999 |
-
[[package]]
|
| 1000 |
-
name = "gitdb"
|
| 1001 |
-
version = "4.0.12"
|
| 1002 |
-
source = { registry = "https://pypi.org/simple" }
|
| 1003 |
-
dependencies = [
|
| 1004 |
-
{ name = "smmap" },
|
| 1005 |
-
]
|
| 1006 |
-
sdist = { url = "https://files.pythonhosted.org/packages/72/94/63b0fc47eb32792c7ba1fe1b694daec9a63620db1e313033d18140c2320a/gitdb-4.0.12.tar.gz", hash = "sha256:5ef71f855d191a3326fcfbc0d5da835f26b13fbcba60c32c21091c349ffdb571", size = 394684, upload-time = "2025-01-02T07:20:46.413Z" }
|
| 1007 |
-
wheels = [
|
| 1008 |
-
{ url = "https://files.pythonhosted.org/packages/a0/61/5c78b91c3143ed5c14207f463aecfc8f9dbb5092fb2869baf37c273b2705/gitdb-4.0.12-py3-none-any.whl", hash = "sha256:67073e15955400952c6565cc3e707c554a4eea2e428946f7a4c162fab9bd9bcf", size = 62794, upload-time = "2025-01-02T07:20:43.624Z" },
|
| 1009 |
-
]
|
| 1010 |
-
|
| 1011 |
-
[[package]]
|
| 1012 |
-
name = "gitpython"
|
| 1013 |
-
version = "3.1.45"
|
| 1014 |
-
source = { registry = "https://pypi.org/simple" }
|
| 1015 |
-
dependencies = [
|
| 1016 |
-
{ name = "gitdb" },
|
| 1017 |
-
]
|
| 1018 |
-
sdist = { url = "https://files.pythonhosted.org/packages/9a/c8/dd58967d119baab745caec2f9d853297cec1989ec1d63f677d3880632b88/gitpython-3.1.45.tar.gz", hash = "sha256:85b0ee964ceddf211c41b9f27a49086010a190fd8132a24e21f362a4b36a791c", size = 215076, upload-time = "2025-07-24T03:45:54.871Z" }
|
| 1019 |
-
wheels = [
|
| 1020 |
-
{ url = "https://files.pythonhosted.org/packages/01/61/d4b89fec821f72385526e1b9d9a3a0385dda4a72b206d28049e2c7cd39b8/gitpython-3.1.45-py3-none-any.whl", hash = "sha256:8908cb2e02fb3b93b7eb0f2827125cb699869470432cc885f019b8fd0fccff77", size = 208168, upload-time = "2025-07-24T03:45:52.517Z" },
|
| 1021 |
-
]
|
| 1022 |
-
|
| 1023 |
[[package]]
|
| 1024 |
name = "google-ai-generativelanguage"
|
| 1025 |
version = "0.9.0"
|
|
@@ -1486,18 +1378,6 @@ wheels = [
|
|
| 1486 |
{ url = "https://files.pythonhosted.org/packages/d2/fd/6668e5aec43ab844de6fc74927e155a3b37bf40d7c3790e49fc0406b6578/httpx_sse-0.4.3-py3-none-any.whl", hash = "sha256:0ac1c9fe3c0afad2e0ebb25a934a59f4c7823b60792691f779fad2c5568830fc", size = 8960, upload-time = "2025-10-10T21:48:21.158Z" },
|
| 1487 |
]
|
| 1488 |
|
| 1489 |
-
[[package]]
|
| 1490 |
-
name = "humanfriendly"
|
| 1491 |
-
version = "10.0"
|
| 1492 |
-
source = { registry = "https://pypi.org/simple" }
|
| 1493 |
-
dependencies = [
|
| 1494 |
-
{ name = "pyreadline3", marker = "sys_platform == 'win32'" },
|
| 1495 |
-
]
|
| 1496 |
-
sdist = { url = "https://files.pythonhosted.org/packages/cc/3f/2c29224acb2e2df4d2046e4c73ee2662023c58ff5b113c4c1adac0886c43/humanfriendly-10.0.tar.gz", hash = "sha256:6b0b831ce8f15f7300721aa49829fc4e83921a9a301cc7f606be6686a2288ddc", size = 360702, upload-time = "2021-09-17T21:40:43.31Z" }
|
| 1497 |
-
wheels = [
|
| 1498 |
-
{ url = "https://files.pythonhosted.org/packages/f0/0f/310fb31e39e2d734ccaa2c0fb981ee41f7bd5056ce9bc29b2248bd569169/humanfriendly-10.0-py2.py3-none-any.whl", hash = "sha256:1697e1a8a8f550fd43c2865cd84542fc175a61dcb779b6fee18cf6b6ccba1477", size = 86794, upload-time = "2021-09-17T21:40:39.897Z" },
|
| 1499 |
-
]
|
| 1500 |
-
|
| 1501 |
[[package]]
|
| 1502 |
name = "hyperframe"
|
| 1503 |
version = "6.1.0"
|
|
@@ -1546,18 +1426,6 @@ wheels = [
|
|
| 1546 |
{ url = "https://files.pythonhosted.org/packages/cb/b1/3846dd7f199d53cb17f49cba7e651e9ce294d8497c8c150530ed11865bb8/iniconfig-2.3.0-py3-none-any.whl", hash = "sha256:f631c04d2c48c52b84d0d0549c99ff3859c98df65b3101406327ecc7d53fbf12", size = 7484, upload-time = "2025-10-18T21:55:41.639Z" },
|
| 1547 |
]
|
| 1548 |
|
| 1549 |
-
[[package]]
|
| 1550 |
-
name = "jinja2"
|
| 1551 |
-
version = "3.1.6"
|
| 1552 |
-
source = { registry = "https://pypi.org/simple" }
|
| 1553 |
-
dependencies = [
|
| 1554 |
-
{ name = "markupsafe" },
|
| 1555 |
-
]
|
| 1556 |
-
sdist = { url = "https://files.pythonhosted.org/packages/df/bf/f7da0350254c0ed7c72f3e33cef02e048281fec7ecec5f032d4aac52226b/jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d", size = 245115, upload-time = "2025-03-05T20:05:02.478Z" }
|
| 1557 |
-
wheels = [
|
| 1558 |
-
{ url = "https://files.pythonhosted.org/packages/62/a1/3d680cbfd5f4b8f15abc1d571870c5fc3e594bb582bc3b64ea099db13e56/jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67", size = 134899, upload-time = "2025-03-05T20:05:00.369Z" },
|
| 1559 |
-
]
|
| 1560 |
-
|
| 1561 |
[[package]]
|
| 1562 |
name = "jiter"
|
| 1563 |
version = "0.8.2"
|
|
@@ -2124,19 +1992,6 @@ wheels = [
|
|
| 2124 |
{ url = "https://files.pythonhosted.org/packages/66/05/b2d34e16638241e6f27a6946d28160d4b8b641383787646d41a3727e0896/langgraph_sdk-0.2.9-py3-none-any.whl", hash = "sha256:fbf302edadbf0fb343596f91c597794e936ef68eebc0d3e1d358b6f9f72a1429", size = 56752, upload-time = "2025-09-20T18:49:13.346Z" },
|
| 2125 |
]
|
| 2126 |
|
| 2127 |
-
[[package]]
|
| 2128 |
-
name = "langgraph-supervisor"
|
| 2129 |
-
version = "0.0.31"
|
| 2130 |
-
source = { registry = "https://pypi.org/simple" }
|
| 2131 |
-
dependencies = [
|
| 2132 |
-
{ name = "langchain-core" },
|
| 2133 |
-
{ name = "langgraph" },
|
| 2134 |
-
]
|
| 2135 |
-
sdist = { url = "https://files.pythonhosted.org/packages/15/a0/6e9286a2e4a9ed8c9d299d0131b323cae376316a63286d2880031bb01082/langgraph_supervisor-0.0.31.tar.gz", hash = "sha256:0f71d5098fa1ac274423eff08750345450045c1e0dd2480ab7f3e5c1848f6016", size = 21910, upload-time = "2025-11-19T18:34:29.795Z" }
|
| 2136 |
-
wheels = [
|
| 2137 |
-
{ url = "https://files.pythonhosted.org/packages/1d/7f/bb73da8db36c96f32cdd0292f2cd75e2d671150b0d177575da560ad994a1/langgraph_supervisor-0.0.31-py3-none-any.whl", hash = "sha256:6bbe5311f536d38779766cc6f074005e3e892d37edbfd875731923221daa1889", size = 16505, upload-time = "2025-11-19T18:34:28.481Z" },
|
| 2138 |
-
]
|
| 2139 |
-
|
| 2140 |
[[package]]
|
| 2141 |
name = "langsmith"
|
| 2142 |
version = "0.4.43"
|
|
@@ -2230,58 +2085,6 @@ wheels = [
|
|
| 2230 |
{ url = "https://files.pythonhosted.org/packages/6c/77/d7f491cbc05303ac6801651aabeb262d43f319288c1ea96c66b1d2692ff3/lxml-6.0.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:27220da5be049e936c3aca06f174e8827ca6445a4353a1995584311487fc4e3e", size = 3518768, upload-time = "2025-09-22T04:04:57.097Z" },
|
| 2231 |
]
|
| 2232 |
|
| 2233 |
-
[[package]]
|
| 2234 |
-
name = "markupsafe"
|
| 2235 |
-
version = "3.0.3"
|
| 2236 |
-
source = { registry = "https://pypi.org/simple" }
|
| 2237 |
-
sdist = { url = "https://files.pythonhosted.org/packages/7e/99/7690b6d4034fffd95959cbe0c02de8deb3098cc577c67bb6a24fe5d7caa7/markupsafe-3.0.3.tar.gz", hash = "sha256:722695808f4b6457b320fdc131280796bdceb04ab50fe1795cd540799ebe1698", size = 80313, upload-time = "2025-09-27T18:37:40.426Z" }
|
| 2238 |
-
wheels = [
|
| 2239 |
-
{ url = "https://files.pythonhosted.org/packages/08/db/fefacb2136439fc8dd20e797950e749aa1f4997ed584c62cfb8ef7c2be0e/markupsafe-3.0.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1cc7ea17a6824959616c525620e387f6dd30fec8cb44f649e31712db02123dad", size = 11631, upload-time = "2025-09-27T18:36:18.185Z" },
|
| 2240 |
-
{ url = "https://files.pythonhosted.org/packages/e1/2e/5898933336b61975ce9dc04decbc0a7f2fee78c30353c5efba7f2d6ff27a/markupsafe-3.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4bd4cd07944443f5a265608cc6aab442e4f74dff8088b0dfc8238647b8f6ae9a", size = 12058, upload-time = "2025-09-27T18:36:19.444Z" },
|
| 2241 |
-
{ url = "https://files.pythonhosted.org/packages/1d/09/adf2df3699d87d1d8184038df46a9c80d78c0148492323f4693df54e17bb/markupsafe-3.0.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6b5420a1d9450023228968e7e6a9ce57f65d148ab56d2313fcd589eee96a7a50", size = 24287, upload-time = "2025-09-27T18:36:20.768Z" },
|
| 2242 |
-
{ url = "https://files.pythonhosted.org/packages/30/ac/0273f6fcb5f42e314c6d8cd99effae6a5354604d461b8d392b5ec9530a54/markupsafe-3.0.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0bf2a864d67e76e5c9a34dc26ec616a66b9888e25e7b9460e1c76d3293bd9dbf", size = 22940, upload-time = "2025-09-27T18:36:22.249Z" },
|
| 2243 |
-
{ url = "https://files.pythonhosted.org/packages/19/ae/31c1be199ef767124c042c6c3e904da327a2f7f0cd63a0337e1eca2967a8/markupsafe-3.0.3-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:bc51efed119bc9cfdf792cdeaa4d67e8f6fcccab66ed4bfdd6bde3e59bfcbb2f", size = 21887, upload-time = "2025-09-27T18:36:23.535Z" },
|
| 2244 |
-
{ url = "https://files.pythonhosted.org/packages/b2/76/7edcab99d5349a4532a459e1fe64f0b0467a3365056ae550d3bcf3f79e1e/markupsafe-3.0.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:068f375c472b3e7acbe2d5318dea141359e6900156b5b2ba06a30b169086b91a", size = 23692, upload-time = "2025-09-27T18:36:24.823Z" },
|
| 2245 |
-
{ url = "https://files.pythonhosted.org/packages/a4/28/6e74cdd26d7514849143d69f0bf2399f929c37dc2b31e6829fd2045b2765/markupsafe-3.0.3-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:7be7b61bb172e1ed687f1754f8e7484f1c8019780f6f6b0786e76bb01c2ae115", size = 21471, upload-time = "2025-09-27T18:36:25.95Z" },
|
| 2246 |
-
{ url = "https://files.pythonhosted.org/packages/62/7e/a145f36a5c2945673e590850a6f8014318d5577ed7e5920a4b3448e0865d/markupsafe-3.0.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f9e130248f4462aaa8e2552d547f36ddadbeaa573879158d721bbd33dfe4743a", size = 22923, upload-time = "2025-09-27T18:36:27.109Z" },
|
| 2247 |
-
{ url = "https://files.pythonhosted.org/packages/0f/62/d9c46a7f5c9adbeeeda52f5b8d802e1094e9717705a645efc71b0913a0a8/markupsafe-3.0.3-cp311-cp311-win32.whl", hash = "sha256:0db14f5dafddbb6d9208827849fad01f1a2609380add406671a26386cdf15a19", size = 14572, upload-time = "2025-09-27T18:36:28.045Z" },
|
| 2248 |
-
{ url = "https://files.pythonhosted.org/packages/83/8a/4414c03d3f891739326e1783338e48fb49781cc915b2e0ee052aa490d586/markupsafe-3.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:de8a88e63464af587c950061a5e6a67d3632e36df62b986892331d4620a35c01", size = 15077, upload-time = "2025-09-27T18:36:29.025Z" },
|
| 2249 |
-
{ url = "https://files.pythonhosted.org/packages/35/73/893072b42e6862f319b5207adc9ae06070f095b358655f077f69a35601f0/markupsafe-3.0.3-cp311-cp311-win_arm64.whl", hash = "sha256:3b562dd9e9ea93f13d53989d23a7e775fdfd1066c33494ff43f5418bc8c58a5c", size = 13876, upload-time = "2025-09-27T18:36:29.954Z" },
|
| 2250 |
-
{ url = "https://files.pythonhosted.org/packages/5a/72/147da192e38635ada20e0a2e1a51cf8823d2119ce8883f7053879c2199b5/markupsafe-3.0.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d53197da72cc091b024dd97249dfc7794d6a56530370992a5e1a08983ad9230e", size = 11615, upload-time = "2025-09-27T18:36:30.854Z" },
|
| 2251 |
-
{ url = "https://files.pythonhosted.org/packages/9a/81/7e4e08678a1f98521201c3079f77db69fb552acd56067661f8c2f534a718/markupsafe-3.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1872df69a4de6aead3491198eaf13810b565bdbeec3ae2dc8780f14458ec73ce", size = 12020, upload-time = "2025-09-27T18:36:31.971Z" },
|
| 2252 |
-
{ url = "https://files.pythonhosted.org/packages/1e/2c/799f4742efc39633a1b54a92eec4082e4f815314869865d876824c257c1e/markupsafe-3.0.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3a7e8ae81ae39e62a41ec302f972ba6ae23a5c5396c8e60113e9066ef893da0d", size = 24332, upload-time = "2025-09-27T18:36:32.813Z" },
|
| 2253 |
-
{ url = "https://files.pythonhosted.org/packages/3c/2e/8d0c2ab90a8c1d9a24f0399058ab8519a3279d1bd4289511d74e909f060e/markupsafe-3.0.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d6dd0be5b5b189d31db7cda48b91d7e0a9795f31430b7f271219ab30f1d3ac9d", size = 22947, upload-time = "2025-09-27T18:36:33.86Z" },
|
| 2254 |
-
{ url = "https://files.pythonhosted.org/packages/2c/54/887f3092a85238093a0b2154bd629c89444f395618842e8b0c41783898ea/markupsafe-3.0.3-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:94c6f0bb423f739146aec64595853541634bde58b2135f27f61c1ffd1cd4d16a", size = 21962, upload-time = "2025-09-27T18:36:35.099Z" },
|
| 2255 |
-
{ url = "https://files.pythonhosted.org/packages/c9/2f/336b8c7b6f4a4d95e91119dc8521402461b74a485558d8f238a68312f11c/markupsafe-3.0.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:be8813b57049a7dc738189df53d69395eba14fb99345e0a5994914a3864c8a4b", size = 23760, upload-time = "2025-09-27T18:36:36.001Z" },
|
| 2256 |
-
{ url = "https://files.pythonhosted.org/packages/32/43/67935f2b7e4982ffb50a4d169b724d74b62a3964bc1a9a527f5ac4f1ee2b/markupsafe-3.0.3-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:83891d0e9fb81a825d9a6d61e3f07550ca70a076484292a70fde82c4b807286f", size = 21529, upload-time = "2025-09-27T18:36:36.906Z" },
|
| 2257 |
-
{ url = "https://files.pythonhosted.org/packages/89/e0/4486f11e51bbba8b0c041098859e869e304d1c261e59244baa3d295d47b7/markupsafe-3.0.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:77f0643abe7495da77fb436f50f8dab76dbc6e5fd25d39589a0f1fe6548bfa2b", size = 23015, upload-time = "2025-09-27T18:36:37.868Z" },
|
| 2258 |
-
{ url = "https://files.pythonhosted.org/packages/2f/e1/78ee7a023dac597a5825441ebd17170785a9dab23de95d2c7508ade94e0e/markupsafe-3.0.3-cp312-cp312-win32.whl", hash = "sha256:d88b440e37a16e651bda4c7c2b930eb586fd15ca7406cb39e211fcff3bf3017d", size = 14540, upload-time = "2025-09-27T18:36:38.761Z" },
|
| 2259 |
-
{ url = "https://files.pythonhosted.org/packages/aa/5b/bec5aa9bbbb2c946ca2733ef9c4ca91c91b6a24580193e891b5f7dbe8e1e/markupsafe-3.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:26a5784ded40c9e318cfc2bdb30fe164bdb8665ded9cd64d500a34fb42067b1c", size = 15105, upload-time = "2025-09-27T18:36:39.701Z" },
|
| 2260 |
-
{ url = "https://files.pythonhosted.org/packages/e5/f1/216fc1bbfd74011693a4fd837e7026152e89c4bcf3e77b6692fba9923123/markupsafe-3.0.3-cp312-cp312-win_arm64.whl", hash = "sha256:35add3b638a5d900e807944a078b51922212fb3dedb01633a8defc4b01a3c85f", size = 13906, upload-time = "2025-09-27T18:36:40.689Z" },
|
| 2261 |
-
{ url = "https://files.pythonhosted.org/packages/38/2f/907b9c7bbba283e68f20259574b13d005c121a0fa4c175f9bed27c4597ff/markupsafe-3.0.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e1cf1972137e83c5d4c136c43ced9ac51d0e124706ee1c8aa8532c1287fa8795", size = 11622, upload-time = "2025-09-27T18:36:41.777Z" },
|
| 2262 |
-
{ url = "https://files.pythonhosted.org/packages/9c/d9/5f7756922cdd676869eca1c4e3c0cd0df60ed30199ffd775e319089cb3ed/markupsafe-3.0.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:116bb52f642a37c115f517494ea5feb03889e04df47eeff5b130b1808ce7c219", size = 12029, upload-time = "2025-09-27T18:36:43.257Z" },
|
| 2263 |
-
{ url = "https://files.pythonhosted.org/packages/00/07/575a68c754943058c78f30db02ee03a64b3c638586fba6a6dd56830b30a3/markupsafe-3.0.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:133a43e73a802c5562be9bbcd03d090aa5a1fe899db609c29e8c8d815c5f6de6", size = 24374, upload-time = "2025-09-27T18:36:44.508Z" },
|
| 2264 |
-
{ url = "https://files.pythonhosted.org/packages/a9/21/9b05698b46f218fc0e118e1f8168395c65c8a2c750ae2bab54fc4bd4e0e8/markupsafe-3.0.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ccfcd093f13f0f0b7fdd0f198b90053bf7b2f02a3927a30e63f3ccc9df56b676", size = 22980, upload-time = "2025-09-27T18:36:45.385Z" },
|
| 2265 |
-
{ url = "https://files.pythonhosted.org/packages/7f/71/544260864f893f18b6827315b988c146b559391e6e7e8f7252839b1b846a/markupsafe-3.0.3-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:509fa21c6deb7a7a273d629cf5ec029bc209d1a51178615ddf718f5918992ab9", size = 21990, upload-time = "2025-09-27T18:36:46.916Z" },
|
| 2266 |
-
{ url = "https://files.pythonhosted.org/packages/c2/28/b50fc2f74d1ad761af2f5dcce7492648b983d00a65b8c0e0cb457c82ebbe/markupsafe-3.0.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a4afe79fb3de0b7097d81da19090f4df4f8d3a2b3adaa8764138aac2e44f3af1", size = 23784, upload-time = "2025-09-27T18:36:47.884Z" },
|
| 2267 |
-
{ url = "https://files.pythonhosted.org/packages/ed/76/104b2aa106a208da8b17a2fb72e033a5a9d7073c68f7e508b94916ed47a9/markupsafe-3.0.3-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:795e7751525cae078558e679d646ae45574b47ed6e7771863fcc079a6171a0fc", size = 21588, upload-time = "2025-09-27T18:36:48.82Z" },
|
| 2268 |
-
{ url = "https://files.pythonhosted.org/packages/b5/99/16a5eb2d140087ebd97180d95249b00a03aa87e29cc224056274f2e45fd6/markupsafe-3.0.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8485f406a96febb5140bfeca44a73e3ce5116b2501ac54fe953e488fb1d03b12", size = 23041, upload-time = "2025-09-27T18:36:49.797Z" },
|
| 2269 |
-
{ url = "https://files.pythonhosted.org/packages/19/bc/e7140ed90c5d61d77cea142eed9f9c303f4c4806f60a1044c13e3f1471d0/markupsafe-3.0.3-cp313-cp313-win32.whl", hash = "sha256:bdd37121970bfd8be76c5fb069c7751683bdf373db1ed6c010162b2a130248ed", size = 14543, upload-time = "2025-09-27T18:36:51.584Z" },
|
| 2270 |
-
{ url = "https://files.pythonhosted.org/packages/05/73/c4abe620b841b6b791f2edc248f556900667a5a1cf023a6646967ae98335/markupsafe-3.0.3-cp313-cp313-win_amd64.whl", hash = "sha256:9a1abfdc021a164803f4d485104931fb8f8c1efd55bc6b748d2f5774e78b62c5", size = 15113, upload-time = "2025-09-27T18:36:52.537Z" },
|
| 2271 |
-
{ url = "https://files.pythonhosted.org/packages/f0/3a/fa34a0f7cfef23cf9500d68cb7c32dd64ffd58a12b09225fb03dd37d5b80/markupsafe-3.0.3-cp313-cp313-win_arm64.whl", hash = "sha256:7e68f88e5b8799aa49c85cd116c932a1ac15caaa3f5db09087854d218359e485", size = 13911, upload-time = "2025-09-27T18:36:53.513Z" },
|
| 2272 |
-
{ url = "https://files.pythonhosted.org/packages/e4/d7/e05cd7efe43a88a17a37b3ae96e79a19e846f3f456fe79c57ca61356ef01/markupsafe-3.0.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:218551f6df4868a8d527e3062d0fb968682fe92054e89978594c28e642c43a73", size = 11658, upload-time = "2025-09-27T18:36:54.819Z" },
|
| 2273 |
-
{ url = "https://files.pythonhosted.org/packages/99/9e/e412117548182ce2148bdeacdda3bb494260c0b0184360fe0d56389b523b/markupsafe-3.0.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:3524b778fe5cfb3452a09d31e7b5adefeea8c5be1d43c4f810ba09f2ceb29d37", size = 12066, upload-time = "2025-09-27T18:36:55.714Z" },
|
| 2274 |
-
{ url = "https://files.pythonhosted.org/packages/bc/e6/fa0ffcda717ef64a5108eaa7b4f5ed28d56122c9a6d70ab8b72f9f715c80/markupsafe-3.0.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4e885a3d1efa2eadc93c894a21770e4bc67899e3543680313b09f139e149ab19", size = 25639, upload-time = "2025-09-27T18:36:56.908Z" },
|
| 2275 |
-
{ url = "https://files.pythonhosted.org/packages/96/ec/2102e881fe9d25fc16cb4b25d5f5cde50970967ffa5dddafdb771237062d/markupsafe-3.0.3-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8709b08f4a89aa7586de0aadc8da56180242ee0ada3999749b183aa23df95025", size = 23569, upload-time = "2025-09-27T18:36:57.913Z" },
|
| 2276 |
-
{ url = "https://files.pythonhosted.org/packages/4b/30/6f2fce1f1f205fc9323255b216ca8a235b15860c34b6798f810f05828e32/markupsafe-3.0.3-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:b8512a91625c9b3da6f127803b166b629725e68af71f8184ae7e7d54686a56d6", size = 23284, upload-time = "2025-09-27T18:36:58.833Z" },
|
| 2277 |
-
{ url = "https://files.pythonhosted.org/packages/58/47/4a0ccea4ab9f5dcb6f79c0236d954acb382202721e704223a8aafa38b5c8/markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:9b79b7a16f7fedff2495d684f2b59b0457c3b493778c9eed31111be64d58279f", size = 24801, upload-time = "2025-09-27T18:36:59.739Z" },
|
| 2278 |
-
{ url = "https://files.pythonhosted.org/packages/6a/70/3780e9b72180b6fecb83a4814d84c3bf4b4ae4bf0b19c27196104149734c/markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:12c63dfb4a98206f045aa9563db46507995f7ef6d83b2f68eda65c307c6829eb", size = 22769, upload-time = "2025-09-27T18:37:00.719Z" },
|
| 2279 |
-
{ url = "https://files.pythonhosted.org/packages/98/c5/c03c7f4125180fc215220c035beac6b9cb684bc7a067c84fc69414d315f5/markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:8f71bc33915be5186016f675cd83a1e08523649b0e33efdb898db577ef5bb009", size = 23642, upload-time = "2025-09-27T18:37:01.673Z" },
|
| 2280 |
-
{ url = "https://files.pythonhosted.org/packages/80/d6/2d1b89f6ca4bff1036499b1e29a1d02d282259f3681540e16563f27ebc23/markupsafe-3.0.3-cp313-cp313t-win32.whl", hash = "sha256:69c0b73548bc525c8cb9a251cddf1931d1db4d2258e9599c28c07ef3580ef354", size = 14612, upload-time = "2025-09-27T18:37:02.639Z" },
|
| 2281 |
-
{ url = "https://files.pythonhosted.org/packages/2b/98/e48a4bfba0a0ffcf9925fe2d69240bfaa19c6f7507b8cd09c70684a53c1e/markupsafe-3.0.3-cp313-cp313t-win_amd64.whl", hash = "sha256:1b4b79e8ebf6b55351f0d91fe80f893b4743f104bff22e90697db1590e47a218", size = 15200, upload-time = "2025-09-27T18:37:03.582Z" },
|
| 2282 |
-
{ url = "https://files.pythonhosted.org/packages/0e/72/e3cc540f351f316e9ed0f092757459afbc595824ca724cbc5a5d4263713f/markupsafe-3.0.3-cp313-cp313t-win_arm64.whl", hash = "sha256:ad2cf8aa28b8c020ab2fc8287b0f823d0a7d8630784c31e9ee5edea20f406287", size = 13973, upload-time = "2025-09-27T18:37:04.929Z" },
|
| 2283 |
-
]
|
| 2284 |
-
|
| 2285 |
[[package]]
|
| 2286 |
name = "marshmallow"
|
| 2287 |
version = "3.26.1"
|
|
@@ -2328,15 +2131,6 @@ wheels = [
|
|
| 2328 |
{ url = "https://files.pythonhosted.org/packages/01/9a/35e053d4f442addf751ed20e0e922476508ee580786546d699b0567c4c67/motor-3.7.1-py3-none-any.whl", hash = "sha256:8a63b9049e38eeeb56b4fdd57c3312a6d1f25d01db717fe7d82222393c410298", size = 74996, upload-time = "2025-05-14T18:56:31.665Z" },
|
| 2329 |
]
|
| 2330 |
|
| 2331 |
-
[[package]]
|
| 2332 |
-
name = "mpmath"
|
| 2333 |
-
version = "1.3.0"
|
| 2334 |
-
source = { registry = "https://pypi.org/simple" }
|
| 2335 |
-
sdist = { url = "https://files.pythonhosted.org/packages/e0/47/dd32fa426cc72114383ac549964eecb20ecfd886d1e5ccf5340b55b02f57/mpmath-1.3.0.tar.gz", hash = "sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f", size = 508106, upload-time = "2023-03-07T16:47:11.061Z" }
|
| 2336 |
-
wheels = [
|
| 2337 |
-
{ url = "https://files.pythonhosted.org/packages/43/e3/7d92a15f894aa0c9c4b49b8ee9ac9850d6e63b03c9c32c0367a13ae62209/mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c", size = 536198, upload-time = "2023-03-07T16:47:09.197Z" },
|
| 2338 |
-
]
|
| 2339 |
-
|
| 2340 |
[[package]]
|
| 2341 |
name = "multidict"
|
| 2342 |
version = "6.7.0"
|
|
@@ -2459,15 +2253,6 @@ wheels = [
|
|
| 2459 |
{ url = "https://files.pythonhosted.org/packages/79/7b/2c79738432f5c924bef5071f933bcc9efd0473bac3b4aa584a6f7c1c8df8/mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505", size = 4963, upload-time = "2025-04-22T14:54:22.983Z" },
|
| 2460 |
]
|
| 2461 |
|
| 2462 |
-
[[package]]
|
| 2463 |
-
name = "narwhals"
|
| 2464 |
-
version = "2.11.0"
|
| 2465 |
-
source = { registry = "https://pypi.org/simple" }
|
| 2466 |
-
sdist = { url = "https://files.pythonhosted.org/packages/7d/a2/25208347aa4c2d82a265cf4bc0873aaf5069f525c0438146821e7fc19ef5/narwhals-2.11.0.tar.gz", hash = "sha256:d23f3ea7efc6b4d0355444a72de6b8fa3011175585246c3400c894a7583964af", size = 589233, upload-time = "2025-11-10T16:28:35.675Z" }
|
| 2467 |
-
wheels = [
|
| 2468 |
-
{ url = "https://files.pythonhosted.org/packages/c0/a1/4d21933898e23b011ae0528151b57a9230a62960d0919bf2ee48c7f5c20a/narwhals-2.11.0-py3-none-any.whl", hash = "sha256:a9795e1e44aa94e5ba6406ef1c5ee4c172414ced4f1aea4a79e5894f0c7378d4", size = 423069, upload-time = "2025-11-10T16:28:33.522Z" },
|
| 2469 |
-
]
|
| 2470 |
-
|
| 2471 |
[[package]]
|
| 2472 |
name = "nodeenv"
|
| 2473 |
version = "1.9.1"
|
|
@@ -2581,35 +2366,6 @@ wheels = [
|
|
| 2581 |
{ url = "https://files.pythonhosted.org/packages/47/4f/4a617ee93d8208d2bcf26b2d8b9402ceaed03e3853c754940e2290fed063/ollama-0.6.1-py3-none-any.whl", hash = "sha256:fc4c984b345735c5486faeee67d8a265214a31cbb828167782dc642ce0a2bf8c", size = 14354, upload-time = "2025-11-13T23:02:16.292Z" },
|
| 2582 |
]
|
| 2583 |
|
| 2584 |
-
[[package]]
|
| 2585 |
-
name = "onnxruntime"
|
| 2586 |
-
version = "1.21.1"
|
| 2587 |
-
source = { registry = "https://pypi.org/simple" }
|
| 2588 |
-
dependencies = [
|
| 2589 |
-
{ name = "coloredlogs" },
|
| 2590 |
-
{ name = "flatbuffers" },
|
| 2591 |
-
{ name = "numpy" },
|
| 2592 |
-
{ name = "packaging" },
|
| 2593 |
-
{ name = "protobuf" },
|
| 2594 |
-
{ name = "sympy" },
|
| 2595 |
-
]
|
| 2596 |
-
wheels = [
|
| 2597 |
-
{ url = "https://files.pythonhosted.org/packages/70/ba/13c46c22fb52d8fea53575da163399a7d75fe61223aba685370f047a0882/onnxruntime-1.21.1-cp311-cp311-macosx_13_0_universal2.whl", hash = "sha256:8bee9b5ba7b88ae7bfccb4f97bbe1b4bae801b0fb05d686b28a722cb27c89931", size = 33643424, upload-time = "2025-04-18T12:01:17.445Z" },
|
| 2598 |
-
{ url = "https://files.pythonhosted.org/packages/18/4f/68985138c507b6ad34061aa4f330b8fbd30b0c5c299be53f0c829420528e/onnxruntime-1.21.1-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4b6a29a1767b92d543091349f5397a1c7619eaca746cd1bc47f8b4ec5a9f1a6c", size = 14162437, upload-time = "2025-04-18T12:01:39.412Z" },
|
| 2599 |
-
{ url = "https://files.pythonhosted.org/packages/0f/76/7dfa4b63f95a17eaf881c9c464feaa59a25bbfb578db204fc22d522b5199/onnxruntime-1.21.1-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:982dcc04a6688e1af9e3da1d4ef2bdeb11417cf3f8dde81f8f721043c1919a4f", size = 16002403, upload-time = "2025-04-18T12:01:41.645Z" },
|
| 2600 |
-
{ url = "https://files.pythonhosted.org/packages/80/85/397406e758d6c30fb6d0d0152041c6b9ee835c3584765837ce54230c8bc9/onnxruntime-1.21.1-cp311-cp311-win_amd64.whl", hash = "sha256:2b6052c04b9125319293abb9bdcce40e806db3e097f15b82242d4cd72d81fd0c", size = 12301824, upload-time = "2025-04-18T12:01:20.228Z" },
|
| 2601 |
-
{ url = "https://files.pythonhosted.org/packages/a5/42/274438bbc259439fa1606d0d6d2eef4171cdbd2d7a1c3b249b4ba440424b/onnxruntime-1.21.1-cp312-cp312-macosx_13_0_universal2.whl", hash = "sha256:f615c05869a523a94d0a4de1f0936d0199a473cf104d630fc26174bebd5759bd", size = 33658457, upload-time = "2025-04-18T12:01:22.937Z" },
|
| 2602 |
-
{ url = "https://files.pythonhosted.org/packages/9c/93/76f629d4f22571b0b3a29a9d375204faae2bd2b07d557043b56df5848779/onnxruntime-1.21.1-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:79dfb1f47386c4edd115b21015354b2f05f5566c40c98606251f15a64add3cbe", size = 14164881, upload-time = "2025-04-18T12:01:44.497Z" },
|
| 2603 |
-
{ url = "https://files.pythonhosted.org/packages/1b/86/75cbaa4058758fa8ef912dfebba2d5a4e4fd6738615c15b6a2262d076198/onnxruntime-1.21.1-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2742935d6610fe0f58e1995018d9db7e8239d0201d9ebbdb7964a61386b5390a", size = 16019966, upload-time = "2025-04-18T12:01:47.366Z" },
|
| 2604 |
-
{ url = "https://files.pythonhosted.org/packages/5f/9d/fb8895b2cb38c9965d4b4e0a9aa1398f3e3f16c4acb75cf3b61689780a65/onnxruntime-1.21.1-cp312-cp312-win_amd64.whl", hash = "sha256:a7afdb3fcb162f5536225e13c2b245018068964b1d0eee05303ea6823ca6785e", size = 12302925, upload-time = "2025-04-18T12:01:26.147Z" },
|
| 2605 |
-
{ url = "https://files.pythonhosted.org/packages/6d/7e/8445eb44ba9fe0ce0bc77c4b569d79f7e3efd6da2dd87c5a04347e6c134e/onnxruntime-1.21.1-cp313-cp313-macosx_13_0_universal2.whl", hash = "sha256:ed4f9771233a92edcab9f11f537702371d450fe6cd79a727b672d37b9dab0cde", size = 33658643, upload-time = "2025-04-18T12:01:28.73Z" },
|
| 2606 |
-
{ url = "https://files.pythonhosted.org/packages/ce/46/9c4026d302f1c7e8427bf9fa3da2d7526d9c5200242bde6adee7928ef1c9/onnxruntime-1.21.1-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1bc100fd1f4f95258e7d0f7068ec69dec2a47cc693f745eec9cf4561ee8d952a", size = 14165205, upload-time = "2025-04-18T12:01:50.117Z" },
|
| 2607 |
-
{ url = "https://files.pythonhosted.org/packages/44/b2/4e4c6b5c03be752d74cb20937961c76f53fe87a9760d5b7345629d35bb31/onnxruntime-1.21.1-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0fea0d2b98eecf4bebe01f7ce9a265a5d72b3050e9098063bfe65fa2b0633a8e", size = 16019529, upload-time = "2025-04-18T12:01:52.995Z" },
|
| 2608 |
-
{ url = "https://files.pythonhosted.org/packages/ec/1d/afca646af339cc6735f3fb7fafb9ca94b578c5b6a0ebd63a312468767bdb/onnxruntime-1.21.1-cp313-cp313-win_amd64.whl", hash = "sha256:da606061b9ed1b05b63a37be38c2014679a3e725903f58036ffd626df45c0e47", size = 12303603, upload-time = "2025-04-18T12:01:32.073Z" },
|
| 2609 |
-
{ url = "https://files.pythonhosted.org/packages/a5/12/a01e38c9a6b8d7c28e04d9eb83ad9143d568b961474ba49f0f18a3eeec82/onnxruntime-1.21.1-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:94674315d40d521952bfc28007ce9b6728e87753e1f18d243c8cd953f25903b8", size = 14176329, upload-time = "2025-04-18T12:01:55.227Z" },
|
| 2610 |
-
{ url = "https://files.pythonhosted.org/packages/3a/72/5ff85c540fd6a465610ce47e4cee8fccb472952fc1d589112f51ae2520a5/onnxruntime-1.21.1-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5c9e4571ff5b2a5d377d414bc85cd9450ba233a9a92f766493874f1093976453", size = 15990556, upload-time = "2025-04-18T12:01:57.979Z" },
|
| 2611 |
-
]
|
| 2612 |
-
|
| 2613 |
[[package]]
|
| 2614 |
name = "openai"
|
| 2615 |
version = "2.2.0"
|
|
@@ -2808,47 +2564,6 @@ wheels = [
|
|
| 2808 |
{ url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469, upload-time = "2025-04-19T11:48:57.875Z" },
|
| 2809 |
]
|
| 2810 |
|
| 2811 |
-
[[package]]
|
| 2812 |
-
name = "pandas"
|
| 2813 |
-
version = "2.2.3"
|
| 2814 |
-
source = { registry = "https://pypi.org/simple" }
|
| 2815 |
-
dependencies = [
|
| 2816 |
-
{ name = "numpy" },
|
| 2817 |
-
{ name = "python-dateutil" },
|
| 2818 |
-
{ name = "pytz" },
|
| 2819 |
-
{ name = "tzdata" },
|
| 2820 |
-
]
|
| 2821 |
-
sdist = { url = "https://files.pythonhosted.org/packages/9c/d6/9f8431bacc2e19dca897724cd097b1bb224a6ad5433784a44b587c7c13af/pandas-2.2.3.tar.gz", hash = "sha256:4f18ba62b61d7e192368b84517265a99b4d7ee8912f8708660fb4a366cc82667", size = 4399213, upload-time = "2024-09-20T13:10:04.827Z" }
|
| 2822 |
-
wheels = [
|
| 2823 |
-
{ url = "https://files.pythonhosted.org/packages/a8/44/d9502bf0ed197ba9bf1103c9867d5904ddcaf869e52329787fc54ed70cc8/pandas-2.2.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:66108071e1b935240e74525006034333f98bcdb87ea116de573a6a0dccb6c039", size = 12602222, upload-time = "2024-09-20T13:08:56.254Z" },
|
| 2824 |
-
{ url = "https://files.pythonhosted.org/packages/52/11/9eac327a38834f162b8250aab32a6781339c69afe7574368fffe46387edf/pandas-2.2.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7c2875855b0ff77b2a64a0365e24455d9990730d6431b9e0ee18ad8acee13dbd", size = 11321274, upload-time = "2024-09-20T13:08:58.645Z" },
|
| 2825 |
-
{ url = "https://files.pythonhosted.org/packages/45/fb/c4beeb084718598ba19aa9f5abbc8aed8b42f90930da861fcb1acdb54c3a/pandas-2.2.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cd8d0c3be0515c12fed0bdbae072551c8b54b7192c7b1fda0ba56059a0179698", size = 15579836, upload-time = "2024-09-20T19:01:57.571Z" },
|
| 2826 |
-
{ url = "https://files.pythonhosted.org/packages/cd/5f/4dba1d39bb9c38d574a9a22548c540177f78ea47b32f99c0ff2ec499fac5/pandas-2.2.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c124333816c3a9b03fbeef3a9f230ba9a737e9e5bb4060aa2107a86cc0a497fc", size = 13058505, upload-time = "2024-09-20T13:09:01.501Z" },
|
| 2827 |
-
{ url = "https://files.pythonhosted.org/packages/b9/57/708135b90391995361636634df1f1130d03ba456e95bcf576fada459115a/pandas-2.2.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:63cc132e40a2e084cf01adf0775b15ac515ba905d7dcca47e9a251819c575ef3", size = 16744420, upload-time = "2024-09-20T19:02:00.678Z" },
|
| 2828 |
-
{ url = "https://files.pythonhosted.org/packages/86/4a/03ed6b7ee323cf30404265c284cee9c65c56a212e0a08d9ee06984ba2240/pandas-2.2.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:29401dbfa9ad77319367d36940cd8a0b3a11aba16063e39632d98b0e931ddf32", size = 14440457, upload-time = "2024-09-20T13:09:04.105Z" },
|
| 2829 |
-
{ url = "https://files.pythonhosted.org/packages/ed/8c/87ddf1fcb55d11f9f847e3c69bb1c6f8e46e2f40ab1a2d2abadb2401b007/pandas-2.2.3-cp311-cp311-win_amd64.whl", hash = "sha256:3fc6873a41186404dad67245896a6e440baacc92f5b716ccd1bc9ed2995ab2c5", size = 11617166, upload-time = "2024-09-20T13:09:06.917Z" },
|
| 2830 |
-
{ url = "https://files.pythonhosted.org/packages/17/a3/fb2734118db0af37ea7433f57f722c0a56687e14b14690edff0cdb4b7e58/pandas-2.2.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b1d432e8d08679a40e2a6d8b2f9770a5c21793a6f9f47fdd52c5ce1948a5a8a9", size = 12529893, upload-time = "2024-09-20T13:09:09.655Z" },
|
| 2831 |
-
{ url = "https://files.pythonhosted.org/packages/e1/0c/ad295fd74bfac85358fd579e271cded3ac969de81f62dd0142c426b9da91/pandas-2.2.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a5a1595fe639f5988ba6a8e5bc9649af3baf26df3998a0abe56c02609392e0a4", size = 11363475, upload-time = "2024-09-20T13:09:14.718Z" },
|
| 2832 |
-
{ url = "https://files.pythonhosted.org/packages/c6/2a/4bba3f03f7d07207481fed47f5b35f556c7441acddc368ec43d6643c5777/pandas-2.2.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5de54125a92bb4d1c051c0659e6fcb75256bf799a732a87184e5ea503965bce3", size = 15188645, upload-time = "2024-09-20T19:02:03.88Z" },
|
| 2833 |
-
{ url = "https://files.pythonhosted.org/packages/38/f8/d8fddee9ed0d0c0f4a2132c1dfcf0e3e53265055da8df952a53e7eaf178c/pandas-2.2.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fffb8ae78d8af97f849404f21411c95062db1496aeb3e56f146f0355c9989319", size = 12739445, upload-time = "2024-09-20T13:09:17.621Z" },
|
| 2834 |
-
{ url = "https://files.pythonhosted.org/packages/20/e8/45a05d9c39d2cea61ab175dbe6a2de1d05b679e8de2011da4ee190d7e748/pandas-2.2.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6dfcb5ee8d4d50c06a51c2fffa6cff6272098ad6540aed1a76d15fb9318194d8", size = 16359235, upload-time = "2024-09-20T19:02:07.094Z" },
|
| 2835 |
-
{ url = "https://files.pythonhosted.org/packages/1d/99/617d07a6a5e429ff90c90da64d428516605a1ec7d7bea494235e1c3882de/pandas-2.2.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:062309c1b9ea12a50e8ce661145c6aab431b1e99530d3cd60640e255778bd43a", size = 14056756, upload-time = "2024-09-20T13:09:20.474Z" },
|
| 2836 |
-
{ url = "https://files.pythonhosted.org/packages/29/d4/1244ab8edf173a10fd601f7e13b9566c1b525c4f365d6bee918e68381889/pandas-2.2.3-cp312-cp312-win_amd64.whl", hash = "sha256:59ef3764d0fe818125a5097d2ae867ca3fa64df032331b7e0917cf5d7bf66b13", size = 11504248, upload-time = "2024-09-20T13:09:23.137Z" },
|
| 2837 |
-
{ url = "https://files.pythonhosted.org/packages/64/22/3b8f4e0ed70644e85cfdcd57454686b9057c6c38d2f74fe4b8bc2527214a/pandas-2.2.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f00d1345d84d8c86a63e476bb4955e46458b304b9575dcf71102b5c705320015", size = 12477643, upload-time = "2024-09-20T13:09:25.522Z" },
|
| 2838 |
-
{ url = "https://files.pythonhosted.org/packages/e4/93/b3f5d1838500e22c8d793625da672f3eec046b1a99257666c94446969282/pandas-2.2.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3508d914817e153ad359d7e069d752cdd736a247c322d932eb89e6bc84217f28", size = 11281573, upload-time = "2024-09-20T13:09:28.012Z" },
|
| 2839 |
-
{ url = "https://files.pythonhosted.org/packages/f5/94/6c79b07f0e5aab1dcfa35a75f4817f5c4f677931d4234afcd75f0e6a66ca/pandas-2.2.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:22a9d949bfc9a502d320aa04e5d02feab689d61da4e7764b62c30b991c42c5f0", size = 15196085, upload-time = "2024-09-20T19:02:10.451Z" },
|
| 2840 |
-
{ url = "https://files.pythonhosted.org/packages/e8/31/aa8da88ca0eadbabd0a639788a6da13bb2ff6edbbb9f29aa786450a30a91/pandas-2.2.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3a255b2c19987fbbe62a9dfd6cff7ff2aa9ccab3fc75218fd4b7530f01efa24", size = 12711809, upload-time = "2024-09-20T13:09:30.814Z" },
|
| 2841 |
-
{ url = "https://files.pythonhosted.org/packages/ee/7c/c6dbdb0cb2a4344cacfb8de1c5808ca885b2e4dcfde8008266608f9372af/pandas-2.2.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:800250ecdadb6d9c78eae4990da62743b857b470883fa27f652db8bdde7f6659", size = 16356316, upload-time = "2024-09-20T19:02:13.825Z" },
|
| 2842 |
-
{ url = "https://files.pythonhosted.org/packages/57/b7/8b757e7d92023b832869fa8881a992696a0bfe2e26f72c9ae9f255988d42/pandas-2.2.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6374c452ff3ec675a8f46fd9ab25c4ad0ba590b71cf0656f8b6daa5202bca3fb", size = 14022055, upload-time = "2024-09-20T13:09:33.462Z" },
|
| 2843 |
-
{ url = "https://files.pythonhosted.org/packages/3b/bc/4b18e2b8c002572c5a441a64826252ce5da2aa738855747247a971988043/pandas-2.2.3-cp313-cp313-win_amd64.whl", hash = "sha256:61c5ad4043f791b61dd4752191d9f07f0ae412515d59ba8f005832a532f8736d", size = 11481175, upload-time = "2024-09-20T13:09:35.871Z" },
|
| 2844 |
-
{ url = "https://files.pythonhosted.org/packages/76/a3/a5d88146815e972d40d19247b2c162e88213ef51c7c25993942c39dbf41d/pandas-2.2.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:3b71f27954685ee685317063bf13c7709a7ba74fc996b84fc6821c59b0f06468", size = 12615650, upload-time = "2024-09-20T13:09:38.685Z" },
|
| 2845 |
-
{ url = "https://files.pythonhosted.org/packages/9c/8c/f0fd18f6140ddafc0c24122c8a964e48294acc579d47def376fef12bcb4a/pandas-2.2.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:38cf8125c40dae9d5acc10fa66af8ea6fdf760b2714ee482ca691fc66e6fcb18", size = 11290177, upload-time = "2024-09-20T13:09:41.141Z" },
|
| 2846 |
-
{ url = "https://files.pythonhosted.org/packages/ed/f9/e995754eab9c0f14c6777401f7eece0943840b7a9fc932221c19d1abee9f/pandas-2.2.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ba96630bc17c875161df3818780af30e43be9b166ce51c9a18c1feae342906c2", size = 14651526, upload-time = "2024-09-20T19:02:16.905Z" },
|
| 2847 |
-
{ url = "https://files.pythonhosted.org/packages/25/b0/98d6ae2e1abac4f35230aa756005e8654649d305df9a28b16b9ae4353bff/pandas-2.2.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1db71525a1538b30142094edb9adc10be3f3e176748cd7acc2240c2f2e5aa3a4", size = 11871013, upload-time = "2024-09-20T13:09:44.39Z" },
|
| 2848 |
-
{ url = "https://files.pythonhosted.org/packages/cc/57/0f72a10f9db6a4628744c8e8f0df4e6e21de01212c7c981d31e50ffc8328/pandas-2.2.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:15c0e1e02e93116177d29ff83e8b1619c93ddc9c49083f237d4312337a61165d", size = 15711620, upload-time = "2024-09-20T19:02:20.639Z" },
|
| 2849 |
-
{ url = "https://files.pythonhosted.org/packages/ab/5f/b38085618b950b79d2d9164a711c52b10aefc0ae6833b96f626b7021b2ed/pandas-2.2.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:ad5b65698ab28ed8d7f18790a0dc58005c7629f227be9ecc1072aa74c0c1d43a", size = 13098436, upload-time = "2024-09-20T13:09:48.112Z" },
|
| 2850 |
-
]
|
| 2851 |
-
|
| 2852 |
[[package]]
|
| 2853 |
name = "pathspec"
|
| 2854 |
version = "0.12.1"
|
|
@@ -2870,68 +2585,6 @@ wheels = [
|
|
| 2870 |
{ url = "https://files.pythonhosted.org/packages/fb/81/f457d6d361e04d061bef413749a6e1ab04d98cfeec6d8abcfe40184750f3/pgvector-0.3.6-py3-none-any.whl", hash = "sha256:f6c269b3c110ccb7496bac87202148ed18f34b390a0189c783e351062400a75a", size = 24880, upload-time = "2024-10-27T00:15:08.045Z" },
|
| 2871 |
]
|
| 2872 |
|
| 2873 |
-
[[package]]
|
| 2874 |
-
name = "pillow"
|
| 2875 |
-
version = "11.3.0"
|
| 2876 |
-
source = { registry = "https://pypi.org/simple" }
|
| 2877 |
-
sdist = { url = "https://files.pythonhosted.org/packages/f3/0d/d0d6dea55cd152ce3d6767bb38a8fc10e33796ba4ba210cbab9354b6d238/pillow-11.3.0.tar.gz", hash = "sha256:3828ee7586cd0b2091b6209e5ad53e20d0649bbe87164a459d0676e035e8f523", size = 47113069, upload-time = "2025-07-01T09:16:30.666Z" }
|
| 2878 |
-
wheels = [
|
| 2879 |
-
{ url = "https://files.pythonhosted.org/packages/db/26/77f8ed17ca4ffd60e1dcd220a6ec6d71210ba398cfa33a13a1cd614c5613/pillow-11.3.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:1cd110edf822773368b396281a2293aeb91c90a2db00d78ea43e7e861631b722", size = 5316531, upload-time = "2025-07-01T09:13:59.203Z" },
|
| 2880 |
-
{ url = "https://files.pythonhosted.org/packages/cb/39/ee475903197ce709322a17a866892efb560f57900d9af2e55f86db51b0a5/pillow-11.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9c412fddd1b77a75aa904615ebaa6001f169b26fd467b4be93aded278266b288", size = 4686560, upload-time = "2025-07-01T09:14:01.101Z" },
|
| 2881 |
-
{ url = "https://files.pythonhosted.org/packages/d5/90/442068a160fd179938ba55ec8c97050a612426fae5ec0a764e345839f76d/pillow-11.3.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7d1aa4de119a0ecac0a34a9c8bde33f34022e2e8f99104e47a3ca392fd60e37d", size = 5870978, upload-time = "2025-07-03T13:09:55.638Z" },
|
| 2882 |
-
{ url = "https://files.pythonhosted.org/packages/13/92/dcdd147ab02daf405387f0218dcf792dc6dd5b14d2573d40b4caeef01059/pillow-11.3.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:91da1d88226663594e3f6b4b8c3c8d85bd504117d043740a8e0ec449087cc494", size = 7641168, upload-time = "2025-07-03T13:10:00.37Z" },
|
| 2883 |
-
{ url = "https://files.pythonhosted.org/packages/6e/db/839d6ba7fd38b51af641aa904e2960e7a5644d60ec754c046b7d2aee00e5/pillow-11.3.0-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:643f189248837533073c405ec2f0bb250ba54598cf80e8c1e043381a60632f58", size = 5973053, upload-time = "2025-07-01T09:14:04.491Z" },
|
| 2884 |
-
{ url = "https://files.pythonhosted.org/packages/f2/2f/d7675ecae6c43e9f12aa8d58b6012683b20b6edfbdac7abcb4e6af7a3784/pillow-11.3.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:106064daa23a745510dabce1d84f29137a37224831d88eb4ce94bb187b1d7e5f", size = 6640273, upload-time = "2025-07-01T09:14:06.235Z" },
|
| 2885 |
-
{ url = "https://files.pythonhosted.org/packages/45/ad/931694675ede172e15b2ff03c8144a0ddaea1d87adb72bb07655eaffb654/pillow-11.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:cd8ff254faf15591e724dc7c4ddb6bf4793efcbe13802a4ae3e863cd300b493e", size = 6082043, upload-time = "2025-07-01T09:14:07.978Z" },
|
| 2886 |
-
{ url = "https://files.pythonhosted.org/packages/3a/04/ba8f2b11fc80d2dd462d7abec16351b45ec99cbbaea4387648a44190351a/pillow-11.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:932c754c2d51ad2b2271fd01c3d121daaa35e27efae2a616f77bf164bc0b3e94", size = 6715516, upload-time = "2025-07-01T09:14:10.233Z" },
|
| 2887 |
-
{ url = "https://files.pythonhosted.org/packages/48/59/8cd06d7f3944cc7d892e8533c56b0acb68399f640786313275faec1e3b6f/pillow-11.3.0-cp311-cp311-win32.whl", hash = "sha256:b4b8f3efc8d530a1544e5962bd6b403d5f7fe8b9e08227c6b255f98ad82b4ba0", size = 6274768, upload-time = "2025-07-01T09:14:11.921Z" },
|
| 2888 |
-
{ url = "https://files.pythonhosted.org/packages/f1/cc/29c0f5d64ab8eae20f3232da8f8571660aa0ab4b8f1331da5c2f5f9a938e/pillow-11.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:1a992e86b0dd7aeb1f053cd506508c0999d710a8f07b4c791c63843fc6a807ac", size = 6986055, upload-time = "2025-07-01T09:14:13.623Z" },
|
| 2889 |
-
{ url = "https://files.pythonhosted.org/packages/c6/df/90bd886fabd544c25addd63e5ca6932c86f2b701d5da6c7839387a076b4a/pillow-11.3.0-cp311-cp311-win_arm64.whl", hash = "sha256:30807c931ff7c095620fe04448e2c2fc673fcbb1ffe2a7da3fb39613489b1ddd", size = 2423079, upload-time = "2025-07-01T09:14:15.268Z" },
|
| 2890 |
-
{ url = "https://files.pythonhosted.org/packages/40/fe/1bc9b3ee13f68487a99ac9529968035cca2f0a51ec36892060edcc51d06a/pillow-11.3.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:fdae223722da47b024b867c1ea0be64e0df702c5e0a60e27daad39bf960dd1e4", size = 5278800, upload-time = "2025-07-01T09:14:17.648Z" },
|
| 2891 |
-
{ url = "https://files.pythonhosted.org/packages/2c/32/7e2ac19b5713657384cec55f89065fb306b06af008cfd87e572035b27119/pillow-11.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:921bd305b10e82b4d1f5e802b6850677f965d8394203d182f078873851dada69", size = 4686296, upload-time = "2025-07-01T09:14:19.828Z" },
|
| 2892 |
-
{ url = "https://files.pythonhosted.org/packages/8e/1e/b9e12bbe6e4c2220effebc09ea0923a07a6da1e1f1bfbc8d7d29a01ce32b/pillow-11.3.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:eb76541cba2f958032d79d143b98a3a6b3ea87f0959bbe256c0b5e416599fd5d", size = 5871726, upload-time = "2025-07-03T13:10:04.448Z" },
|
| 2893 |
-
{ url = "https://files.pythonhosted.org/packages/8d/33/e9200d2bd7ba00dc3ddb78df1198a6e80d7669cce6c2bdbeb2530a74ec58/pillow-11.3.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:67172f2944ebba3d4a7b54f2e95c786a3a50c21b88456329314caaa28cda70f6", size = 7644652, upload-time = "2025-07-03T13:10:10.391Z" },
|
| 2894 |
-
{ url = "https://files.pythonhosted.org/packages/41/f1/6f2427a26fc683e00d985bc391bdd76d8dd4e92fac33d841127eb8fb2313/pillow-11.3.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:97f07ed9f56a3b9b5f49d3661dc9607484e85c67e27f3e8be2c7d28ca032fec7", size = 5977787, upload-time = "2025-07-01T09:14:21.63Z" },
|
| 2895 |
-
{ url = "https://files.pythonhosted.org/packages/e4/c9/06dd4a38974e24f932ff5f98ea3c546ce3f8c995d3f0985f8e5ba48bba19/pillow-11.3.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:676b2815362456b5b3216b4fd5bd89d362100dc6f4945154ff172e206a22c024", size = 6645236, upload-time = "2025-07-01T09:14:23.321Z" },
|
| 2896 |
-
{ url = "https://files.pythonhosted.org/packages/40/e7/848f69fb79843b3d91241bad658e9c14f39a32f71a301bcd1d139416d1be/pillow-11.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3e184b2f26ff146363dd07bde8b711833d7b0202e27d13540bfe2e35a323a809", size = 6086950, upload-time = "2025-07-01T09:14:25.237Z" },
|
| 2897 |
-
{ url = "https://files.pythonhosted.org/packages/0b/1a/7cff92e695a2a29ac1958c2a0fe4c0b2393b60aac13b04a4fe2735cad52d/pillow-11.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:6be31e3fc9a621e071bc17bb7de63b85cbe0bfae91bb0363c893cbe67247780d", size = 6723358, upload-time = "2025-07-01T09:14:27.053Z" },
|
| 2898 |
-
{ url = "https://files.pythonhosted.org/packages/26/7d/73699ad77895f69edff76b0f332acc3d497f22f5d75e5360f78cbcaff248/pillow-11.3.0-cp312-cp312-win32.whl", hash = "sha256:7b161756381f0918e05e7cb8a371fff367e807770f8fe92ecb20d905d0e1c149", size = 6275079, upload-time = "2025-07-01T09:14:30.104Z" },
|
| 2899 |
-
{ url = "https://files.pythonhosted.org/packages/8c/ce/e7dfc873bdd9828f3b6e5c2bbb74e47a98ec23cc5c74fc4e54462f0d9204/pillow-11.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:a6444696fce635783440b7f7a9fc24b3ad10a9ea3f0ab66c5905be1c19ccf17d", size = 6986324, upload-time = "2025-07-01T09:14:31.899Z" },
|
| 2900 |
-
{ url = "https://files.pythonhosted.org/packages/16/8f/b13447d1bf0b1f7467ce7d86f6e6edf66c0ad7cf44cf5c87a37f9bed9936/pillow-11.3.0-cp312-cp312-win_arm64.whl", hash = "sha256:2aceea54f957dd4448264f9bf40875da0415c83eb85f55069d89c0ed436e3542", size = 2423067, upload-time = "2025-07-01T09:14:33.709Z" },
|
| 2901 |
-
{ url = "https://files.pythonhosted.org/packages/1e/93/0952f2ed8db3a5a4c7a11f91965d6184ebc8cd7cbb7941a260d5f018cd2d/pillow-11.3.0-cp313-cp313-ios_13_0_arm64_iphoneos.whl", hash = "sha256:1c627742b539bba4309df89171356fcb3cc5a9178355b2727d1b74a6cf155fbd", size = 2128328, upload-time = "2025-07-01T09:14:35.276Z" },
|
| 2902 |
-
{ url = "https://files.pythonhosted.org/packages/4b/e8/100c3d114b1a0bf4042f27e0f87d2f25e857e838034e98ca98fe7b8c0a9c/pillow-11.3.0-cp313-cp313-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:30b7c02f3899d10f13d7a48163c8969e4e653f8b43416d23d13d1bbfdc93b9f8", size = 2170652, upload-time = "2025-07-01T09:14:37.203Z" },
|
| 2903 |
-
{ url = "https://files.pythonhosted.org/packages/aa/86/3f758a28a6e381758545f7cdb4942e1cb79abd271bea932998fc0db93cb6/pillow-11.3.0-cp313-cp313-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:7859a4cc7c9295f5838015d8cc0a9c215b77e43d07a25e460f35cf516df8626f", size = 2227443, upload-time = "2025-07-01T09:14:39.344Z" },
|
| 2904 |
-
{ url = "https://files.pythonhosted.org/packages/01/f4/91d5b3ffa718df2f53b0dc109877993e511f4fd055d7e9508682e8aba092/pillow-11.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ec1ee50470b0d050984394423d96325b744d55c701a439d2bd66089bff963d3c", size = 5278474, upload-time = "2025-07-01T09:14:41.843Z" },
|
| 2905 |
-
{ url = "https://files.pythonhosted.org/packages/f9/0e/37d7d3eca6c879fbd9dba21268427dffda1ab00d4eb05b32923d4fbe3b12/pillow-11.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7db51d222548ccfd274e4572fdbf3e810a5e66b00608862f947b163e613b67dd", size = 4686038, upload-time = "2025-07-01T09:14:44.008Z" },
|
| 2906 |
-
{ url = "https://files.pythonhosted.org/packages/ff/b0/3426e5c7f6565e752d81221af9d3676fdbb4f352317ceafd42899aaf5d8a/pillow-11.3.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:2d6fcc902a24ac74495df63faad1884282239265c6839a0a6416d33faedfae7e", size = 5864407, upload-time = "2025-07-03T13:10:15.628Z" },
|
| 2907 |
-
{ url = "https://files.pythonhosted.org/packages/fc/c1/c6c423134229f2a221ee53f838d4be9d82bab86f7e2f8e75e47b6bf6cd77/pillow-11.3.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f0f5d8f4a08090c6d6d578351a2b91acf519a54986c055af27e7a93feae6d3f1", size = 7639094, upload-time = "2025-07-03T13:10:21.857Z" },
|
| 2908 |
-
{ url = "https://files.pythonhosted.org/packages/ba/c9/09e6746630fe6372c67c648ff9deae52a2bc20897d51fa293571977ceb5d/pillow-11.3.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c37d8ba9411d6003bba9e518db0db0c58a680ab9fe5179f040b0463644bc9805", size = 5973503, upload-time = "2025-07-01T09:14:45.698Z" },
|
| 2909 |
-
{ url = "https://files.pythonhosted.org/packages/d5/1c/a2a29649c0b1983d3ef57ee87a66487fdeb45132df66ab30dd37f7dbe162/pillow-11.3.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:13f87d581e71d9189ab21fe0efb5a23e9f28552d5be6979e84001d3b8505abe8", size = 6642574, upload-time = "2025-07-01T09:14:47.415Z" },
|
| 2910 |
-
{ url = "https://files.pythonhosted.org/packages/36/de/d5cc31cc4b055b6c6fd990e3e7f0f8aaf36229a2698501bcb0cdf67c7146/pillow-11.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:023f6d2d11784a465f09fd09a34b150ea4672e85fb3d05931d89f373ab14abb2", size = 6084060, upload-time = "2025-07-01T09:14:49.636Z" },
|
| 2911 |
-
{ url = "https://files.pythonhosted.org/packages/d5/ea/502d938cbaeec836ac28a9b730193716f0114c41325db428e6b280513f09/pillow-11.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:45dfc51ac5975b938e9809451c51734124e73b04d0f0ac621649821a63852e7b", size = 6721407, upload-time = "2025-07-01T09:14:51.962Z" },
|
| 2912 |
-
{ url = "https://files.pythonhosted.org/packages/45/9c/9c5e2a73f125f6cbc59cc7087c8f2d649a7ae453f83bd0362ff7c9e2aee2/pillow-11.3.0-cp313-cp313-win32.whl", hash = "sha256:a4d336baed65d50d37b88ca5b60c0fa9d81e3a87d4a7930d3880d1624d5b31f3", size = 6273841, upload-time = "2025-07-01T09:14:54.142Z" },
|
| 2913 |
-
{ url = "https://files.pythonhosted.org/packages/23/85/397c73524e0cd212067e0c969aa245b01d50183439550d24d9f55781b776/pillow-11.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:0bce5c4fd0921f99d2e858dc4d4d64193407e1b99478bc5cacecba2311abde51", size = 6978450, upload-time = "2025-07-01T09:14:56.436Z" },
|
| 2914 |
-
{ url = "https://files.pythonhosted.org/packages/17/d2/622f4547f69cd173955194b78e4d19ca4935a1b0f03a302d655c9f6aae65/pillow-11.3.0-cp313-cp313-win_arm64.whl", hash = "sha256:1904e1264881f682f02b7f8167935cce37bc97db457f8e7849dc3a6a52b99580", size = 2423055, upload-time = "2025-07-01T09:14:58.072Z" },
|
| 2915 |
-
{ url = "https://files.pythonhosted.org/packages/dd/80/a8a2ac21dda2e82480852978416cfacd439a4b490a501a288ecf4fe2532d/pillow-11.3.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:4c834a3921375c48ee6b9624061076bc0a32a60b5532b322cc0ea64e639dd50e", size = 5281110, upload-time = "2025-07-01T09:14:59.79Z" },
|
| 2916 |
-
{ url = "https://files.pythonhosted.org/packages/44/d6/b79754ca790f315918732e18f82a8146d33bcd7f4494380457ea89eb883d/pillow-11.3.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5e05688ccef30ea69b9317a9ead994b93975104a677a36a8ed8106be9260aa6d", size = 4689547, upload-time = "2025-07-01T09:15:01.648Z" },
|
| 2917 |
-
{ url = "https://files.pythonhosted.org/packages/49/20/716b8717d331150cb00f7fdd78169c01e8e0c219732a78b0e59b6bdb2fd6/pillow-11.3.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:1019b04af07fc0163e2810167918cb5add8d74674b6267616021ab558dc98ced", size = 5901554, upload-time = "2025-07-03T13:10:27.018Z" },
|
| 2918 |
-
{ url = "https://files.pythonhosted.org/packages/74/cf/a9f3a2514a65bb071075063a96f0a5cf949c2f2fce683c15ccc83b1c1cab/pillow-11.3.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f944255db153ebb2b19c51fe85dd99ef0ce494123f21b9db4877ffdfc5590c7c", size = 7669132, upload-time = "2025-07-03T13:10:33.01Z" },
|
| 2919 |
-
{ url = "https://files.pythonhosted.org/packages/98/3c/da78805cbdbee9cb43efe8261dd7cc0b4b93f2ac79b676c03159e9db2187/pillow-11.3.0-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1f85acb69adf2aaee8b7da124efebbdb959a104db34d3a2cb0f3793dbae422a8", size = 6005001, upload-time = "2025-07-01T09:15:03.365Z" },
|
| 2920 |
-
{ url = "https://files.pythonhosted.org/packages/6c/fa/ce044b91faecf30e635321351bba32bab5a7e034c60187fe9698191aef4f/pillow-11.3.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:05f6ecbeff5005399bb48d198f098a9b4b6bdf27b8487c7f38ca16eeb070cd59", size = 6668814, upload-time = "2025-07-01T09:15:05.655Z" },
|
| 2921 |
-
{ url = "https://files.pythonhosted.org/packages/7b/51/90f9291406d09bf93686434f9183aba27b831c10c87746ff49f127ee80cb/pillow-11.3.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:a7bc6e6fd0395bc052f16b1a8670859964dbd7003bd0af2ff08342eb6e442cfe", size = 6113124, upload-time = "2025-07-01T09:15:07.358Z" },
|
| 2922 |
-
{ url = "https://files.pythonhosted.org/packages/cd/5a/6fec59b1dfb619234f7636d4157d11fb4e196caeee220232a8d2ec48488d/pillow-11.3.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:83e1b0161c9d148125083a35c1c5a89db5b7054834fd4387499e06552035236c", size = 6747186, upload-time = "2025-07-01T09:15:09.317Z" },
|
| 2923 |
-
{ url = "https://files.pythonhosted.org/packages/49/6b/00187a044f98255225f172de653941e61da37104a9ea60e4f6887717e2b5/pillow-11.3.0-cp313-cp313t-win32.whl", hash = "sha256:2a3117c06b8fb646639dce83694f2f9eac405472713fcb1ae887469c0d4f6788", size = 6277546, upload-time = "2025-07-01T09:15:11.311Z" },
|
| 2924 |
-
{ url = "https://files.pythonhosted.org/packages/e8/5c/6caaba7e261c0d75bab23be79f1d06b5ad2a2ae49f028ccec801b0e853d6/pillow-11.3.0-cp313-cp313t-win_amd64.whl", hash = "sha256:857844335c95bea93fb39e0fa2726b4d9d758850b34075a7e3ff4f4fa3aa3b31", size = 6985102, upload-time = "2025-07-01T09:15:13.164Z" },
|
| 2925 |
-
{ url = "https://files.pythonhosted.org/packages/f3/7e/b623008460c09a0cb38263c93b828c666493caee2eb34ff67f778b87e58c/pillow-11.3.0-cp313-cp313t-win_arm64.whl", hash = "sha256:8797edc41f3e8536ae4b10897ee2f637235c94f27404cac7297f7b607dd0716e", size = 2424803, upload-time = "2025-07-01T09:15:15.695Z" },
|
| 2926 |
-
{ url = "https://files.pythonhosted.org/packages/9e/e3/6fa84033758276fb31da12e5fb66ad747ae83b93c67af17f8c6ff4cc8f34/pillow-11.3.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7c8ec7a017ad1bd562f93dbd8505763e688d388cde6e4a010ae1486916e713e6", size = 5270566, upload-time = "2025-07-01T09:16:19.801Z" },
|
| 2927 |
-
{ url = "https://files.pythonhosted.org/packages/5b/ee/e8d2e1ab4892970b561e1ba96cbd59c0d28cf66737fc44abb2aec3795a4e/pillow-11.3.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:9ab6ae226de48019caa8074894544af5b53a117ccb9d3b3dcb2871464c829438", size = 4654618, upload-time = "2025-07-01T09:16:21.818Z" },
|
| 2928 |
-
{ url = "https://files.pythonhosted.org/packages/f2/6d/17f80f4e1f0761f02160fc433abd4109fa1548dcfdca46cfdadaf9efa565/pillow-11.3.0-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:fe27fb049cdcca11f11a7bfda64043c37b30e6b91f10cb5bab275806c32f6ab3", size = 4874248, upload-time = "2025-07-03T13:11:20.738Z" },
|
| 2929 |
-
{ url = "https://files.pythonhosted.org/packages/de/5f/c22340acd61cef960130585bbe2120e2fd8434c214802f07e8c03596b17e/pillow-11.3.0-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:465b9e8844e3c3519a983d58b80be3f668e2a7a5db97f2784e7079fbc9f9822c", size = 6583963, upload-time = "2025-07-03T13:11:26.283Z" },
|
| 2930 |
-
{ url = "https://files.pythonhosted.org/packages/31/5e/03966aedfbfcbb4d5f8aa042452d3361f325b963ebbadddac05b122e47dd/pillow-11.3.0-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5418b53c0d59b3824d05e029669efa023bbef0f3e92e75ec8428f3799487f361", size = 4957170, upload-time = "2025-07-01T09:16:23.762Z" },
|
| 2931 |
-
{ url = "https://files.pythonhosted.org/packages/cc/2d/e082982aacc927fc2cab48e1e731bdb1643a1406acace8bed0900a61464e/pillow-11.3.0-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:504b6f59505f08ae014f724b6207ff6222662aab5cc9542577fb084ed0676ac7", size = 5581505, upload-time = "2025-07-01T09:16:25.593Z" },
|
| 2932 |
-
{ url = "https://files.pythonhosted.org/packages/34/e7/ae39f538fd6844e982063c3a5e4598b8ced43b9633baa3a85ef33af8c05c/pillow-11.3.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:c84d689db21a1c397d001aa08241044aa2069e7587b398c8cc63020390b1c1b8", size = 6984598, upload-time = "2025-07-01T09:16:27.732Z" },
|
| 2933 |
-
]
|
| 2934 |
-
|
| 2935 |
[[package]]
|
| 2936 |
name = "platformdirs"
|
| 2937 |
version = "4.5.0"
|
|
@@ -3292,19 +2945,6 @@ wheels = [
|
|
| 3292 |
{ url = "https://files.pythonhosted.org/packages/c1/60/5d4751ba3f4a40a6891f24eec885f51afd78d208498268c734e256fb13c4/pydantic_settings-2.12.0-py3-none-any.whl", hash = "sha256:fddb9fd99a5b18da837b29710391e945b1e30c135477f484084ee513adb93809", size = 51880, upload-time = "2025-11-10T14:25:45.546Z" },
|
| 3293 |
]
|
| 3294 |
|
| 3295 |
-
[[package]]
|
| 3296 |
-
name = "pydeck"
|
| 3297 |
-
version = "0.9.1"
|
| 3298 |
-
source = { registry = "https://pypi.org/simple" }
|
| 3299 |
-
dependencies = [
|
| 3300 |
-
{ name = "jinja2" },
|
| 3301 |
-
{ name = "numpy" },
|
| 3302 |
-
]
|
| 3303 |
-
sdist = { url = "https://files.pythonhosted.org/packages/a1/ca/40e14e196864a0f61a92abb14d09b3d3da98f94ccb03b49cf51688140dab/pydeck-0.9.1.tar.gz", hash = "sha256:f74475ae637951d63f2ee58326757f8d4f9cd9f2a457cf42950715003e2cb605", size = 3832240, upload-time = "2024-05-10T15:36:21.153Z" }
|
| 3304 |
-
wheels = [
|
| 3305 |
-
{ url = "https://files.pythonhosted.org/packages/ab/4c/b888e6cf58bd9db9c93f40d1c6be8283ff49d88919231afe93a6bcf61626/pydeck-0.9.1-py2.py3-none-any.whl", hash = "sha256:b3f75ba0d273fc917094fa61224f3f6076ca8752b93d46faf3bcfd9f9d59b038", size = 6900403, upload-time = "2024-05-10T15:36:17.36Z" },
|
| 3306 |
-
]
|
| 3307 |
-
|
| 3308 |
[[package]]
|
| 3309 |
name = "pygments"
|
| 3310 |
version = "2.19.2"
|
|
@@ -3370,47 +3010,6 @@ wheels = [
|
|
| 3370 |
{ url = "https://files.pythonhosted.org/packages/08/e2/7d3a30ac905c99ea93729e03d2bb3d16fec26a789e98407d61cb368ab4bb/pymongo-4.12.1-cp313-cp313t-win_amd64.whl", hash = "sha256:46d86cf91ee9609d0713242a1d99fa9e9c60b4315e1a067b9a9e769bedae629d", size = 1003332, upload-time = "2025-04-29T18:45:54.631Z" },
|
| 3371 |
]
|
| 3372 |
|
| 3373 |
-
[[package]]
|
| 3374 |
-
name = "pyowm"
|
| 3375 |
-
version = "3.3.0"
|
| 3376 |
-
source = { registry = "https://pypi.org/simple" }
|
| 3377 |
-
dependencies = [
|
| 3378 |
-
{ name = "geojson" },
|
| 3379 |
-
{ name = "pysocks" },
|
| 3380 |
-
{ name = "requests", extra = ["socks"] },
|
| 3381 |
-
]
|
| 3382 |
-
sdist = { url = "https://files.pythonhosted.org/packages/43/5a/0f44ccc4b81d23b9c84c821532e269727a64f3335c4231c9c16702809719/pyowm-3.3.0.tar.gz", hash = "sha256:8196f77c91eac680676ed5ee484aae8a165408055e3e2b28025cbf60b8681e03", size = 4516988, upload-time = "2022-02-14T21:31:59.957Z" }
|
| 3383 |
-
wheels = [
|
| 3384 |
-
{ url = "https://files.pythonhosted.org/packages/6e/88/1279817aa7f5988a2ff42a6755fd371f3c1806aca377cb63b3d16684b174/pyowm-3.3.0-py3-none-any.whl", hash = "sha256:86463108e7613171531ba306040b43c972b3fc0b0acf73b12c50910cdd2107ab", size = 4547201, upload-time = "2022-02-14T21:33:18.997Z" },
|
| 3385 |
-
]
|
| 3386 |
-
|
| 3387 |
-
[[package]]
|
| 3388 |
-
name = "pypdf"
|
| 3389 |
-
version = "5.3.1"
|
| 3390 |
-
source = { registry = "https://pypi.org/simple" }
|
| 3391 |
-
sdist = { url = "https://files.pythonhosted.org/packages/da/5b/67df68ec4b934aae9ca89edfb43a869c5edb3bd504dd275be9e83001d3e9/pypdf-5.3.1.tar.gz", hash = "sha256:0b9b715252b3c60bacc052e6a780e8b742cee9b9a2135f6007bb018e22a5adad", size = 5011845, upload-time = "2025-03-02T09:03:39.457Z" }
|
| 3392 |
-
wheels = [
|
| 3393 |
-
{ url = "https://files.pythonhosted.org/packages/f4/0c/75da081f5948e07f373a92087e4808739a3248d308f01c78c9bd4a51defa/pypdf-5.3.1-py3-none-any.whl", hash = "sha256:20ea5b8686faad1b695fda054462b667d5e5f51e25fbbc092f12c5e0bb20d738", size = 302042, upload-time = "2025-03-02T09:03:36.679Z" },
|
| 3394 |
-
]
|
| 3395 |
-
|
| 3396 |
-
[[package]]
|
| 3397 |
-
name = "pyreadline3"
|
| 3398 |
-
version = "3.5.4"
|
| 3399 |
-
source = { registry = "https://pypi.org/simple" }
|
| 3400 |
-
sdist = { url = "https://files.pythonhosted.org/packages/0f/49/4cea918a08f02817aabae639e3d0ac046fef9f9180518a3ad394e22da148/pyreadline3-3.5.4.tar.gz", hash = "sha256:8d57d53039a1c75adba8e50dd3d992b28143480816187ea5efbd5c78e6c885b7", size = 99839, upload-time = "2024-09-19T02:40:10.062Z" }
|
| 3401 |
-
wheels = [
|
| 3402 |
-
{ url = "https://files.pythonhosted.org/packages/5a/dc/491b7661614ab97483abf2056be1deee4dc2490ecbf7bff9ab5cdbac86e1/pyreadline3-3.5.4-py3-none-any.whl", hash = "sha256:eaf8e6cc3c49bcccf145fc6067ba8643d1df34d604a1ec0eccbf7a18e6d3fae6", size = 83178, upload-time = "2024-09-19T02:40:08.598Z" },
|
| 3403 |
-
]
|
| 3404 |
-
|
| 3405 |
-
[[package]]
|
| 3406 |
-
name = "pysocks"
|
| 3407 |
-
version = "1.7.1"
|
| 3408 |
-
source = { registry = "https://pypi.org/simple" }
|
| 3409 |
-
sdist = { url = "https://files.pythonhosted.org/packages/bd/11/293dd436aea955d45fc4e8a35b6ae7270f5b8e00b53cf6c024c83b657a11/PySocks-1.7.1.tar.gz", hash = "sha256:3f8804571ebe159c380ac6de37643bb4685970655d3bba243530d6558b799aa0", size = 284429, upload-time = "2019-09-20T02:07:35.714Z" }
|
| 3410 |
-
wheels = [
|
| 3411 |
-
{ url = "https://files.pythonhosted.org/packages/8d/59/b4572118e098ac8e46e399a1dd0f2d85403ce8bbaad9ec79373ed6badaf9/PySocks-1.7.1-py3-none-any.whl", hash = "sha256:2725bd0a9925919b9b51739eea5f9e2bae91e83288108a9ad338b2e3a4435ee5", size = 16725, upload-time = "2019-09-20T02:06:22.938Z" },
|
| 3412 |
-
]
|
| 3413 |
-
|
| 3414 |
[[package]]
|
| 3415 |
name = "pytest"
|
| 3416 |
version = "9.0.1"
|
|
@@ -3496,15 +3095,6 @@ wheels = [
|
|
| 3496 |
{ url = "https://files.pythonhosted.org/packages/45/58/38b5afbc1a800eeea951b9285d3912613f2603bdf897a4ab0f4bd7f405fc/python_multipart-0.0.20-py3-none-any.whl", hash = "sha256:8a62d3a8335e06589fe01f2a3e178cdcc632f3fbe0d492ad9ee0ec35aab1f104", size = 24546, upload-time = "2024-12-16T19:45:44.423Z" },
|
| 3497 |
]
|
| 3498 |
|
| 3499 |
-
[[package]]
|
| 3500 |
-
name = "pytz"
|
| 3501 |
-
version = "2025.2"
|
| 3502 |
-
source = { registry = "https://pypi.org/simple" }
|
| 3503 |
-
sdist = { url = "https://files.pythonhosted.org/packages/f8/bf/abbd3cdfb8fbc7fb3d4d38d320f2441b1e7cbe29be4f23797b4a2b5d8aac/pytz-2025.2.tar.gz", hash = "sha256:360b9e3dbb49a209c21ad61809c7fb453643e048b38924c765813546746e81c3", size = 320884, upload-time = "2025-03-25T02:25:00.538Z" }
|
| 3504 |
-
wheels = [
|
| 3505 |
-
{ url = "https://files.pythonhosted.org/packages/81/c4/34e93fe5f5429d7570ec1fa436f1986fb1f00c3e0f43a589fe2bbcd22c3f/pytz-2025.2-py2.py3-none-any.whl", hash = "sha256:5ddf76296dd8c44c26eb8f4b6f35488f3ccbf6fbbd7adee0b7262d43f0ec2f00", size = 509225, upload-time = "2025-03-25T02:24:58.468Z" },
|
| 3506 |
-
]
|
| 3507 |
-
|
| 3508 |
[[package]]
|
| 3509 |
name = "pywin32"
|
| 3510 |
version = "311"
|
|
@@ -3651,11 +3241,6 @@ wheels = [
|
|
| 3651 |
{ url = "https://files.pythonhosted.org/packages/1e/db/4254e3eabe8020b458f1a747140d32277ec7a271daf1d235b70dc0b4e6e3/requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6", size = 64738, upload-time = "2025-08-18T20:46:00.542Z" },
|
| 3652 |
]
|
| 3653 |
|
| 3654 |
-
[package.optional-dependencies]
|
| 3655 |
-
socks = [
|
| 3656 |
-
{ name = "pysocks" },
|
| 3657 |
-
]
|
| 3658 |
-
|
| 3659 |
[[package]]
|
| 3660 |
name = "requests-toolbelt"
|
| 3661 |
version = "1.0.0"
|
|
@@ -3858,15 +3443,6 @@ wheels = [
|
|
| 3858 |
{ url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050, upload-time = "2024-12-04T17:35:26.475Z" },
|
| 3859 |
]
|
| 3860 |
|
| 3861 |
-
[[package]]
|
| 3862 |
-
name = "smmap"
|
| 3863 |
-
version = "5.0.2"
|
| 3864 |
-
source = { registry = "https://pypi.org/simple" }
|
| 3865 |
-
sdist = { url = "https://files.pythonhosted.org/packages/44/cd/a040c4b3119bbe532e5b0732286f805445375489fceaec1f48306068ee3b/smmap-5.0.2.tar.gz", hash = "sha256:26ea65a03958fa0c8a1c7e8c7a58fdc77221b8910f6be2131affade476898ad5", size = 22329, upload-time = "2025-01-02T07:14:40.909Z" }
|
| 3866 |
-
wheels = [
|
| 3867 |
-
{ url = "https://files.pythonhosted.org/packages/04/be/d09147ad1ec7934636ad912901c5fd7667e1c858e19d355237db0d0cd5e4/smmap-5.0.2-py3-none-any.whl", hash = "sha256:b30115f0def7d7531d22a0fb6502488d879e75b260a9db4d0819cfb25403af5e", size = 24303, upload-time = "2025-01-02T07:14:38.724Z" },
|
| 3868 |
-
]
|
| 3869 |
-
|
| 3870 |
[[package]]
|
| 3871 |
name = "sniffio"
|
| 3872 |
version = "1.3.1"
|
|
@@ -3965,35 +3541,6 @@ wheels = [
|
|
| 3965 |
{ url = "https://files.pythonhosted.org/packages/8b/0c/9d30a4ebeb6db2b25a841afbb80f6ef9a854fc3b41be131d249a977b4959/starlette-0.46.2-py3-none-any.whl", hash = "sha256:595633ce89f8ffa71a015caed34a5b2dc1c0cdb3f0f1fbd1e69339cf2abeec35", size = 72037, upload-time = "2025-04-13T13:56:16.21Z" },
|
| 3966 |
]
|
| 3967 |
|
| 3968 |
-
[[package]]
|
| 3969 |
-
name = "streamlit"
|
| 3970 |
-
version = "1.52.1"
|
| 3971 |
-
source = { registry = "https://pypi.org/simple" }
|
| 3972 |
-
dependencies = [
|
| 3973 |
-
{ name = "altair" },
|
| 3974 |
-
{ name = "blinker" },
|
| 3975 |
-
{ name = "cachetools" },
|
| 3976 |
-
{ name = "click" },
|
| 3977 |
-
{ name = "gitpython" },
|
| 3978 |
-
{ name = "numpy" },
|
| 3979 |
-
{ name = "packaging" },
|
| 3980 |
-
{ name = "pandas" },
|
| 3981 |
-
{ name = "pillow" },
|
| 3982 |
-
{ name = "protobuf" },
|
| 3983 |
-
{ name = "pyarrow" },
|
| 3984 |
-
{ name = "pydeck" },
|
| 3985 |
-
{ name = "requests" },
|
| 3986 |
-
{ name = "tenacity" },
|
| 3987 |
-
{ name = "toml" },
|
| 3988 |
-
{ name = "tornado" },
|
| 3989 |
-
{ name = "typing-extensions" },
|
| 3990 |
-
{ name = "watchdog", marker = "sys_platform != 'darwin'" },
|
| 3991 |
-
]
|
| 3992 |
-
sdist = { url = "https://files.pythonhosted.org/packages/6f/ec/66ca1578587bbaaea5757a7f074d3f762fff8d782ffb4497a83c5722aa44/streamlit-1.52.1.tar.gz", hash = "sha256:b036a71866b893c97fdebaa2a2ebd21ebf2af7daea4b3abe783a57b26f55b3ca", size = 8582829, upload-time = "2025-12-05T18:55:42.006Z" }
|
| 3993 |
-
wheels = [
|
| 3994 |
-
{ url = "https://files.pythonhosted.org/packages/d0/d4/cdafd4cc940937410f465ca7a77dd34237182c2ddece624e08db959496f8/streamlit-1.52.1-py3-none-any.whl", hash = "sha256:97fee2c3421d350fd65548e45a20f506ec1b651d78f95ecacbc0c2f9f838081c", size = 9024748, upload-time = "2025-12-05T18:55:39.713Z" },
|
| 3995 |
-
]
|
| 3996 |
-
|
| 3997 |
[[package]]
|
| 3998 |
name = "structlog"
|
| 3999 |
version = "25.5.0"
|
|
@@ -4003,18 +3550,6 @@ wheels = [
|
|
| 4003 |
{ url = "https://files.pythonhosted.org/packages/a8/45/a132b9074aa18e799b891b91ad72133c98d8042c70f6240e4c5f9dabee2f/structlog-25.5.0-py3-none-any.whl", hash = "sha256:a8453e9b9e636ec59bd9e79bbd4a72f025981b3ba0f5837aebf48f02f37a7f9f", size = 72510, upload-time = "2025-10-27T08:28:21.535Z" },
|
| 4004 |
]
|
| 4005 |
|
| 4006 |
-
[[package]]
|
| 4007 |
-
name = "sympy"
|
| 4008 |
-
version = "1.14.0"
|
| 4009 |
-
source = { registry = "https://pypi.org/simple" }
|
| 4010 |
-
dependencies = [
|
| 4011 |
-
{ name = "mpmath" },
|
| 4012 |
-
]
|
| 4013 |
-
sdist = { url = "https://files.pythonhosted.org/packages/83/d3/803453b36afefb7c2bb238361cd4ae6125a569b4db67cd9e79846ba2d68c/sympy-1.14.0.tar.gz", hash = "sha256:d3d3fe8df1e5a0b42f0e7bdf50541697dbe7d23746e894990c030e2b05e72517", size = 7793921, upload-time = "2025-04-27T18:05:01.611Z" }
|
| 4014 |
-
wheels = [
|
| 4015 |
-
{ url = "https://files.pythonhosted.org/packages/a2/09/77d55d46fd61b4a135c444fc97158ef34a095e5681d0a6c10b75bf356191/sympy-1.14.0-py3-none-any.whl", hash = "sha256:e091cc3e99d2141a0ba2847328f5479b05d94a6635cb96148ccb3f34671bd8f5", size = 6299353, upload-time = "2025-04-27T18:04:59.103Z" },
|
| 4016 |
-
]
|
| 4017 |
-
|
| 4018 |
[[package]]
|
| 4019 |
name = "tenacity"
|
| 4020 |
version = "9.1.2"
|
|
@@ -4064,15 +3599,6 @@ wheels = [
|
|
| 4064 |
{ url = "https://files.pythonhosted.org/packages/93/e0/6cc82a562bc6365785a3ff0af27a2a092d57c47d7a81d9e2295d8c36f011/tiktoken-0.12.0-cp313-cp313t-win_amd64.whl", hash = "sha256:dc2dd125a62cb2b3d858484d6c614d136b5b848976794edfb63688d539b8b93f", size = 878777, upload-time = "2025-10-06T20:22:18.036Z" },
|
| 4065 |
]
|
| 4066 |
|
| 4067 |
-
[[package]]
|
| 4068 |
-
name = "toml"
|
| 4069 |
-
version = "0.10.2"
|
| 4070 |
-
source = { registry = "https://pypi.org/simple" }
|
| 4071 |
-
sdist = { url = "https://files.pythonhosted.org/packages/be/ba/1f744cdc819428fc6b5084ec34d9b30660f6f9daaf70eead706e3203ec3c/toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f", size = 22253, upload-time = "2020-11-01T01:40:22.204Z" }
|
| 4072 |
-
wheels = [
|
| 4073 |
-
{ url = "https://files.pythonhosted.org/packages/44/6f/7120676b6d73228c96e17f1f794d8ab046fc910d781c8d151120c3f1569e/toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b", size = 16588, upload-time = "2020-11-01T01:40:20.672Z" },
|
| 4074 |
-
]
|
| 4075 |
-
|
| 4076 |
[[package]]
|
| 4077 |
name = "tomli"
|
| 4078 |
version = "2.3.0"
|
|
@@ -4107,22 +3633,27 @@ wheels = [
|
|
| 4107 |
]
|
| 4108 |
|
| 4109 |
[[package]]
|
| 4110 |
-
name = "
|
| 4111 |
-
version = "
|
| 4112 |
source = { registry = "https://pypi.org/simple" }
|
| 4113 |
-
sdist = { url = "https://files.pythonhosted.org/packages/
|
| 4114 |
wheels = [
|
| 4115 |
-
{ url = "https://files.pythonhosted.org/packages/
|
| 4116 |
-
{ url = "https://files.pythonhosted.org/packages/
|
| 4117 |
-
{ url = "https://files.pythonhosted.org/packages/
|
| 4118 |
-
{ url = "https://files.pythonhosted.org/packages/
|
| 4119 |
-
{ url = "https://files.pythonhosted.org/packages/
|
| 4120 |
-
{ url = "https://files.pythonhosted.org/packages/
|
| 4121 |
-
{ url = "https://files.pythonhosted.org/packages/
|
| 4122 |
-
{ url = "https://files.pythonhosted.org/packages/
|
| 4123 |
-
{ url = "https://files.pythonhosted.org/packages/
|
| 4124 |
-
{ url = "https://files.pythonhosted.org/packages/
|
| 4125 |
-
{ url = "https://files.pythonhosted.org/packages/
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 4126 |
]
|
| 4127 |
|
| 4128 |
[[package]]
|
|
@@ -4234,24 +3765,6 @@ wheels = [
|
|
| 4234 |
{ url = "https://files.pythonhosted.org/packages/79/0c/c05523fa3181fdf0c9c52a6ba91a23fbf3246cc095f26f6516f9c60e6771/virtualenv-20.35.4-py3-none-any.whl", hash = "sha256:c21c9cede36c9753eeade68ba7d523529f228a403463376cf821eaae2b650f1b", size = 6005095, upload-time = "2025-10-29T06:57:37.598Z" },
|
| 4235 |
]
|
| 4236 |
|
| 4237 |
-
[[package]]
|
| 4238 |
-
name = "watchdog"
|
| 4239 |
-
version = "6.0.0"
|
| 4240 |
-
source = { registry = "https://pypi.org/simple" }
|
| 4241 |
-
sdist = { url = "https://files.pythonhosted.org/packages/db/7d/7f3d619e951c88ed75c6037b246ddcf2d322812ee8ea189be89511721d54/watchdog-6.0.0.tar.gz", hash = "sha256:9ddf7c82fda3ae8e24decda1338ede66e1c99883db93711d8fb941eaa2d8c282", size = 131220, upload-time = "2024-11-01T14:07:13.037Z" }
|
| 4242 |
-
wheels = [
|
| 4243 |
-
{ url = "https://files.pythonhosted.org/packages/a9/c7/ca4bf3e518cb57a686b2feb4f55a1892fd9a3dd13f470fca14e00f80ea36/watchdog-6.0.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:7607498efa04a3542ae3e05e64da8202e58159aa1fa4acddf7678d34a35d4f13", size = 79079, upload-time = "2024-11-01T14:06:59.472Z" },
|
| 4244 |
-
{ url = "https://files.pythonhosted.org/packages/5c/51/d46dc9332f9a647593c947b4b88e2381c8dfc0942d15b8edc0310fa4abb1/watchdog-6.0.0-py3-none-manylinux2014_armv7l.whl", hash = "sha256:9041567ee8953024c83343288ccc458fd0a2d811d6a0fd68c4c22609e3490379", size = 79078, upload-time = "2024-11-01T14:07:01.431Z" },
|
| 4245 |
-
{ url = "https://files.pythonhosted.org/packages/d4/57/04edbf5e169cd318d5f07b4766fee38e825d64b6913ca157ca32d1a42267/watchdog-6.0.0-py3-none-manylinux2014_i686.whl", hash = "sha256:82dc3e3143c7e38ec49d61af98d6558288c415eac98486a5c581726e0737c00e", size = 79076, upload-time = "2024-11-01T14:07:02.568Z" },
|
| 4246 |
-
{ url = "https://files.pythonhosted.org/packages/ab/cc/da8422b300e13cb187d2203f20b9253e91058aaf7db65b74142013478e66/watchdog-6.0.0-py3-none-manylinux2014_ppc64.whl", hash = "sha256:212ac9b8bf1161dc91bd09c048048a95ca3a4c4f5e5d4a7d1b1a7d5752a7f96f", size = 79077, upload-time = "2024-11-01T14:07:03.893Z" },
|
| 4247 |
-
{ url = "https://files.pythonhosted.org/packages/2c/3b/b8964e04ae1a025c44ba8e4291f86e97fac443bca31de8bd98d3263d2fcf/watchdog-6.0.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:e3df4cbb9a450c6d49318f6d14f4bbc80d763fa587ba46ec86f99f9e6876bb26", size = 79078, upload-time = "2024-11-01T14:07:05.189Z" },
|
| 4248 |
-
{ url = "https://files.pythonhosted.org/packages/62/ae/a696eb424bedff7407801c257d4b1afda455fe40821a2be430e173660e81/watchdog-6.0.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:2cce7cfc2008eb51feb6aab51251fd79b85d9894e98ba847408f662b3395ca3c", size = 79077, upload-time = "2024-11-01T14:07:06.376Z" },
|
| 4249 |
-
{ url = "https://files.pythonhosted.org/packages/b5/e8/dbf020b4d98251a9860752a094d09a65e1b436ad181faf929983f697048f/watchdog-6.0.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:20ffe5b202af80ab4266dcd3e91aae72bf2da48c0d33bdb15c66658e685e94e2", size = 79078, upload-time = "2024-11-01T14:07:07.547Z" },
|
| 4250 |
-
{ url = "https://files.pythonhosted.org/packages/07/f6/d0e5b343768e8bcb4cda79f0f2f55051bf26177ecd5651f84c07567461cf/watchdog-6.0.0-py3-none-win32.whl", hash = "sha256:07df1fdd701c5d4c8e55ef6cf55b8f0120fe1aef7ef39a1c6fc6bc2e606d517a", size = 79065, upload-time = "2024-11-01T14:07:09.525Z" },
|
| 4251 |
-
{ url = "https://files.pythonhosted.org/packages/db/d9/c495884c6e548fce18a8f40568ff120bc3a4b7b99813081c8ac0c936fa64/watchdog-6.0.0-py3-none-win_amd64.whl", hash = "sha256:cbafb470cf848d93b5d013e2ecb245d4aa1c8fd0504e863ccefa32445359d680", size = 79070, upload-time = "2024-11-01T14:07:10.686Z" },
|
| 4252 |
-
{ url = "https://files.pythonhosted.org/packages/33/e8/e40370e6d74ddba47f002a32919d91310d6074130fe4e17dabcafc15cbf1/watchdog-6.0.0-py3-none-win_ia64.whl", hash = "sha256:a1914259fa9e1454315171103c6a30961236f508b9b623eae470268bbcc6a22f", size = 79067, upload-time = "2024-11-01T14:07:11.845Z" },
|
| 4253 |
-
]
|
| 4254 |
-
|
| 4255 |
[[package]]
|
| 4256 |
name = "watchfiles"
|
| 4257 |
version = "1.1.1"
|
|
|
|
| 8 |
]
|
| 9 |
|
| 10 |
[[package]]
|
| 11 |
+
name = "chatbot"
|
| 12 |
version = "0.1.0"
|
| 13 |
source = { virtual = "." }
|
| 14 |
dependencies = [
|
| 15 |
{ name = "ddgs" },
|
|
|
|
| 16 |
{ name = "duckduckgo-search" },
|
| 17 |
{ name = "fastapi" },
|
| 18 |
{ name = "grpcio" },
|
|
|
|
| 35 |
{ name = "langgraph-checkpoint-mongodb" },
|
| 36 |
{ name = "langgraph-checkpoint-postgres" },
|
| 37 |
{ name = "langgraph-checkpoint-sqlite" },
|
|
|
|
| 38 |
{ name = "langsmith" },
|
|
|
|
|
|
|
|
|
|
|
|
|
| 39 |
{ name = "psycopg", extra = ["binary", "pool"] },
|
|
|
|
| 40 |
{ name = "pydantic" },
|
| 41 |
{ name = "pydantic-settings" },
|
|
|
|
|
|
|
| 42 |
{ name = "python-dotenv" },
|
| 43 |
{ name = "setuptools" },
|
|
|
|
| 44 |
{ name = "tiktoken" },
|
| 45 |
+
{ name = "toons" },
|
| 46 |
{ name = "uvicorn" },
|
| 47 |
]
|
| 48 |
|
|
|
|
| 51 |
{ name = "httpx" },
|
| 52 |
{ name = "pydantic" },
|
| 53 |
{ name = "python-dotenv" },
|
|
|
|
| 54 |
]
|
| 55 |
dev = [
|
| 56 |
{ name = "langgraph-cli", extra = ["inmem"] },
|
|
|
|
| 66 |
[package.metadata]
|
| 67 |
requires-dist = [
|
| 68 |
{ name = "ddgs", specifier = ">=9.9.1" },
|
|
|
|
| 69 |
{ name = "duckduckgo-search", specifier = ">=7.3.0" },
|
| 70 |
{ name = "fastapi", specifier = "~=0.115.5" },
|
| 71 |
{ name = "grpcio", specifier = ">=1.68.0" },
|
|
|
|
| 88 |
{ name = "langgraph-checkpoint-mongodb", specifier = "~=0.1.3" },
|
| 89 |
{ name = "langgraph-checkpoint-postgres", specifier = "~=2.0.13" },
|
| 90 |
{ name = "langgraph-checkpoint-sqlite", specifier = "~=2.0.1" },
|
|
|
|
| 91 |
{ name = "langsmith", specifier = "~=0.4.0" },
|
|
|
|
|
|
|
|
|
|
|
|
|
| 92 |
{ name = "psycopg", extras = ["binary", "pool"], specifier = "~=3.2.4" },
|
|
|
|
| 93 |
{ name = "pydantic", specifier = "~=2.10.1" },
|
| 94 |
{ name = "pydantic-settings", specifier = "~=2.12.0" },
|
|
|
|
|
|
|
| 95 |
{ name = "python-dotenv", specifier = "~=1.0.1" },
|
| 96 |
{ name = "setuptools", specifier = "~=75.6.0" },
|
|
|
|
| 97 |
{ name = "tiktoken", specifier = ">=0.8.0" },
|
| 98 |
+
{ name = "toons", specifier = ">=0.5.2" },
|
| 99 |
{ name = "uvicorn", specifier = "~=0.32.1" },
|
| 100 |
]
|
| 101 |
|
|
|
|
| 104 |
{ name = "httpx", specifier = "~=0.28.0" },
|
| 105 |
{ name = "pydantic", specifier = "~=2.10.1" },
|
| 106 |
{ name = "python-dotenv", specifier = "~=1.0.1" },
|
|
|
|
| 107 |
]
|
| 108 |
dev = [
|
| 109 |
{ name = "langgraph-cli", extras = ["inmem"] },
|
|
|
|
| 218 |
{ url = "https://files.pythonhosted.org/packages/f5/10/6c25ed6de94c49f88a91fa5018cb4c0f3625f31d5be9f771ebe5cc7cd506/aiosqlite-0.21.0-py3-none-any.whl", hash = "sha256:2549cf4057f95f53dcba16f2b64e8e2791d7e1adedb13197dd8ed77bb226d7d0", size = 15792, upload-time = "2025-02-03T07:30:13.6Z" },
|
| 219 |
]
|
| 220 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 221 |
[[package]]
|
| 222 |
name = "annotated-types"
|
| 223 |
version = "0.7.0"
|
|
|
|
| 310 |
{ url = "https://files.pythonhosted.org/packages/df/73/b6e24bd22e6720ca8ee9a85a0c4a2971af8497d8f3193fa05390cbd46e09/backoff-2.2.1-py3-none-any.whl", hash = "sha256:63579f9a0628e06278f7e47b7d7d5b6ce20dc65c5e96a6f3ca99a6adca0396e8", size = 15148, upload-time = "2022-10-05T19:19:30.546Z" },
|
| 311 |
]
|
| 312 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 313 |
[[package]]
|
| 314 |
name = "blockbuster"
|
| 315 |
version = "1.5.25"
|
|
|
|
| 606 |
{ url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" },
|
| 607 |
]
|
| 608 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 609 |
[[package]]
|
| 610 |
name = "coverage"
|
| 611 |
version = "7.11.3"
|
|
|
|
| 778 |
{ url = "https://files.pythonhosted.org/packages/55/e2/2537ebcff11c1ee1ff17d8d0b6f4db75873e3b0fb32c2d4a2ee31ecb310a/docstring_parser-0.17.0-py3-none-any.whl", hash = "sha256:cf2569abd23dce8099b300f9b4fa8191e9582dda731fd533daf54c4551658708", size = 36896, upload-time = "2025-07-21T07:35:00.684Z" },
|
| 779 |
]
|
| 780 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 781 |
[[package]]
|
| 782 |
name = "duckduckgo-search"
|
| 783 |
version = "8.1.1"
|
|
|
|
| 833 |
{ url = "https://files.pythonhosted.org/packages/18/79/1b8fa1bb3568781e84c9200f951c735f3f157429f44be0495da55894d620/filetype-1.2.0-py2.py3-none-any.whl", hash = "sha256:7ce71b6880181241cf7ac8697a2f1eb6a8bd9b429f7ad6d27b8db9ba5f1c2d25", size = 19970, upload-time = "2022-11-02T17:34:01.425Z" },
|
| 834 |
]
|
| 835 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 836 |
[[package]]
|
| 837 |
name = "forbiddenfruit"
|
| 838 |
version = "0.1.4"
|
|
|
|
| 912 |
{ url = "https://files.pythonhosted.org/packages/9a/9a/e35b4a917281c0b8419d4207f4334c8e8c5dbf4f3f5f9ada73958d937dcc/frozenlist-1.8.0-py3-none-any.whl", hash = "sha256:0c18a16eab41e82c295618a77502e17b195883241c563b00f0aa5106fc4eaa0d", size = 13409, upload-time = "2025-10-06T05:38:16.721Z" },
|
| 913 |
]
|
| 914 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 915 |
[[package]]
|
| 916 |
name = "google-ai-generativelanguage"
|
| 917 |
version = "0.9.0"
|
|
|
|
| 1378 |
{ url = "https://files.pythonhosted.org/packages/d2/fd/6668e5aec43ab844de6fc74927e155a3b37bf40d7c3790e49fc0406b6578/httpx_sse-0.4.3-py3-none-any.whl", hash = "sha256:0ac1c9fe3c0afad2e0ebb25a934a59f4c7823b60792691f779fad2c5568830fc", size = 8960, upload-time = "2025-10-10T21:48:21.158Z" },
|
| 1379 |
]
|
| 1380 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1381 |
[[package]]
|
| 1382 |
name = "hyperframe"
|
| 1383 |
version = "6.1.0"
|
|
|
|
| 1426 |
{ url = "https://files.pythonhosted.org/packages/cb/b1/3846dd7f199d53cb17f49cba7e651e9ce294d8497c8c150530ed11865bb8/iniconfig-2.3.0-py3-none-any.whl", hash = "sha256:f631c04d2c48c52b84d0d0549c99ff3859c98df65b3101406327ecc7d53fbf12", size = 7484, upload-time = "2025-10-18T21:55:41.639Z" },
|
| 1427 |
]
|
| 1428 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1429 |
[[package]]
|
| 1430 |
name = "jiter"
|
| 1431 |
version = "0.8.2"
|
|
|
|
| 1992 |
{ url = "https://files.pythonhosted.org/packages/66/05/b2d34e16638241e6f27a6946d28160d4b8b641383787646d41a3727e0896/langgraph_sdk-0.2.9-py3-none-any.whl", hash = "sha256:fbf302edadbf0fb343596f91c597794e936ef68eebc0d3e1d358b6f9f72a1429", size = 56752, upload-time = "2025-09-20T18:49:13.346Z" },
|
| 1993 |
]
|
| 1994 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1995 |
[[package]]
|
| 1996 |
name = "langsmith"
|
| 1997 |
version = "0.4.43"
|
|
|
|
| 2085 |
{ url = "https://files.pythonhosted.org/packages/6c/77/d7f491cbc05303ac6801651aabeb262d43f319288c1ea96c66b1d2692ff3/lxml-6.0.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:27220da5be049e936c3aca06f174e8827ca6445a4353a1995584311487fc4e3e", size = 3518768, upload-time = "2025-09-22T04:04:57.097Z" },
|
| 2086 |
]
|
| 2087 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 2088 |
[[package]]
|
| 2089 |
name = "marshmallow"
|
| 2090 |
version = "3.26.1"
|
|
|
|
| 2131 |
{ url = "https://files.pythonhosted.org/packages/01/9a/35e053d4f442addf751ed20e0e922476508ee580786546d699b0567c4c67/motor-3.7.1-py3-none-any.whl", hash = "sha256:8a63b9049e38eeeb56b4fdd57c3312a6d1f25d01db717fe7d82222393c410298", size = 74996, upload-time = "2025-05-14T18:56:31.665Z" },
|
| 2132 |
]
|
| 2133 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 2134 |
[[package]]
|
| 2135 |
name = "multidict"
|
| 2136 |
version = "6.7.0"
|
|
|
|
| 2253 |
{ url = "https://files.pythonhosted.org/packages/79/7b/2c79738432f5c924bef5071f933bcc9efd0473bac3b4aa584a6f7c1c8df8/mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505", size = 4963, upload-time = "2025-04-22T14:54:22.983Z" },
|
| 2254 |
]
|
| 2255 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 2256 |
[[package]]
|
| 2257 |
name = "nodeenv"
|
| 2258 |
version = "1.9.1"
|
|
|
|
| 2366 |
{ url = "https://files.pythonhosted.org/packages/47/4f/4a617ee93d8208d2bcf26b2d8b9402ceaed03e3853c754940e2290fed063/ollama-0.6.1-py3-none-any.whl", hash = "sha256:fc4c984b345735c5486faeee67d8a265214a31cbb828167782dc642ce0a2bf8c", size = 14354, upload-time = "2025-11-13T23:02:16.292Z" },
|
| 2367 |
]
|
| 2368 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 2369 |
[[package]]
|
| 2370 |
name = "openai"
|
| 2371 |
version = "2.2.0"
|
|
|
|
| 2564 |
{ url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469, upload-time = "2025-04-19T11:48:57.875Z" },
|
| 2565 |
]
|
| 2566 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 2567 |
[[package]]
|
| 2568 |
name = "pathspec"
|
| 2569 |
version = "0.12.1"
|
|
|
|
| 2585 |
{ url = "https://files.pythonhosted.org/packages/fb/81/f457d6d361e04d061bef413749a6e1ab04d98cfeec6d8abcfe40184750f3/pgvector-0.3.6-py3-none-any.whl", hash = "sha256:f6c269b3c110ccb7496bac87202148ed18f34b390a0189c783e351062400a75a", size = 24880, upload-time = "2024-10-27T00:15:08.045Z" },
|
| 2586 |
]
|
| 2587 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 2588 |
[[package]]
|
| 2589 |
name = "platformdirs"
|
| 2590 |
version = "4.5.0"
|
|
|
|
| 2945 |
{ url = "https://files.pythonhosted.org/packages/c1/60/5d4751ba3f4a40a6891f24eec885f51afd78d208498268c734e256fb13c4/pydantic_settings-2.12.0-py3-none-any.whl", hash = "sha256:fddb9fd99a5b18da837b29710391e945b1e30c135477f484084ee513adb93809", size = 51880, upload-time = "2025-11-10T14:25:45.546Z" },
|
| 2946 |
]
|
| 2947 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 2948 |
[[package]]
|
| 2949 |
name = "pygments"
|
| 2950 |
version = "2.19.2"
|
|
|
|
| 3010 |
{ url = "https://files.pythonhosted.org/packages/08/e2/7d3a30ac905c99ea93729e03d2bb3d16fec26a789e98407d61cb368ab4bb/pymongo-4.12.1-cp313-cp313t-win_amd64.whl", hash = "sha256:46d86cf91ee9609d0713242a1d99fa9e9c60b4315e1a067b9a9e769bedae629d", size = 1003332, upload-time = "2025-04-29T18:45:54.631Z" },
|
| 3011 |
]
|
| 3012 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 3013 |
[[package]]
|
| 3014 |
name = "pytest"
|
| 3015 |
version = "9.0.1"
|
|
|
|
| 3095 |
{ url = "https://files.pythonhosted.org/packages/45/58/38b5afbc1a800eeea951b9285d3912613f2603bdf897a4ab0f4bd7f405fc/python_multipart-0.0.20-py3-none-any.whl", hash = "sha256:8a62d3a8335e06589fe01f2a3e178cdcc632f3fbe0d492ad9ee0ec35aab1f104", size = 24546, upload-time = "2024-12-16T19:45:44.423Z" },
|
| 3096 |
]
|
| 3097 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 3098 |
[[package]]
|
| 3099 |
name = "pywin32"
|
| 3100 |
version = "311"
|
|
|
|
| 3241 |
{ url = "https://files.pythonhosted.org/packages/1e/db/4254e3eabe8020b458f1a747140d32277ec7a271daf1d235b70dc0b4e6e3/requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6", size = 64738, upload-time = "2025-08-18T20:46:00.542Z" },
|
| 3242 |
]
|
| 3243 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 3244 |
[[package]]
|
| 3245 |
name = "requests-toolbelt"
|
| 3246 |
version = "1.0.0"
|
|
|
|
| 3443 |
{ url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050, upload-time = "2024-12-04T17:35:26.475Z" },
|
| 3444 |
]
|
| 3445 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 3446 |
[[package]]
|
| 3447 |
name = "sniffio"
|
| 3448 |
version = "1.3.1"
|
|
|
|
| 3541 |
{ url = "https://files.pythonhosted.org/packages/8b/0c/9d30a4ebeb6db2b25a841afbb80f6ef9a854fc3b41be131d249a977b4959/starlette-0.46.2-py3-none-any.whl", hash = "sha256:595633ce89f8ffa71a015caed34a5b2dc1c0cdb3f0f1fbd1e69339cf2abeec35", size = 72037, upload-time = "2025-04-13T13:56:16.21Z" },
|
| 3542 |
]
|
| 3543 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 3544 |
[[package]]
|
| 3545 |
name = "structlog"
|
| 3546 |
version = "25.5.0"
|
|
|
|
| 3550 |
{ url = "https://files.pythonhosted.org/packages/a8/45/a132b9074aa18e799b891b91ad72133c98d8042c70f6240e4c5f9dabee2f/structlog-25.5.0-py3-none-any.whl", hash = "sha256:a8453e9b9e636ec59bd9e79bbd4a72f025981b3ba0f5837aebf48f02f37a7f9f", size = 72510, upload-time = "2025-10-27T08:28:21.535Z" },
|
| 3551 |
]
|
| 3552 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 3553 |
[[package]]
|
| 3554 |
name = "tenacity"
|
| 3555 |
version = "9.1.2"
|
|
|
|
| 3599 |
{ url = "https://files.pythonhosted.org/packages/93/e0/6cc82a562bc6365785a3ff0af27a2a092d57c47d7a81d9e2295d8c36f011/tiktoken-0.12.0-cp313-cp313t-win_amd64.whl", hash = "sha256:dc2dd125a62cb2b3d858484d6c614d136b5b848976794edfb63688d539b8b93f", size = 878777, upload-time = "2025-10-06T20:22:18.036Z" },
|
| 3600 |
]
|
| 3601 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 3602 |
[[package]]
|
| 3603 |
name = "tomli"
|
| 3604 |
version = "2.3.0"
|
|
|
|
| 3633 |
]
|
| 3634 |
|
| 3635 |
[[package]]
|
| 3636 |
+
name = "toons"
|
| 3637 |
+
version = "0.5.2"
|
| 3638 |
source = { registry = "https://pypi.org/simple" }
|
| 3639 |
+
sdist = { url = "https://files.pythonhosted.org/packages/3f/f2/d54096386b7788e03faf002a2f65a3b18865a03bee1494d61b68daa40c6f/toons-0.5.2.tar.gz", hash = "sha256:f2db1f75d95e7b118399a19d0dbcc2fd128b7ea0f8047c01cddb1f85e03d976f", size = 90914, upload-time = "2026-02-07T21:05:54.923Z" }
|
| 3640 |
wheels = [
|
| 3641 |
+
{ url = "https://files.pythonhosted.org/packages/7e/f4/e08cf0eef8cde5b7ce40538a1c2ffe33504d9a19187343619daa40eec455/toons-0.5.2-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:1e9aedad5813471f73411728f45c9906d1c98264980ca4f4a0369f745634eb8f", size = 306829, upload-time = "2026-02-07T21:05:48.672Z" },
|
| 3642 |
+
{ url = "https://files.pythonhosted.org/packages/42/ff/5e227d03ebe7d01363b4ffe133d417ce1aa5dca3d7b607638edf9cd0d395/toons-0.5.2-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:8abfb1bb166eb09525985007befc0ca160b329b22e39efc4492ed3ae318e9415", size = 300827, upload-time = "2026-02-07T21:05:47.333Z" },
|
| 3643 |
+
{ url = "https://files.pythonhosted.org/packages/43/14/48973b124a11dcf72f2e1382453c0eda0c9f69e8c8114b20ff176567f5d2/toons-0.5.2-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1981469c1c9196a525f84e683d39d588050e94868cf30416bd90040c0c07b190", size = 332814, upload-time = "2026-02-07T21:05:35.94Z" },
|
| 3644 |
+
{ url = "https://files.pythonhosted.org/packages/57/bc/d5c1b05ea73413b57fefe805a53541ca8560e30f77a9b2fe4d768ef3ab44/toons-0.5.2-cp37-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eac2846301cf89a0f7e0dead1605e5385aa5f0abdf14be52454a9ecb11032193", size = 341584, upload-time = "2026-02-07T21:05:37.535Z" },
|
| 3645 |
+
{ url = "https://files.pythonhosted.org/packages/65/33/70ebdabe8e530018d6beeb29a2e4e847bb782669e16f0cddad8c9eb18f32/toons-0.5.2-cp37-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:64365663c8ee406809fb21839c3d05f1f6a1625f5db66881768a72519674c093", size = 451260, upload-time = "2026-02-07T21:05:38.97Z" },
|
| 3646 |
+
{ url = "https://files.pythonhosted.org/packages/ee/ed/4dc5539c19bb641638db75d8cb957d6a02718344bcb9b4c02d1483bba20f/toons-0.5.2-cp37-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8d73c0b74806284e395a05f5545def63d0a514763eea35cdfce3dd947273484", size = 370220, upload-time = "2026-02-07T21:05:40.377Z" },
|
| 3647 |
+
{ url = "https://files.pythonhosted.org/packages/85/66/a6142f1321ca5c36e5b9f8df57acd01a3b0d0843910cacb174051a58692b/toons-0.5.2-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c831c966c7d20aa82c3902949bea8de7489e9b8c5c246b7e27953b2290634864", size = 344061, upload-time = "2026-02-07T21:05:44.378Z" },
|
| 3648 |
+
{ url = "https://files.pythonhosted.org/packages/a9/29/6fc8c8c7fe7704506b21f5932e1558e1752da107b1a24cd4c8673a889a6f/toons-0.5.2-cp37-abi3-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:380059a8746c11ee5032672c78e730cf6598dcd65a442614edafd2ccc762f1e8", size = 364139, upload-time = "2026-02-07T21:05:41.914Z" },
|
| 3649 |
+
{ url = "https://files.pythonhosted.org/packages/7e/87/b813647a6e175b2b5f130619cff3ca0c155087c9c923397f713252fd81be/toons-0.5.2-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:1feb07b10b1cddf0642b0b674ea2f219fb3c498c68be383abde16f001ec21a02", size = 508242, upload-time = "2026-02-07T21:05:49.536Z" },
|
| 3650 |
+
{ url = "https://files.pythonhosted.org/packages/cc/32/24c42ff5c4e64bd6f65e46245045ff89229c948fb49858a95e49f02429b9/toons-0.5.2-cp37-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:5316ef0ff48faac69425e21913efb9a54b65b3219f736b9c33013de0399c4110", size = 611823, upload-time = "2026-02-07T21:05:50.61Z" },
|
| 3651 |
+
{ url = "https://files.pythonhosted.org/packages/40/c7/9a3b547980978d85f60caf20d16ff0ae15c435bb32c81473b9b63298693b/toons-0.5.2-cp37-abi3-musllinux_1_2_i686.whl", hash = "sha256:f0c69b7b8e4ad50bea96fc9391058983caaf948c7a9c59e2315e2a6711cbc4fe", size = 579546, upload-time = "2026-02-07T21:05:52.1Z" },
|
| 3652 |
+
{ url = "https://files.pythonhosted.org/packages/5b/36/a0fc8a06d14891511bb489912eecd0ebb35043b64407c9769ca67682739e/toons-0.5.2-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:d291ca910520166ce94ea1ed02e41b6aa37a7e7201f3cd82da6ee6d255dae212", size = 547496, upload-time = "2026-02-07T21:05:53.467Z" },
|
| 3653 |
+
{ url = "https://files.pythonhosted.org/packages/9d/9d/982a90f90b4adf0ac3d435a50157f79332788f7d49c83e5397fe4ed8fbcc/toons-0.5.2-cp37-abi3-win32.whl", hash = "sha256:6b6637f2605d42f73c2b108ea657db6f4612d133faa6b0fc61ebec780ac38c43", size = 186853, upload-time = "2026-02-07T21:05:56.987Z" },
|
| 3654 |
+
{ url = "https://files.pythonhosted.org/packages/75/a8/3eeb6910ae690763ca110bee2e12b2a536dad2c276bfc2a4cb38d2f6c7a0/toons-0.5.2-cp37-abi3-win_amd64.whl", hash = "sha256:b99219a9dee7de101cb2c41a322c8badf20bf2787b229b62be7830196a91646d", size = 193981, upload-time = "2026-02-07T21:05:55.654Z" },
|
| 3655 |
+
{ url = "https://files.pythonhosted.org/packages/44/ca/9ac107887b21543c56acf26a9af4106e9ebe580d5994d3be47eca60d6ea3/toons-0.5.2-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:39d8bc286843e95d9b054b4cfa80097a0300673ce5e85c40572a3df8edb30f0d", size = 340664, upload-time = "2026-02-07T21:05:45.89Z" },
|
| 3656 |
+
{ url = "https://files.pythonhosted.org/packages/53/ab/311d50a4b63e9df6e7c800f36c60950589ff49367915f647756dffdba3d9/toons-0.5.2-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:08091dda1fd73efe5e6d912f414dafaef337341456fa4f293489f03ceae86224", size = 360468, upload-time = "2026-02-07T21:05:43.385Z" },
|
| 3657 |
]
|
| 3658 |
|
| 3659 |
[[package]]
|
|
|
|
| 3765 |
{ url = "https://files.pythonhosted.org/packages/79/0c/c05523fa3181fdf0c9c52a6ba91a23fbf3246cc095f26f6516f9c60e6771/virtualenv-20.35.4-py3-none-any.whl", hash = "sha256:c21c9cede36c9753eeade68ba7d523529f228a403463376cf821eaae2b650f1b", size = 6005095, upload-time = "2025-10-29T06:57:37.598Z" },
|
| 3766 |
]
|
| 3767 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 3768 |
[[package]]
|
| 3769 |
name = "watchfiles"
|
| 3770 |
version = "1.1.1"
|