Spaces: Running

Geevarghese George committed · eda1760

Parent(s): 33fcbb0

add oauth

Browse files:
- .env_reference +1 -1
- README.md +19 -0
- app.py +94 -61
- poetry.lock +41 -43
- pyproject.toml +1 -1
- src/upgrade_advisor/agents/package.py +0 -2
- src/upgrade_advisor/agents/prompts.py +30 -20
- src/upgrade_advisor/chat/chat.py +25 -10
- src/upgrade_advisor/misc.py +18 -1
.env_reference CHANGED

@@ -1,5 +1,5 @@
 # rename this file to .env and fill in the values below
 GITHUB_PAT = "your_github_personal_access_token_here"
 # see https://github.com/github/github-mcp-server?tab=readme-ov-file#default-toolset
-GITHUB_TOOLSETS
+GITHUB_TOOLSETS="default,discussions,experiments"
 HF_TOKEN ="hf_"
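For reference, a minimal sketch of how these `.env` values are typically consumed at startup, assuming the project loads them with python-dotenv (the actual config module is not part of this diff, so the loading code here is illustrative):

```python
# Hypothetical loader, assuming python-dotenv; the project's real config
# module is not shown in this commit.
import os

from dotenv import load_dotenv

load_dotenv()  # reads .env from the project root

GITHUB_PAT = os.getenv("GITHUB_PAT")
# Comma-separated toolsets, e.g. "default,discussions,experiments"
GITHUB_TOOLSETS = os.getenv("GITHUB_TOOLSETS", "default").split(",")
HF_TOKEN = os.getenv("HF_TOKEN")  # optional once OAuth login lands (see app.py below)
```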
README.md CHANGED

@@ -1,4 +1,23 @@
+---
+title: FixMyEnv Agent
+emoji: 🐍
+colorFrom: blue
+colorTo: green
+sdk: gradio
+sdk_version: 5.49.1
+app_file: app.py
+pinned: false
+license: mit
+short_description: MCP for Agents that plan your python package upgrade
+hf_oauth: true
+tags:
+- building-mcp-track-enterprise
+- building-mcp-track-customer
+- mcp-in-action-track-customer
+- mcp-in-action-track-enterprise
+---
 # Instructions
+
 1. Create a scoped Personal Access Token from GitHub at https://github.com/settings/personal-access-tokens/new with the following scopes, allowing access to public repositories.
 2. Store it as GITHUB_PAT in a .env file in the root directory of the project.
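The key addition in this front matter is `hf_oauth: true`: on Spaces, that flag is what lets Gradio inject the logged-in user's OAuth state into event handlers. A minimal sketch of the pattern, following the Gradio OAuth guide linked later in this commit (illustrative only; the app's actual code appears in the app.py diff below):

```python
import gradio as gr

def greet(profile: gr.OAuthProfile | None) -> str:
    # On a Space with hf_oauth: true, Gradio fills this parameter from the
    # user's session; when running locally (or logged out) it stays None.
    if profile is None:
        return "Not logged in."
    return f"Hello, {profile.username}!"

with gr.Blocks() as demo:
    gr.LoginButton()
    out = gr.Textbox(label="status")
    demo.load(greet, inputs=None, outputs=out)

demo.launch()
```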
app.py CHANGED

@@ -1,8 +1,10 @@
 import logging
+import os
 import shutil
 from pathlib import Path
 
 import gradio as gr
+from huggingface_hub import whoami  # noqa: F401
 from mcp import StdioServerParameters
 from mcpadapt.core import MCPAdapt  # noqa: F401
 from smolagents import InferenceClientModel
@@ -15,7 +17,6 @@ from config import (
     GITHUB_PAT,
     GITHUB_READ_ONLY,
     GITHUB_TOOLSETS,
-    HF_TOKEN,
 )
 from src.upgrade_advisor.agents.package import PackageDiscoveryAgent
 from src.upgrade_advisor.chat.chat import (
@@ -27,7 +28,6 @@ from src.upgrade_advisor.misc import (
     _monkeypatch_gradio_save_history,
     get_example_questions,
 )
-from src.upgrade_advisor.theme import christmas
 
 logger = logging.getLogger(__name__)
 logger.setLevel(logging.INFO)
@@ -41,10 +41,26 @@ uploads_dir = uploads_dir.resolve()
 _monkeypatch_gradio_save_history()
 
 
-async def chat_fn(message, history, persisted_attachments=None):
+def get_agent_model(model_name: str, oauth_token: gr.OAuthToken = None):
+    token = os.getenv("HF_TOKEN", None) or oauth_token.token if oauth_token else None
+    model = InferenceClientModel(
+        token=token,
+        model_id=model_name,
+    )
+    return model
+
+
+async def chat_fn(
+    message,
+    history,
+    persisted_attachments=None,
+    profile: gr.OAuthProfile = None,
+    oauth_token: gr.OAuthToken = None,
+):
     # the incoming history is a list of dicts with 'role' and 'content' keys
     from datetime import datetime
 
+    token = os.getenv("HF_TOKEN", None) or oauth_token.token if oauth_token else None
     timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
     logger.info(f"Received message: {message}")
     logger.info(f"History: {history}")
@@ -53,6 +69,7 @@ async def chat_fn(message, history, persisted_attachments=None):
             history,
             turns_cutoff=CHAT_HISTORY_TURNS_CUTOFF,
             word_cutoff=CHAT_HISTORY_WORD_CUTOFF,
+            token=token,
        )
     else:
         summarized_history = ""
@@ -72,7 +89,9 @@ async def chat_fn(message, history, persisted_attachments=None):
     # overwrite messages with the text content only
     message = message.strip()
     rewritten_message, is_rewritten_good = await qn_rewriter(
-        message,
+        message,
+        summarized_history,
+        token=token,
     )
 
     if is_rewritten_good:
@@ -113,20 +132,27 @@ async def chat_fn(message, history, persisted_attachments=None):
     logger.info(f"Final message to agent:\n{message}")
     # Run the package discovery agent to build context
     context = agent.discover_package_info(
-        user_input=message,
+        user_input=message,
+        reframed_question=rewritten_message,
     )
     # Build a concise context from tool outputs
     logger.info(f"Built context of length {len(context)}")
     logger.info(f"Context content:\n{context}")
     # Run a document QA pass using the user's question
     qa_answer = await run_document_qa(
-        question=message,
+        question=message,
+        context=context,
+        rewritten_question=rewritten_message,
+        token=token,
     )
     logger.info(f"QA answer: {qa_answer}")
-
-
-
-
+    yield (
+        {
+            "role": "assistant",
+            "content": qa_answer,
+        },
+        attachments,
+    )
 
 
 def main():
@@ -162,32 +188,15 @@ def main():
         ],
     )
 
-    # pypi_mcp_client = MCPClient(
-    #     server_parameters=[
-    #         # pypi_mcp_params,
-    #         gh_mcp_params,
-    #         upload_mcp_params,
-    #     ],
-    #     structured_output=True,
-    # )
-
-    model = InferenceClientModel(
-        token=HF_TOKEN,
-        model_id=AGENT_MODEL,
-    )
-
     # Gradio chat interface state to persist uploaded files
     files_state = gr.State([])
-
-    # with MCPAdapt(
-    #     serverparams=[
-    #         gh_mcp_params,
-    #         upload_mcp_params,
-    #     ],
-    #     adapter=SmolAgentsAdapter(structured_output=True),
-    # ) as toolset:
     example_questions = get_example_questions(n=4)
 
+    # TODO: use the gr.Blocks to add login blocks
+    # Add login with huggingface hub to cache token: https://www.gradio.app/guides/sharing-your-app#o-auth-login-via-hugging-face
+    # use a limited token with read-only access to public repos only
+    # (GITHUB_PAT)
+    # deploy to hf spaces with mcp server enabled
     with MCPClient(
         server_parameters=[
             gh_mcp_params,
@@ -198,39 +207,63 @@ def main():
         logger.info("MCP clients connected successfully")
 
         global agent
+        model = get_agent_model(model_name=AGENT_MODEL)
         agent = PackageDiscoveryAgent(
             model=model,
             tools=toolset,
         )
-        #
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        # rewrite with Blocks
+        with gr.Blocks() as demo:
+            gr.LoginButton()
+            gr.Markdown("# 💻 FixMyEnv: Package Upgrade Advisor 🚀🔧🐍📦⚙️")
+            gr.Markdown(
+                f"""
+                Welcome to the Package Upgrade Advisor!
+                This AI-powered assistant helps you identify and resolve
+                outdated or vulnerable packages in your Python projects.
+                Simply ask a question about package upgrades, and if you
+                have a `pyproject.toml` or `requirements.txt` file, feel free
+                to attach it for more tailored advice.
+
+                ## How to use:
+                1. Type your question in the chat box below.
+                2. (Optional) Attach your `pyproject.toml` or `requirements.txt`
+                   file using the upload button. Uploaded files are
+                   immediately removed after the session ends.
+                3. Click "Submit" and wait for the AI to analyze your query
+                   and provide recommendations.
+
+                Note: The assistant uses the Huggingface Inference API for
+                [{AGENT_MODEL}](https://huggingface.co/{AGENT_MODEL}) LLM
+                capabilities with Smolagents tool calling and GitHub MCP
+                for package data retrieval. Huggingface login is therefore
+                required to use the app. This gradio app serves as an MCP Server
+                as well!
+                """
+            )
+            gr.ChatInterface(
+                fn=chat_fn,
+                chatbot=gr.Chatbot(
+                    height=600,
+                ),
+                additional_inputs_accordion="""
+                You may attach a pyproject.toml or requirements.txt file to get
+                specific upgrade advice for your project.
+                """,
+                textbox=gr.MultimodalTextbox(
+                    label="pyproject.toml or requirements.txt file can be attached",
+                    file_types=[".toml", ".txt"],
+                    file_count="single",
+                    min_width=100,
+                    sources="upload",
+                ),
+                additional_inputs=[files_state],
+                additional_outputs=[files_state],
+                save_history=True,
+                examples=example_questions,
+                stop_btn=True,
+                # theme=christmas,
+            )
         demo.launch(mcp_server=True, share=False)
 
     finally:
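One detail worth flagging in the token plumbing above: `os.getenv("HF_TOKEN", None) or oauth_token.token if oauth_token else None` parses as `(os.getenv(...) or oauth_token.token) if oauth_token else None`, so the environment-variable fallback is silently dropped whenever no OAuth token is present. A defensive sketch of the apparently intended precedence (a suggested rewrite, not part of this commit):

```python
import os

def resolve_hf_token(oauth_token=None):
    """Prefer the HF_TOKEN env var, then fall back to the OAuth session token.

    Suggested fix: the conditional expression in the commit evaluates to
    None whenever oauth_token is None, even if HF_TOKEN is set.
    """
    env_token = os.getenv("HF_TOKEN")
    if env_token:
        return env_token
    if oauth_token is not None:
        return oauth_token.token  # gr.OAuthToken exposes the raw token here
    return None
```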
poetry.lock CHANGED

@@ -129,6 +129,21 @@ files = [
     {file = "audioop_lts-0.2.2.tar.gz", hash = "sha256:64d0c62d88e67b98a1a5e71987b7aa7b5bcffc7dcee65b635823dbdd0a8dbbd0"},
 ]
 
+[[package]]
+name = "authlib"
+version = "1.6.5"
+description = "The ultimate Python library in building OAuth and OpenID Connect servers and clients."
+optional = false
+python-versions = ">=3.9"
+groups = ["main"]
+files = [
+    {file = "authlib-1.6.5-py2.py3-none-any.whl", hash = "sha256:3e0e0507807f842b02175507bdee8957a1d5707fd4afb17c32fb43fee90b6e3a"},
+    {file = "authlib-1.6.5.tar.gz", hash = "sha256:6aaf9c79b7cc96c900f0b284061691c5d4e61221640a948fe690b556a6d6d10b"},
+]
+
+[package.dependencies]
+cryptography = "*"
+
 [[package]]
 name = "backports-asyncio-runner"
 version = "1.2.0"
@@ -799,40 +814,41 @@ tqdm = ["tqdm"]
 
 [[package]]
 name = "gradio"
-version = "
+version = "6.0.1"
 description = "Python library for easily interacting with trained machine learning models"
 optional = false
 python-versions = ">=3.10"
 groups = ["main"]
 files = [
-    {file = "gradio-
-    {file = "gradio-
+    {file = "gradio-6.0.1-py3-none-any.whl", hash = "sha256:0f98dc8b414a3f3773cbf3caf5a354507c8ae309ed8266e2f30ca9fa53f379b8"},
+    {file = "gradio-6.0.1.tar.gz", hash = "sha256:5d02e6ac34c67aea26b938b8628c8f9f504871392e71f2db559ab8d6799bdf69"},
 ]
 
 [package.dependencies]
 aiofiles = ">=22.0,<25.0"
 anyio = ">=3.0,<5.0"
 audioop-lts = {version = "<1.0", markers = "python_version >= \"3.13\""}
+authlib = {version = "*", optional = true, markers = "extra == \"oauth\""}
 brotli = ">=1.1.0"
 fastapi = ">=0.115.2,<1.0"
 ffmpy = "*"
-gradio-client = "
+gradio-client = "2.0.0"
 groovy = ">=0.1,<1.0"
 httpx = ">=0.24.1,<1.0"
 huggingface-hub = ">=0.33.5,<2.0"
+itsdangerous = {version = "*", optional = true, markers = "extra == \"oauth\""}
 jinja2 = "<4.0"
 markupsafe = ">=2.0,<4.0"
 numpy = ">=1.0,<3.0"
 orjson = ">=3.0,<4.0"
 packaging = "*"
 pandas = ">=1.0,<3.0"
-pillow = ">=8.0,<
-pydantic = ">=2.
+pillow = ">=8.0,<13.0"
+pydantic = ">=2.11.10,<=2.12.4"
 pydub = "*"
 python-multipart = ">=0.0.18"
 pyyaml = ">=5.0,<7.0"
-
-safehttpx = ">=0.1.6,<0.2.0"
+safehttpx = ">=0.1.7,<0.2.0"
 semantic-version = ">=2.0,<3.0"
 starlette = ">=0.40.0,<1.0"
 tomlkit = ">=0.12.0,<0.14.0"
@@ -841,19 +857,19 @@ typing-extensions = ">=4.0,<5.0"
 uvicorn = ">=0.14.0"
 
 [package.extras]
-mcp = ["mcp (
+mcp = ["mcp (>=1.21.0,<2.0.0)", "pydantic (>=2.11)"]
 oauth = ["authlib", "itsdangerous"]
 
 [[package]]
 name = "gradio-client"
-version = "
+version = "2.0.0"
 description = "Python library for easily interacting with trained machine learning models"
 optional = false
 python-versions = ">=3.10"
 groups = ["main"]
 files = [
-    {file = "gradio_client-
-    {file = "gradio_client-
+    {file = "gradio_client-2.0.0-py3-none-any.whl", hash = "sha256:77bedf20edcc232d8e7986c1a22165b2bbca1c7c7df10ba808a093d5180dae18"},
+    {file = "gradio_client-2.0.0.tar.gz", hash = "sha256:56b462183cb8741bd3e69b21db7d3b62c5abb03c2c2bb925223f1eb18f950e89"},
 ]
 
 [package.dependencies]
@@ -862,7 +878,6 @@ httpx = ">=0.24.1"
 huggingface-hub = ">=0.19.3,<2.0"
 packaging = "*"
 typing-extensions = ">=4.0,<5.0"
-websockets = ">=13.0,<16.0"
 
 [[package]]
 name = "groovy"
@@ -1096,6 +1111,18 @@ files = [
     {file = "iniconfig-2.3.0.tar.gz", hash = "sha256:c76315c77db068650d49c5b56314774a7804df16fee4402c1f19d6d15d8c4730"},
 ]
 
+[[package]]
+name = "itsdangerous"
+version = "2.2.0"
+description = "Safely pass data to untrusted environments and back."
+optional = false
+python-versions = ">=3.8"
+groups = ["main"]
+files = [
+    {file = "itsdangerous-2.2.0-py3-none-any.whl", hash = "sha256:c6242fc49e35958c8b15141343aa660db5fc54d4f13a1db01a3f5891b98700ef"},
+    {file = "itsdangerous-2.2.0.tar.gz", hash = "sha256:e0050c0b7da1eea53ffaf149c0cfbb5c6e2e2b69c4bef22c81fa6eb73e5f6173"},
+]
+
 [[package]]
 name = "jinja2"
 version = "3.1.6"
@@ -2708,35 +2735,6 @@ files = [
     {file = "rpds_py-0.28.0.tar.gz", hash = "sha256:abd4df20485a0983e2ca334a216249b6186d6e3c1627e106651943dbdb791aea"},
 ]
 
-[[package]]
-name = "ruff"
-version = "0.14.5"
-description = "An extremely fast Python linter and code formatter, written in Rust."
-optional = false
-python-versions = ">=3.7"
-groups = ["main"]
-files = [
-    {file = "ruff-0.14.5-py3-none-linux_armv6l.whl", hash = "sha256:f3b8248123b586de44a8018bcc9fefe31d23dda57a34e6f0e1e53bd51fd63594"},
-    {file = "ruff-0.14.5-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:f7a75236570318c7a30edd7f5491945f0169de738d945ca8784500b517163a72"},
-    {file = "ruff-0.14.5-py3-none-macosx_11_0_arm64.whl", hash = "sha256:6d146132d1ee115f8802356a2dc9a634dbf58184c51bff21f313e8cd1c74899a"},
-    {file = "ruff-0.14.5-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e2380596653dcd20b057794d55681571a257a42327da8894b93bbd6111aa801f"},
-    {file = "ruff-0.14.5-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2d1fa985a42b1f075a098fa1ab9d472b712bdb17ad87a8ec86e45e7fa6273e68"},
-    {file = "ruff-0.14.5-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88f0770d42b7fa02bbefddde15d235ca3aa24e2f0137388cc15b2dcbb1f7c7a7"},
-    {file = "ruff-0.14.5-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:3676cb02b9061fee7294661071c4709fa21419ea9176087cb77e64410926eb78"},
-    {file = "ruff-0.14.5-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b595bedf6bc9cab647c4a173a61acf4f1ac5f2b545203ba82f30fcb10b0318fb"},
-    {file = "ruff-0.14.5-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f55382725ad0bdb2e8ee2babcbbfb16f124f5a59496a2f6a46f1d9d99d93e6e2"},
-    {file = "ruff-0.14.5-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7497d19dce23976bdaca24345ae131a1d38dcfe1b0850ad8e9e6e4fa321a6e19"},
-    {file = "ruff-0.14.5-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:410e781f1122d6be4f446981dd479470af86537fb0b8857f27a6e872f65a38e4"},
-    {file = "ruff-0.14.5-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:c01be527ef4c91a6d55e53b337bfe2c0f82af024cc1a33c44792d6844e2331e1"},
-    {file = "ruff-0.14.5-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:f66e9bb762e68d66e48550b59c74314168ebb46199886c5c5aa0b0fbcc81b151"},
-    {file = "ruff-0.14.5-py3-none-musllinux_1_2_i686.whl", hash = "sha256:d93be8f1fa01022337f1f8f3bcaa7ffee2d0b03f00922c45c2207954f351f465"},
-    {file = "ruff-0.14.5-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:c135d4b681f7401fe0e7312017e41aba9b3160861105726b76cfa14bc25aa367"},
-    {file = "ruff-0.14.5-py3-none-win32.whl", hash = "sha256:c83642e6fccfb6dea8b785eb9f456800dcd6a63f362238af5fc0c83d027dd08b"},
-    {file = "ruff-0.14.5-py3-none-win_amd64.whl", hash = "sha256:9d55d7af7166f143c94eae1db3312f9ea8f95a4defef1979ed516dbb38c27621"},
-    {file = "ruff-0.14.5-py3-none-win_arm64.whl", hash = "sha256:4b700459d4649e2594b31f20a9de33bc7c19976d4746d8d0798ad959621d64a4"},
-    {file = "ruff-0.14.5.tar.gz", hash = "sha256:8d3b48d7d8aad423d3137af7ab6c8b1e38e4de104800f0d596990f6ada1a9fc1"},
-]
-
 [[package]]
 name = "safehttpx"
 version = "0.1.7"
@@ -3356,4 +3354,4 @@
 [metadata]
 lock-version = "2.1"
 python-versions = ">=3.10,<3.15"
-content-hash = "
+content-hash = "ea00e08a220f0b1891b043f1a06bcd40dd78bf93323c52b20ed3775d4ac97ccb"
pyproject.toml CHANGED

@@ -17,7 +17,7 @@ dependencies = [
     "anyio (>=4.11.0,<5.0.0)",
     "transformers (>=4.57.1,<5.0.0)",
     "typer[all] (>=0.20.0,<0.21.0)",
-    "gradio (>=
+    "gradio[oauth] (>=6.0.1,<7.0.0)",
     "ddgs (>=9.9.1,<10.0.0)",
     "uv (>=0.9.11,<0.10.0)",
    "markdownify (>=1.2.2,<2.0.0)",
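The poetry.lock churn above follows mechanically from this one-line edit: switching to `gradio[oauth]` activates the `oauth = ["authlib", "itsdangerous"]` extra recorded in gradio's metadata, which is why authlib 1.6.5 and itsdangerous 2.2.0 enter the lock. A quick stdlib-only sanity check of the resolved environment, assuming the dependencies were installed as locked:

```python
# Verify the oauth extra resolved; uses only the standard library.
from importlib.metadata import PackageNotFoundError, version

for pkg in ("gradio", "authlib", "itsdangerous"):
    try:
        print(pkg, version(pkg))
    except PackageNotFoundError:
        print(pkg, "missing: was gradio installed with the [oauth] extra?")
```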
src/upgrade_advisor/agents/package.py CHANGED

@@ -28,8 +28,6 @@ logger.setLevel(logging.INFO)
 logger.addHandler(logging.StreamHandler())
 logger.addHandler(logging.FileHandler("package_agent.log"))
 
-# TODO: Fix the async issues with smolagents using mcpadapt
-
 
 class PackageDiscoveryAgent:
     """Agent that discovers metadata about Python packages using MCP tools."""
src/upgrade_advisor/agents/prompts.py CHANGED

@@ -13,9 +13,12 @@ def get_package_discovery_prompt(
         user_input += f"\nREFRAMED QUESTION (LLM-generated):\n{reframed_question}\n"
 
     return f"""
-    You are a package discovery
-    packages.
-
+    You are a package discovery and an upgrade advisor agent for Python
+    packages. You are called "FixMyEnv" and specialize in helping developers
+    identify package upgrade issues, compatibility problems, known bugs, and
+    best practices for managing Python package dependencies.
+    You find relevant metadata about Python packages and look up their github
+    repositories and relevant discussions, issues, releases etc. using the
     available tools and use that context to provide helpful answers
     based on the user's question. If the user asks about upgrade recommendations,
     compatibility issues, known bugs, or best practices, you should gather
@@ -38,26 +41,30 @@ def get_package_discovery_prompt(
     discovery or upgrade advice. Politely inform the user that you can only help
     with Python package-related queries.
 
+    The first step in answering such questions is to gather relevant information about
+    the packages involved using the available tools.
+    Some tools like the `resolve_pyproject_toml` can directly analyze a
+    pyproject.toml content to find compatibility issues and upgrade suggestions.
+    Always prepare a plan before executing any tools. Iterate over the plan
+    step-by-step, using the tools as needed
+    to gather more information. When evidence is sufficient, provide a final answer
+    that directly addresses the user's question with clear recommendations or
+    insights. The recommendations should be in a structured markdown format with
+    bullet points or numbered lists or code blocks where appropriate.
+
+    IMPORTANT CONTEXT AND GUIDELINES:
+    Any issues in converting requirements.txt to pyproject.toml or parsing
+    pyproject.toml are your responsibility to handle using the available tools.
+    Make fixes as needed but report such things later in your final answer.
+
     Your knowledge cutoff may prevent you from knowing what's recent.
-    NO MATTER WHAT, always use the
+    NO MATTER WHAT, always use today's date: {today_date}
     when reasoning about dates and
-    releases
+    releases (dates are in ISO format YYYY-MM-DD).
+    Some tools also provide you the release date information of packages and
+    repo releases, which
     you can transform to ISO format and make comparisons.
 
-    The first step to tackle such questions is to gather relevant information about the
-    packages involved using the available tools. Some tools like the
-    `resolve_pyproject_toml`
-    can directly analyze a pyproject.toml content to find
-    compatibility issues and upgrade suggestions.
-    Use the tools to fetch
-    package metadata, version history, release notes, compatibility info, and
-    known issues. Then, analyze the collected data to identify any potential
-    issues, improvements, or recommendations related to the user's question.
-
-    Always prepare a plan before executing any tools. Iterate over the plan
-    step-by-step, using the tools as needed to gather more information. When evidence
-    is sufficient, provide a final answer that directly addresses the user's
-    question with clear recommendations or insights.
 
     IMPORTANT EXECUTION GUIDELINES:
     - Do NOT print intermediate tool outputs. Do not wrap results in strings
@@ -66,13 +73,16 @@ def get_package_discovery_prompt(
     - The `output_schema` of each tool describes the expected output structure.
     - Make sure your final answer contains a "reasoning" field that explains
       how you arrived at your final answer. Do not omit this field.
-    - Do not mention the tool names, rather mention what the tool helped you discover.
     - Return the final structured object using: final_answer(<python_dict>)
     - Ensure the returned object STRICTLY matches the expected schema for that tool.
       Do not add or rename keys. Keep value types correct.
     - To read the contents of any uploaded files, call the `read_upload_file` tool
       with the path you received (direct file IO like `open()` is blocked).
 
+    In your final answer, mention what steps you followed (enumerated) and
+    the findings from each of those steps. Based on these findings, state your
+    final recommendations.
+
     {user_input}
 
     HINTS:
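The prompt now interpolates a `{today_date}` value so the model reasons against the current date instead of its training cutoff. A small sketch of how such a value is typically produced and used (the actual call site of `get_package_discovery_prompt` is not part of this diff):

```python
from datetime import date

# ISO format (YYYY-MM-DD), the format the prompt tells the model to use
# for release-date comparisons.
today_date = date.today().isoformat()

# The kind of comparison the prompt asks for, given a tool-reported date:
release = date.fromisoformat("2025-11-14")  # hypothetical release date
is_recent = (date.today() - release).days < 90
```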
src/upgrade_advisor/chat/chat.py CHANGED

@@ -21,10 +21,14 @@ logger.setLevel(logging.INFO)
 logger.addHandler(logging.StreamHandler())
 
 
-async def query(payload):
+async def query(payload, token=None):
     API_URL = "https://router.huggingface.co/v1/chat/completions"
+    token = token or os.environ.get("HF_TOKEN", "")  # use passed token or env var
+    if token is None or token == "":
+        logger.error("No Huggingface token provided for API request.")
+
     headers = {
-        "Authorization": f"Bearer {
+        "Authorization": f"Bearer {token}",
     }
     try:
         response = requests.post(API_URL, headers=headers, json=payload, timeout=300)
@@ -78,7 +82,7 @@ def extract_answer_content(answer_content: str) -> str:
 
 
 async def run_document_qa(
-    question: str, context: str, rewritten_question: str = None
+    question: str, context: str, rewritten_question: str = None, token: str = None
 ) -> str:
     response = await query(
         {
@@ -93,14 +97,17 @@ async def run_document_qa(
             },
         ],
         "model": CHAT_MODEL,
-        }
+        },
+        token=token,
     )
 
     answer = parse_response(response)
     return extract_answer_content(answer["content"])
 
 
-async def qn_rewriter_judge(original_question: str, rewritten_question: str) -> str:
+async def qn_rewriter_judge(
+    original_question: str, rewritten_question: str, token: str = None
+) -> str:
     response = await query(
         {
             "messages": [
@@ -112,7 +119,8 @@ async def qn_rewriter_judge(original_question: str, rewritten_question: str) ->
             },
         ],
         "model": CHAT_MODEL,
-        }
+        },
+        token=token,
     )
     answer = parse_response(response)
     answer_text = extract_answer_content(answer["content"])
@@ -123,7 +131,9 @@ async def qn_rewriter_judge(original_question: str, rewritten_question: str) ->
         return False
 
 
-async def qn_rewriter(original_question: str, summarized_history: str = "") -> str:
+async def qn_rewriter(
+    original_question: str, summarized_history: str = "", token: str = None
+) -> str:
     response = await query(
         {
             "messages": [
@@ -135,12 +145,15 @@ async def qn_rewriter(original_question: str, summarized_history: str = "") ->
             },
         ],
         "model": CHAT_MODEL,
-        }
+        },
+        token=token,
     )
 
     answer = parse_response(response)
     rewritten_question = extract_answer_content(answer["content"])
-    is_good = await qn_rewriter_judge(
+    is_good = await qn_rewriter_judge(
+        original_question, rewritten_question, token=token
+    )
 
     return rewritten_question, is_good
 
@@ -149,6 +162,7 @@ async def summarize_chat_history(
     history: list[dict],
     turns_cutoff=10,
     word_cutoff=100,
+    token: str = None,
 ) -> str:
     # history is a list of dicts with 'role' and 'content' keys
     # [{"role": "user", "content": "..."}, {"role": "assistant", "content":
@@ -176,7 +190,8 @@ async def summarize_chat_history(
             },
         ],
         "model": CHAT_MODEL,
-        }
+        },
+        token=token,
     )
 
     answer = parse_response(response)
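Taken together, these changes thread a per-request token from the Gradio OAuth session down to every Hugging Face router call instead of relying on a single process-wide HF_TOKEN. A hedged usage sketch of the new `query` signature (the model id below is an assumed placeholder; in the app it comes from CHAT_MODEL in the project config):

```python
import asyncio

from src.upgrade_advisor.chat.chat import query

async def demo_call(token: str):
    payload = {
        "messages": [
            {"role": "user", "content": "Is pydantic 2.12 safe to upgrade to?"},
        ],
        "model": "meta-llama/Llama-3.3-70B-Instruct",  # assumed placeholder
    }
    # token may come from gr.OAuthToken.token or the HF_TOKEN env var.
    return await query(payload, token=token)

# asyncio.run(demo_call("hf_..."))  # requires a valid Hugging Face token
```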
src/upgrade_advisor/misc.py CHANGED

@@ -147,11 +147,28 @@ def get_example_questions(n: int = 3) -> str:
     ]
     choices = random.sample(all_questions, k=n)
     # format as list of lists for gradio examples
-    choices = [[
+    choices = [[q] for q in choices]
 
     return choices
 
 
+def to_openai_message_format(role: str, content: str, append_to: list = None) -> dict:
+    message = {
+        "role": role,
+        "content": [
+            {
+                "type": "text",
+                "text": content,
+            }
+        ],
+    }
+    if append_to is not None:
+        # if it's supposed to be appended to a list
+        append_to.append(message)
+        return append_to
+    return message
+
+
 def _monkeypatch_gradio_save_history():
     """Guard against non-int indices in Gradio's chat history saver.
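The new `to_openai_message_format` helper normalizes plain strings into the content-parts message shape used by OpenAI-style chat payloads. A short usage sketch:

```python
from src.upgrade_advisor.misc import to_openai_message_format

# Build a message list incrementally; with append_to, the same list is returned.
messages = []
to_openai_message_format("user", "Which gradio version supports OAuth?", append_to=messages)
to_openai_message_format("assistant", "gradio[oauth] 6.0.1, per this commit.", append_to=messages)

# Without append_to, a single message dict is returned instead.
system = to_openai_message_format("system", "You are FixMyEnv.")
assert system["content"][0]["text"] == "You are FixMyEnv."
```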