import asyncio
import json
import os
from typing import Dict, List, Tuple

from dotenv import load_dotenv
from openai import OpenAI
from pydantic import BaseModel

load_dotenv()

# Initialize OpenRouter client
client = OpenAI(
    base_url="https://openrouter.ai/api/v1",
    api_key=os.getenv("OPENROUTER_API_KEY"),
)

# Priority list of models, tried in order (kept consistent with other files)
MODELS = [
    "deepseek/deepseek-r1",
    "deepseek/deepseek-r1-distill-llama-70b",
    "google/gemini-2.5-flash-lite",
]


class MetaReviewResult(BaseModel):
    meta_review: str
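
# NOTE: MetaReviewResult is not used below -- the model is prompted to return
# plain text rather than JSON. Presumably it is kept for a structured-output
# variant; that rationale is an assumption, not documented in this file.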


def construct_meta_review_prompt(
    paper_title: str,
    paper_abstract: str,
    resolutions: List[Dict],
    search_results: Dict,
) -> Tuple[str, str]:
    """
    Construct the (system_prompt, user_prompt) pair for meta-review generation.
    """
    # Aggregate accepted/rejected critique points across all resolutions
    all_accepted = {}
    all_rejected = {}
    resolution_summaries = []

    for resolution in resolutions:
        details = resolution.get('resolution_details', {})

        # Merge accepted points by category
        accepted = details.get('accepted_critique_points', {})
        for category, points in accepted.items():
            all_accepted.setdefault(category, []).extend(points)

        # Merge rejected points by category
        rejected = details.get('rejected_critique_points', {})
        for category, points in rejected.items():
            all_rejected.setdefault(category, []).extend(points)

        # Collect per-resolution summaries
        summary = details.get('final_resolution_summary', '')
        if summary:
            resolution_summaries.append(summary)

    system_prompt = """
You are an expert meta-reviewer. Your task is to generate a structured, comprehensive
meta-review based on reviewer critiques, disagreements, and resolutions.
Your review should be clear, concise, well-structured, and provide actionable feedback.
Respond with ONLY the meta-review text (no JSON, no preamble).
"""

    user_prompt = f"""
### **Paper Details**
**Title:** {paper_title}
**Abstract:** {paper_abstract}

### **Disagreement Resolution Summaries**
{chr(10).join(f"- {summary}" for summary in resolution_summaries)}

### **Accepted Critique Points (Valid Feedback)**
{json.dumps(all_accepted, indent=2)}

### **Rejected Critique Points (Unjustified Criticism)**
{json.dumps(all_rejected, indent=2)}

### **State-of-the-Art (SoTA) Findings**
{search_results.get('SoTA_Results', '')[:2000]}

### **Retrieved Evidence for Validation**
{json.dumps(search_results.get('Retrieved_Evidence', {}), indent=2)[:2000]}

### **Meta-Review Task**
Generate a comprehensive meta-review that:
1. Summarizes the paper's main contribution and approach
2. Discusses the strengths of the paper (based on accepted critiques and evidence)
3. Discusses the weaknesses and concerns (based on valid accepted critiques)
4. Addresses key disagreements among reviewers and how they were resolved
5. Compares the paper's claims with state-of-the-art research
6. Provides a final verdict on the paper's quality, novelty, significance, and clarity
7. Offers constructive recommendations for improvement

Format the meta-review professionally with clear sections.
"""

    return system_prompt, user_prompt


async def generate_meta_review(
    paper_title: str,
    paper_abstract: str,
    resolutions: List[Dict],
    search_results: Dict,
    retries: int = 3,  # Attempts per model before falling back to the next one
) -> str:
    """
    Generate a meta-review using DeepSeek-R1, falling back through MODELS on failure.
    """
    if not resolutions:
        return "Unable to generate meta-review: No disagreement resolutions available."

    system_prompt, user_prompt = construct_meta_review_prompt(
        paper_title,
        paper_abstract,
        resolutions,
        search_results,
    )

    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt},
    ]

    last_exception = None
    for model in MODELS:
        print(f"Generating Meta-Review with model: {model}")
        for attempt in range(retries):
            try:
                # The OpenAI client is synchronous, so run the call in a worker
                # thread to avoid blocking the event loop.
                response = await asyncio.to_thread(
                    client.chat.completions.create,
                    model=model,
                    messages=messages,
                    # Cap max_tokens to prevent 402 (insufficient credit) errors:
                    # 8192 tokens is plenty for a meta-review (~6000 words) and
                    # far cheaper than 64k.
                    max_tokens=8192,
                )

                content = response.choices[0].message.content if response.choices else None
                if not content or not content.strip():
                    raise ValueError("Empty response from AI")
                meta_review_text = content.strip()

                # Strip surrounding markdown code fences if the model added any
                if meta_review_text.startswith("```"):
                    lines = meta_review_text.split("\n")[1:]  # drop the opening fence
                    if lines and lines[-1].startswith("```"):
                        lines = lines[:-1]  # drop the closing fence
                    meta_review_text = "\n".join(lines)

                return meta_review_text
            except Exception as e:
                last_exception = e
                error_msg = str(e)
                print(f"Model {model} - Attempt {attempt + 1} failed: {error_msg}")

                # On payment errors, skip remaining retries and fall back to the
                # next (cheaper) model immediately.
                if "402" in error_msg or "insufficient_quota" in error_msg:
                    print("Insufficient credits detected. Switching to cheaper model...")
                    break

                # Exponential backoff before retrying the same model
                if attempt < retries - 1:
                    await asyncio.sleep(2 ** attempt)

    return f"Error generating meta-review after trying all models: {str(last_exception)}"