denario/langgraph_agents/literature.py (3 additions, 1 deletion)
@@ -101,6 +101,8 @@ def semantic_scholar(state: GraphState, config: RunnableConfig):
paper_str = f"""{papers_analyzed+state['literature']['num_papers']}. {title} ({year})\nAuthors: {authors}\nAbstract: {abstract}\nURL: {url}"""

# extract arXiv link, if any
arXiv_pdf = None
arXiv_pdf2 = None
if externalID:
arXiv = externalID.get("ArXiv", None)
if arXiv:
@@ -111,7 +113,7 @@ def semantic_scholar(state: GraphState, config: RunnableConfig):
# extract pdf link, if any
if pdf:
pdf = pdf.get('url', None)
-            if pdf and pdf!=arXiv_pdf and pdf!=arXiv_pdf2:
+            if pdf and (arXiv_pdf is None or (pdf!=arXiv_pdf and pdf!=arXiv_pdf2)):
paper_str = f"{paper_str}\npdf: {pdf}"

# put these papers in the literature.log
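Why this fix works: previously arXiv_pdf and arXiv_pdf2 were bound only inside the "if externalID:" branch, so a Semantic Scholar result without an ArXiv external ID reached the pdf comparison with both names undefined and raised a NameError. Initializing them to None and short-circuiting on "arXiv_pdf is None" keeps the open-access pdf link for such papers. A minimal standalone sketch of the fixed logic; the helper name and sample paper are invented for illustration:

def append_pdf_line(paper_str, pdf, arXiv_pdf=None, arXiv_pdf2=None):
    # Append the openAccessPdf url unless it duplicates one of the arXiv links.
    if pdf:
        url = pdf.get('url', None)
        if url and (arXiv_pdf is None or (url != arXiv_pdf and url != arXiv_pdf2)):
            paper_str = f"{paper_str}\npdf: {url}"
    return paper_str

# A paper with no "ArXiv" external ID no longer crashes:
print(append_pdf_line("1. Some Title (2024)", {"url": "https://example.org/p.pdf"}))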
denario/llm.py (45 additions, 0 deletions)
@@ -25,6 +25,16 @@ class LLM(BaseModel):
temperature=0.7)
"""`gemini-2.5-pro` model."""

gemini3propreview = LLM(name="gemini-3-pro-preview",
max_output_tokens=65536,
temperature=0.7)
"""`gemini-3-pro-preview` model."""

gemini3flashpreview = LLM(name="gemini-3-flash-preview",
max_output_tokens=65536,
temperature=0.7)
"""`gemini-3-flash-preview` model."""

o3mini = LLM(name="o3-mini-2025-01-31",
max_output_tokens=100000,
temperature=None)
@@ -60,6 +70,16 @@ class LLM(BaseModel):
temperature=None)
"""`gpt-5` model """

gpt52 = LLM(name="gpt-5.2",
max_output_tokens=128000,
temperature=None)
"""`gpt-5.2` model."""

gpt52pro = LLM(name="gpt-5.2-pro",
max_output_tokens=128000,
temperature=None)
"""`gpt-5.2-pro` model."""

gpt5mini = LLM(name="gpt-5-mini",
max_output_tokens=128000,
temperature=None)
@@ -80,20 +100,45 @@ class LLM(BaseModel):
temperature=0)
"""`claude-4.1-Opus` model."""

claude45sonnet = LLM(name="claude-sonnet-4-5",
max_output_tokens=64000,
temperature=0)
"""`claude-4.5-Sonnet` model."""

claude45haiku = LLM(name="claude-haiku-4-5",
max_output_tokens=64000,
temperature=0)
"""`claude-4.5-Haiku` model."""

claude45opus = LLM(name="claude-opus-4-5",
max_output_tokens=64000,
temperature=0)
"""`claude-4.5-Opus` model."""


models : Dict[str, LLM] = {
"gemini-2.0-flash" : gemini20flash,
"gemini-2.5-flash" : gemini25flash,
"gemini-2.5-pro" : gemini25pro,
"gemini-3-pro" : gemini3propreview,
"gemini-3-pro-preview" : gemini3propreview,
"gemini-3-flash" : gemini3flashpreview,
"gemini-3-flash-preview" : gemini3flashpreview,
"o3-mini" : o3mini,
"gpt-4o" : gpt4o,
"gpt-4.1" : gpt41,
"gpt-4.1-mini" : gpt41mini,
"gpt-4o-mini" : gpt4omini,
"gpt-4.5" : gpt45,
"gpt-5" : gpt5,
"gpt-5.2" : gpt52,
"gpt-5.2-pro" : gpt52pro,
"gpt-5-mini" : gpt5mini,
"claude-3.7-sonnet" : claude37sonnet,
"claude-4-opus" : claude4opus,
"claude-4.1-opus" : claude41opus,
"claude-4.5-sonnet" : claude45sonnet,
"claude-4.5-haiku" : claude45haiku,
"claude-4.5-opus" : claude45opus,
}
"""Dictionary with the available models."""
denario/paper_agents/literature.py (28 additions, 1 deletion)
@@ -1,6 +1,7 @@
import re
import requests
from typing import List, Tuple
from requests.exceptions import JSONDecodeError as RequestsJSONDecodeError

from ..key_manager import KeyManager

@@ -15,8 +16,34 @@ def _execute_query(payload, keys: KeyManager):
PerplexityChatCompletionResponse: Parsed response from the Perplexity API.
"""
api_key = keys.PERPLEXITY
if not api_key:
raise RuntimeError(
"PERPLEXITY_API_KEY is not set. Set it to enable add_citations=True, "
"or run get_paper(add_citations=False)."
)
headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
-    response = requests.post("https://api.perplexity.ai/chat/completions", headers=headers, json=payload).json()
+    raw_response = requests.post(
+        "https://api.perplexity.ai/chat/completions",
+        headers=headers,
+        json=payload,
+        timeout=60,
+    )

if raw_response.status_code != 200:
body_preview = (raw_response.text or "").strip()[:500]
raise RuntimeError(
f"Perplexity API error {raw_response.status_code}. "
f"Body (first 500 chars): {body_preview}"
)

try:
response = raw_response.json()
except (ValueError, RequestsJSONDecodeError) as e:
body_preview = (raw_response.text or "").strip()[:500]
raise RuntimeError(
"Perplexity API returned non-JSON response. "
f"Body (first 500 chars): {body_preview}"
) from e

return response

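_execute_query now fails fast with a RuntimeError in three cases: a missing PERPLEXITY key, a non-200 status, and a non-JSON body, each error carrying a 500-character body preview for debugging. Since requests' JSONDecodeError subclasses ValueError, catching both is belt-and-braces. A self-contained restatement of the pattern, not repository code; the function name is illustrative:

import requests

def post_json(url, payload, headers=None, timeout=60):
    # Same defensive sequence as _execute_query above: bounded timeout,
    # status check before parsing, and a short body preview in every error.
    raw = requests.post(url, headers=headers, json=payload, timeout=timeout)
    if raw.status_code != 200:
        preview = (raw.text or "").strip()[:500]
        raise RuntimeError(f"API error {raw.status_code}. Body (first 500 chars): {preview}")
    try:
        return raw.json()
    except ValueError as e:
        preview = (raw.text or "").strip()[:500]
        raise RuntimeError(f"Non-JSON response. Body (first 500 chars): {preview}") from e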
denario/paper_agents/paper_node.py (17 additions, 4 deletions)
@@ -477,7 +477,11 @@ async def add_citations_async(state, text, section_name):

loop = asyncio.get_event_loop()
func = partial(process_tex_file_with_references, text, state["keys"])
-    new_text, references = await loop.run_in_executor(None, func)
+    try:
+        new_text, references = await loop.run_in_executor(None, func)
+    except Exception as e:
+        print(f" {section_name} citations failed: {e}")
+        return section_name, text, ""
new_text = clean_section(new_text, section_name)

# save temporary file
@@ -494,16 +498,26 @@ async def citations_node(state: GraphState, config: RunnableConfig):

print("Adding citations...")

if not state["keys"].PERPLEXITY:
print("⚠️ PERPLEXITY_API_KEY not set; skipping citation insertion.")
return {'paper': state['paper'],
'tokens': state['tokens']}

#sections = ['Introduction', 'Methods', 'Results', 'Conclusions']
sections = ['Introduction', 'Methods']
tasks = [add_citations_async(state, state['paper'][section], section) for section in sections]
-    results = await asyncio.gather(*tasks)
+    results = await asyncio.gather(*tasks, return_exceptions=True)

# Deduplicate full BibTeX entries
bib_entries_set = set()
bib_entries_list = []

-    for section_name, updated_text, references in results:
+    for result in results:
+        if isinstance(result, Exception):
+            print(f"⚠️ Citation task failed: {result}")
+            continue
+
+        section_name, updated_text, references = result

state['paper'][section_name] = updated_text

@@ -553,4 +567,3 @@ async def citations_node(state: GraphState, config: RunnableConfig):
return {'paper': state['paper'],
'tokens': state['tokens']}
#######################################################################################
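Switching to return_exceptions=True means one failed section no longer cancels its siblings: exceptions come back as values and are filtered in the result loop, a second safety net on top of the try/except already inside add_citations_async. A self-contained sketch of the pattern; the worker and its forced failure are invented for illustration:

import asyncio

async def add_citations(section):
    if section == "Methods":
        raise RuntimeError("Perplexity call failed")
    return section, f"{section} text with citations", "@article{example}"

async def main():
    tasks = [add_citations(s) for s in ["Introduction", "Methods"]]
    results = await asyncio.gather(*tasks, return_exceptions=True)
    for result in results:
        if isinstance(result, Exception):  # failed task: report it, keep the rest
            print(f"Citation task failed: {result}")
            continue
        section_name, text, references = result
        print(f"{section_name}: updated")

asyncio.run(main())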

tests/smoke_models.py (25 additions, 0 deletions)
@@ -0,0 +1,25 @@
from denario.llm import models


def main() -> None:
required = [
"gemini-3-flash",
"gemini-3-pro",
"gpt-5.2",
"gpt-5.2-pro",
"claude-4.5-sonnet",
"claude-4.5-opus",
"claude-4.5-haiku",
]

missing = [name for name in required if name not in models]
if missing:
raise SystemExit(f"Missing model aliases: {missing}")

print("Model alias smoke test passed:")
for name in required:
print(f"- {name} -> {models[name].name}")


if __name__ == "__main__":
main()
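Assuming the package is importable from the repository root, this runs as a plain script (python tests/smoke_models.py): it prints each required alias on success and exits nonzero via SystemExit when any alias is missing from denario.llm.models.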