-
-
Notifications
You must be signed in to change notification settings - Fork 82
feat: Add BYOK (Bring Your Own Key) support for Groq API #145
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1 @@ | ||
| .env |
| Original file line number | Diff line number | Diff line change | ||||
|---|---|---|---|---|---|---|
|
|
@@ -32,8 +32,6 @@ | |||||
|
|
||||||
| load_dotenv() | ||||||
|
|
||||||
| client = Groq(api_key=os.getenv("GROQ_API_KEY")) | ||||||
|
|
||||||
|
|
||||||
| def build_context(docs): | ||||||
| return "\n".join( | ||||||
|
|
@@ -42,10 +40,19 @@ def build_context(docs): | |||||
| ) | ||||||
|
|
||||||
|
|
||||||
| def ask_llm(question, docs): | ||||||
| context = build_context(docs) | ||||||
| logger.debug(f"Generated context for LLM:\n{context}") | ||||||
| prompt = f"""You are an assistant that answers based on context. | ||||||
| def ask_llm(question, docs, api_key: str, groq_model: str = "llama-3.3-70b-versatile", article_text: str = ""): | ||||||
| client = Groq(api_key=api_key) | ||||||
| pinecone_context = build_context(docs) | ||||||
| logger.debug(f"Generated context for LLM:\n{pinecone_context}") | ||||||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Avoid logging full LLM context payloads. Line 46 logs raw context text, which can leak user/article content into logs and create very large log entries. Log size/metadata instead of full text. 🔧 Suggested change- logger.debug(f"Generated context for LLM:\n{pinecone_context}")
+ logger.debug("Generated LLM context from Pinecone notes (chars=%d)", len(pinecone_context))📝 Committable suggestion
Suggested change
🤖 Prompt for AI Agents |
||||||
|
|
||||||
| context_parts = [] | ||||||
| if article_text: | ||||||
| context_parts.append(f"=== Full Article ===\n{article_text}") | ||||||
| if pinecone_context: | ||||||
| context_parts.append(f"=== Fact-Check Notes ===\n{pinecone_context}") | ||||||
| context = "\n\n".join(context_parts) or "No context available." | ||||||
|
|
||||||
| prompt = f"""You are an assistant that answers questions about a news article. | ||||||
|
|
||||||
| Context: | ||||||
| {context} | ||||||
|
|
@@ -55,7 +62,7 @@ def ask_llm(question, docs): | |||||
| """ | ||||||
|
|
||||||
| response = client.chat.completions.create( | ||||||
| model="gemma2-9b-it", | ||||||
| model=groq_model, | ||||||
| messages=[ | ||||||
| {"role": "system", "content": "Use only the context to answer."}, | ||||||
| {"role": "user", "content": prompt}, | ||||||
|
|
||||||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -34,10 +34,9 @@ | |
|
|
||
| load_dotenv() | ||
|
|
||
| client = Groq(api_key=os.getenv("GROQ_API_KEY")) | ||
|
|
||
|
|
||
| def run_claim_extractor_sdk(state): | ||
| client = Groq(api_key=state["groq_api_key"]) | ||
| try: | ||
|
Comment on lines
+39
to
40
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Guard API key access inside the `try` block. Line 39 can throw `KeyError` before error handling is entered, bypassing the structured error response. Suggested fix: def run_claim_extractor_sdk(state):
- client = Groq(api_key=state["groq_api_key"])
try:
+ api_key = state.get("groq_api_key")
+ if not api_key:
+ raise ValueError("Missing 'groq_api_key' in state")
+ client = Groq(api_key=api_key)
text = state.get("cleaned_text")🤖 Prompt for AI Agents |
||
| text = state.get("cleaned_text") | ||
| if not text: | ||
|
|
@@ -63,7 +62,7 @@ def run_claim_extractor_sdk(state): | |
| ), | ||
| }, | ||
| ], | ||
| model="gemma2-9b-it", | ||
| model=state.get("groq_model", "llama-3.3-70b-versatile"), | ||
| temperature=0.3, | ||
| max_tokens=512, | ||
| ) | ||
|
|
@@ -87,7 +86,8 @@ def run_claim_extractor_sdk(state): | |
| } | ||
|
|
||
|
|
||
| def run_fact_verifier_sdk(search_results): | ||
| def run_fact_verifier_sdk(search_results, api_key: str, groq_model: str = "llama-3.3-70b-versatile"): | ||
| client = Groq(api_key=api_key) | ||
| try: | ||
| results_list = [] | ||
|
|
||
|
|
@@ -128,7 +128,7 @@ def run_fact_verifier_sdk(search_results): | |
| ), | ||
| }, | ||
| ], | ||
| model="gemma2-9b-it", | ||
| model=groq_model, | ||
| temperature=0.3, | ||
| max_tokens=256, | ||
| ) | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -52,6 +52,8 @@ class MyState(TypedDict): | |
| score: int | ||
| retries: int | ||
| status: str | ||
| groq_api_key: str | ||
| groq_model: str | ||
|
Comment on lines
+55
to
+56
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. 🧩 Analysis chain🏁 Script executed: # First, find and read the langgraph_builder.py file
fd langgraph_builder.pyRepository: AOSSIE-Org/Perspective Length of output: 106 🏁 Script executed: # Once we have the file, let's examine the context around lines 55-56
head -80 backend/app/modules/langgraph_builder.py | tail -40Repository: AOSSIE-Org/Perspective Length of output: 1105 🏁 Script executed: # Search for groq_api_key usage throughout the codebase
rg -n 'groq_api_key' --type py -B2 -A2Repository: AOSSIE-Org/Perspective Length of output: 2957 🏁 Script executed: # Search for groq_model usage
rg -n 'groq_model' --type py -B2 -A2Repository: AOSSIE-Org/Perspective Length of output: 7629 🏁 Script executed: # Search for potential persistence/storage operations
rg -n 'store_and_send|JSONResponse|return\s+\{|insert|upsert|save|persist|log' backend/app/modules/langgraph_builder.py -B2 -A2Repository: AOSSIE-Org/Perspective Length of output: 1230 🏁 Script executed: # Check for any JSON/dict serialization that might expose the key
rg -n 'json\.|dumps\|to_dict\|serialize' backend/app/modules/langgraph_builder.py -B2 -A2Repository: AOSSIE-Org/Perspective Length of output: 48 🏁 Script executed: # Find store_and_send.py
fd store_and_send.pyRepository: AOSSIE-Org/Perspective Length of output: 119 🏁 Script executed: # Read the store_and_send implementation
cat backend/app/modules/langgraph_nodes/store_and_send.pyRepository: AOSSIE-Org/Perspective Length of output: 1815 🏁 Script executed: # Check routes.py to see what data is returned from API endpoints
cat backend/app/routes/routes.pyRepository: AOSSIE-Org/Perspective Length of output: 3748 🏁 Script executed: # Search for any logging statements that might expose groq_api_key
rg -n 'logger\.|print\(' backend/app/modules/langgraph_builder.py backend/app/modules/langgraph_nodes/ backend/app/modules/pipeline.py -A2Repository: AOSSIE-Org/Perspective Length of output: 3450 🏁 Script executed: # Search for return statements that might include the entire state
rg -n 'return\s+\{|return\s+result|return\s+data' backend/app/routes/routes.py -B3 -A3Repository: AOSSIE-Org/Perspective Length of output: 750 🏁 Script executed: # Find chunk_rag_data.py
fd chunk_rag_data.pyRepository: AOSSIE-Org/Perspective Length of output: 116 🏁 Script executed: # Read the chunk_rag_data implementation
cat backend/app/modules/vector_store/chunk_rag_data.pyRepository: AOSSIE-Org/Perspective Length of output: 3488 🏁 Script executed: # Check what fields are actually needed by chunk_rag_data
rg -n 'state\[|state\.get' backend/app/modules/vector_store/chunk_rag_data.pyRepository: AOSSIE-Org/Perspective Length of output: 48 Strip The API key is currently exposed in two critical ways:
Fix: In `store_and_send`, return the state with sensitive fields stripped, e.g. `return {**{k: v for k, v in state.items() if k not in ("groq_api_key", "groq_model")}, "status": "success"}`. Also remove the debug log at line 29 or exclude sensitive fields from it. 🤖 Prompt for AI Agents |
||
|
|
||
|
|
||
| def build_langgraph(): | ||
|
|
||
| Original file line number | Diff line number | Diff line change | ||||||||||||||||
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
|
|
@@ -32,12 +32,10 @@ def run_fact_check(state): | |||||||||||||||||
| verifications, error_message = run_fact_check_pipeline(state) | ||||||||||||||||||
|
|
||||||||||||||||||
| if error_message: | ||||||||||||||||||
| logger.error(f"Error in fact-checking: {error_message}") | ||||||||||||||||||
| return { | ||||||||||||||||||
| "status": "error", | ||||||||||||||||||
| "error_from": "fact_checking", | ||||||||||||||||||
| "message": f"{error_message}", | ||||||||||||||||||
| } | ||||||||||||||||||
| # Soft failure — web search quota/key issue. Continue with empty facts | ||||||||||||||||||
| # so the rest of the pipeline (generate_perspective, store_and_send) still runs. | ||||||||||||||||||
| logger.warning(f"Fact-checking skipped (non-fatal): {error_message}") | ||||||||||||||||||
| verifications = [] | ||||||||||||||||||
|
Comment on lines
+35
to
+38
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Avoid logging raw fact-check error payloads. Line 37 logs the raw `error_message`, which can leak upstream provider details into logs. Suggested fix: - logger.warning(f"Fact-checking skipped (non-fatal): {error_message}")
+ logger.warning("Fact-checking skipped (non-fatal). reason=%s", "upstream_fact_check_failure")📝 Committable suggestion
Suggested change
🤖 Prompt for AI Agents |
||||||||||||||||||
|
|
||||||||||||||||||
| except Exception as e: | ||||||||||||||||||
| logger.exception(f"Unexpected error in fact-checking: {e}") | ||||||||||||||||||
|
|
||||||||||||||||||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -36,15 +36,6 @@ class PerspectiveOutput(BaseModel): | |
| perspective: str = Field(..., description="Generated opposite perspective") | ||
|
|
||
|
|
||
| my_llm = "llama-3.3-70b-versatile" | ||
|
|
||
| llm = ChatGroq(model=my_llm, temperature=0.7) | ||
|
|
||
| structured_llm = llm.with_structured_output(PerspectiveOutput) | ||
|
|
||
|
|
||
| chain = prompt | structured_llm | ||
|
|
||
|
|
||
| def generate_perspective(state): | ||
| try: | ||
|
|
@@ -56,17 +47,19 @@ def generate_perspective(state): | |
|
|
||
| if not text: | ||
| raise ValueError("Missing or empty 'cleaned_text' in state") | ||
| elif not facts: | ||
| raise ValueError("Missing or empty 'facts' in state") | ||
|
|
||
| llm = ChatGroq(model=state["groq_model"], temperature=0.7, api_key=state["groq_api_key"]) | ||
| chain = prompt | llm.with_structured_output(PerspectiveOutput) | ||
|
|
||
| # facts may be empty if web search failed — generate perspective without them | ||
| facts_str = "\n".join( | ||
| [ | ||
| f"Claim: {f['original_claim']}\n" | ||
| "Verdict: {f['verdict']}\nExplanation: " | ||
| "{f['explanation']}" | ||
| for f in state["facts"] | ||
| for f in (facts or []) | ||
|
Comment on lines
55
to
+60
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Fix facts prompt interpolation; verdict/explanation are currently literal text. In this block, only the first line is an f-string; Line 58 and Line 59 placeholders are not interpolated. Suggested fix facts_str = "\n".join(
[
f"Claim: {f['original_claim']}\n"
- "Verdict: {f['verdict']}\nExplanation: "
- "{f['explanation']}"
+ f"Verdict: {f['verdict']}\n"
+ f"Explanation: {f['explanation']}"
for f in (facts or [])
]
) or "No verified facts available."🤖 Prompt for AI Agents |
||
| ] | ||
| ) | ||
| ) or "No verified facts available." | ||
|
|
||
| result = chain.invoke( | ||
| { | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -23,14 +23,13 @@ | |
| logger = setup_logger(__name__) | ||
|
|
||
| # Init once | ||
| groq_llm = ChatGroq( | ||
| model="gemma2-9b-it", | ||
| temperature=0.0, | ||
| max_tokens=10, | ||
| ) | ||
|
|
||
|
|
||
| def judge_perspective(state): | ||
| groq_llm = ChatGroq( | ||
| model=state.get("groq_model", "llama-3.3-70b-versatile"), | ||
| temperature=0.0, | ||
| max_tokens=10, | ||
| api_key=state["groq_api_key"], | ||
| ) | ||
| try: | ||
|
Comment on lines
+27
to
33
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Move the `ChatGroq` initialization inside the `try` block. At Line 31, direct indexing (`state["groq_api_key"]`) can raise `KeyError` outside error handling. Proposed fix: def judge_perspective(state):
- groq_llm = ChatGroq(
- model=state.get("groq_model", "llama-3.3-70b-versatile"),
- temperature=0.0,
- max_tokens=10,
- api_key=state["groq_api_key"],
- )
try:
+ api_key = state.get("groq_api_key")
+ if not api_key:
+ raise ValueError("Missing 'groq_api_key' in state")
+
+ groq_llm = ChatGroq(
+ model=state.get("groq_model", "llama-3.3-70b-versatile"),
+ temperature=0.0,
+ max_tokens=10,
+ api_key=api_key,
+ )
perspective_obj = state.get("perspective")🤖 Prompt for AI Agents |
||
| perspective_obj = state.get("perspective") | ||
| text = getattr(perspective_obj, "perspective", "").strip() | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -23,10 +23,9 @@ | |
|
|
||
| load_dotenv() | ||
|
|
||
| client = Groq(api_key=os.getenv("GROQ_API_KEY")) | ||
|
|
||
|
|
||
| def run_sentiment_sdk(state): | ||
| client = Groq(api_key=state["groq_api_key"]) | ||
| try: | ||
|
Comment on lines
+28
to
29
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Move API-key access inside guarded error handling. Line 28 can raise `KeyError` before the `try` block is entered. Suggested fix: -def run_sentiment_sdk(state):
- client = Groq(api_key=state["groq_api_key"])
- try:
+def run_sentiment_sdk(state):
+ try:
+ api_key = state.get("groq_api_key")
+ if not api_key:
+ raise ValueError("Missing 'groq_api_key' in state")
+ client = Groq(api_key=api_key)
text = state.get("cleaned_text")🤖 Prompt for AI Agents |
||
| text = state.get("cleaned_text") | ||
| if not text: | ||
|
|
@@ -49,7 +48,7 @@ def run_sentiment_sdk(state): | |
| ), | ||
| }, | ||
| ], | ||
| model="gemma2-9b-it", | ||
| model=state.get("groq_model", "llama-3.3-70b-versatile"), | ||
| temperature=0.2, | ||
| max_tokens=3, | ||
| ) | ||
|
|
||
| Original file line number | Diff line number | Diff line change | ||||||||||||||
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
|
|
@@ -30,7 +30,7 @@ | |||||||||||||||
| """ | ||||||||||||||||
|
|
||||||||||||||||
|
|
||||||||||||||||
| from fastapi import APIRouter | ||||||||||||||||
| from fastapi import APIRouter, Request, HTTPException | ||||||||||||||||
| from pydantic import BaseModel | ||||||||||||||||
| from app.modules.pipeline import run_scraper_pipeline | ||||||||||||||||
| from app.modules.pipeline import run_langgraph_workflow | ||||||||||||||||
|
|
@@ -52,6 +52,7 @@ class URlRequest(BaseModel): | |||||||||||||||
|
|
||||||||||||||||
| class ChatQuery(BaseModel): | ||||||||||||||||
| message: str | ||||||||||||||||
| article_text: str = "" | ||||||||||||||||
|
|
||||||||||||||||
|
|
||||||||||||||||
| @router.get("/") | ||||||||||||||||
|
|
@@ -60,26 +61,37 @@ async def home(): | |||||||||||||||
|
|
||||||||||||||||
|
|
||||||||||||||||
| @router.post("/bias") | ||||||||||||||||
| async def bias_detection(request: URlRequest): | ||||||||||||||||
| content = await asyncio.to_thread(run_scraper_pipeline, (request.url)) | ||||||||||||||||
| bias_score = await asyncio.to_thread(check_bias, (content)) | ||||||||||||||||
| async def bias_detection(url_request: URlRequest, request: Request): | ||||||||||||||||
| api_key = request.headers.get("x-byok-api-key") | ||||||||||||||||
| if not api_key: | ||||||||||||||||
| raise HTTPException(status_code=401, detail="Missing X-BYOK-Api-Key header") | ||||||||||||||||
| groq_model = request.headers.get("x-byok-model", "llama-3.3-70b-versatile") | ||||||||||||||||
| content = await asyncio.to_thread(run_scraper_pipeline, url_request.url) | ||||||||||||||||
| bias_score = await asyncio.to_thread(check_bias, content, api_key, groq_model) | ||||||||||||||||
|
Comment on lines
+69
to
+70
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Pass cleaned article text to bias checker, not the whole scraper dict.
🔧 Suggested change- content = await asyncio.to_thread(run_scraper_pipeline, url_request.url)
- bias_score = await asyncio.to_thread(check_bias, content, api_key, groq_model)
+ content = await asyncio.to_thread(run_scraper_pipeline, url_request.url)
+ cleaned_text = content.get("cleaned_text", "")
+ if not cleaned_text:
+ raise HTTPException(status_code=422, detail="Scraper returned empty article text")
+ bias_score = await asyncio.to_thread(check_bias, cleaned_text, api_key, groq_model)📝 Committable suggestion
Suggested change
🤖 Prompt for AI Agents |
||||||||||||||||
| logger.info(f"Bias detection result: {bias_score}") | ||||||||||||||||
| return bias_score | ||||||||||||||||
|
|
||||||||||||||||
|
|
||||||||||||||||
| @router.post("/process") | ||||||||||||||||
| async def run_pipelines(request: URlRequest): | ||||||||||||||||
| article_text = await asyncio.to_thread(run_scraper_pipeline, (request.url)) | ||||||||||||||||
| async def run_pipelines(url_request: URlRequest, request: Request): | ||||||||||||||||
| api_key = request.headers.get("x-byok-api-key") | ||||||||||||||||
| if not api_key: | ||||||||||||||||
| raise HTTPException(status_code=401, detail="Missing X-BYOK-Api-Key header") | ||||||||||||||||
| groq_model = request.headers.get("x-byok-model", "llama-3.3-70b-versatile") | ||||||||||||||||
| article_text = await asyncio.to_thread(run_scraper_pipeline, url_request.url) | ||||||||||||||||
| logger.debug(f"Scraper output: {json.dumps(article_text, indent=2, ensure_ascii=False)}") | ||||||||||||||||
| data = await asyncio.to_thread(run_langgraph_workflow, (article_text)) | ||||||||||||||||
| data = await asyncio.to_thread(run_langgraph_workflow, article_text, api_key, groq_model) | ||||||||||||||||
| return data | ||||||||||||||||
|
|
||||||||||||||||
|
|
||||||||||||||||
| @router.post("/chat") | ||||||||||||||||
| async def answer_query(request: ChatQuery): | ||||||||||||||||
| query = request.message | ||||||||||||||||
| async def answer_query(chat_request: ChatQuery, request: Request): | ||||||||||||||||
| api_key = request.headers.get("x-byok-api-key") | ||||||||||||||||
| if not api_key: | ||||||||||||||||
| raise HTTPException(status_code=401, detail="Missing X-BYOK-Api-Key header") | ||||||||||||||||
| groq_model = request.headers.get("x-byok-model", "llama-3.3-70b-versatile") | ||||||||||||||||
| query = chat_request.message | ||||||||||||||||
| results = search_pinecone(query) | ||||||||||||||||
| answer = ask_llm(query, results) | ||||||||||||||||
| answer = ask_llm(query, results, api_key, groq_model, chat_request.article_text) | ||||||||||||||||
|
Comment on lines
94
to
+95
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. 🧩 Analysis chain🏁 Script executed: find backend -name "routes.py" -type f | head -5Repository: AOSSIE-Org/Perspective Length of output: 94 🏁 Script executed: cat backend/app/routes/routes.py | head -100Repository: AOSSIE-Org/Perspective Length of output: 3748 🏁 Script executed: cat backend/app/modules/chat/get_rag_data.pyRepository: AOSSIE-Org/Perspective Length of output: 1533 🏁 Script executed: cat backend/app/modules/chat/llm_processing.pyRepository: AOSSIE-Org/Perspective Length of output: 2247 Move blocking network calls to thread pool in Lines 94-95 execute synchronous network I/O calls ( Suggested change- results = search_pinecone(query)
- answer = ask_llm(query, results, api_key, groq_model, chat_request.article_text)
+ results = await asyncio.to_thread(search_pinecone, query)
+ answer = await asyncio.to_thread(
+ ask_llm, query, results, api_key, groq_model, chat_request.article_text
+ )📝 Committable suggestion
Suggested change
🤖 Prompt for AI Agents |
||||||||||||||||
| logger.info(f"Chat answer generated: {answer}") | ||||||||||||||||
|
|
||||||||||||||||
| return {"answer": answer} | ||||||||||||||||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -13,6 +13,7 @@ dependencies = [ | |
| "groq>=0.28.0", | ||
| "langchain>=0.3.25", | ||
| "langchain-community>=0.3.25", | ||
| "langchain-google-genai>=2.1.12", | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. 🧩 Analysis chain🏁 Script executed: #!/bin/bash
# Search for actual usages of langchain-google-genai in Python sources
rg -n --type=py -C2 'langchain_google_genai|ChatGoogleGenerativeAI|GoogleGenerativeAI'Repository: AOSSIE-Org/Perspective Length of output: 48 🏁 Script executed: #!/bin/bash
# Also search for imports from langchain packages to get broader context
rg -n --type=py 'from langchain.*import|import langchain' | head -50Repository: AOSSIE-Org/Perspective Length of output: 428 🏁 Script executed: #!/bin/bash
# Check the pyproject.toml to see all dependencies and any optional groups
cat backend/pyproject.tomlRepository: AOSSIE-Org/Perspective Length of output: 780 🏁 Script executed: #!/bin/bash
# Check for any references to google or genai (case-insensitive) in Python files
# to catch indirect references or commented code
rg -in --type=py 'google.*genai|genai.*google'Repository: AOSSIE-Org/Perspective Length of output: 48 🏁 Script executed: #!/bin/bash
# Check if there are any imports in __init__.py or similar that might re-export it
fd -t f '\.py$' | xargs grep -l 'google_genai\|GoogleGenerativeAI' 2>/dev/null || echo "No matches found"Repository: AOSSIE-Org/Perspective Length of output: 82 Remove the unused The 🤖 Prompt for AI Agents |
||
| "langchain-groq>=0.3.2", | ||
| "langgraph>=0.4.8", | ||
| "logging>=0.4.9.6", | ||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Guard client initialization inside error-handling path.
At Line 37,
`Groq(api_key=api_key)` executes before the `try`, so init/validation errors bypass your structured error response.
📝 Committable suggestion
🤖 Prompt for AI Agents