Skip to content

Commit ada2429

Browse files
committed
added guide for Anthropic's Citations API
1 parent 7174340 commit ada2429

File tree

1 file changed

+168
-1
lines changed

1 file changed

+168
-1
lines changed

guides/05_chatbots/03_agents-and-tool-usage.md

Lines changed: 168 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -161,7 +161,6 @@ tools = load_tools(["serpapi"])
161161

162162
# Get the prompt to use - you can modify this!
163163
prompt = hub.pull("hwchase17/openai-tools-agent")
164-
# print(prompt.messages) -- to see the prompt
165164
agent = create_openai_tools_agent(
166165
model.with_config({"tags": ["agent_llm"]}), tools, prompt
167166
)
@@ -346,3 +345,171 @@ This creates a chatbot that:
346345

347346
That's it! You now have a chatbot that not only responds to users but also shows its thinking process, creating a more transparent and engaging interaction. See our finished Gemini 2.0 Flash Thinking demo [here](https://huggingface.co/spaces/ysharma/Gemini2-Flash-Thinking).
348347

348+
349+
## Building with Citations
350+
351+
The Gradio Chatbot can display citations from LLM responses, making it perfect for creating UIs that show source documentation and references. This guide will show you how to build a chatbot that displays Claude's citations in real-time.
352+
353+
### A real example using Anthropic's Citations API
354+
Let's create a complete chatbot that shows both responses and their supporting citations. We'll use Anthropic's Claude API with citations enabled and Gradio for the UI.
355+
356+
We'll begin with imports and setting up the Anthropic client. Note that you'll need an `ANTHROPIC_API_KEY` environment variable set:
357+
358+
```python
359+
import base64
from typing import Any, Dict, List, Optional

import anthropic
import gradio as gr
363+
364+
client = anthropic.Anthropic()
365+
```
366+
367+
First, let's set up our message formatting functions that handle document preparation:
368+
369+
```python
370+
def encode_pdf_to_base64(file_obj) -> Optional[str]:
    """Return an uploaded file's raw bytes as a base64 string.

    Args:
        file_obj: A Gradio file-upload object exposing a ``.name`` path
            attribute, or ``None`` when the user uploaded nothing.

    Returns:
        The base64-encoded file contents, or ``None`` if no file was given.
        (The original annotation claimed ``-> str`` but ``None`` is a real
        return value here — callers must handle it.)
    """
    if file_obj is None:
        return None
    # Read in binary mode: PDFs are not text, and the Anthropic API expects
    # base64 of the raw bytes.
    with open(file_obj.name, 'rb') as f:
        return base64.b64encode(f.read()).decode('utf-8')
376+
377+
def format_message_history(
    history: list,
    enable_citations: bool,
    doc_type: str,
    text_input: str,
    pdf_file: Any
) -> List[Dict]:
    """Convert Gradio chat history to Anthropic message format.

    Args:
        history: Gradio ``type="messages"`` history — dicts with ``role`` /
            ``content`` (and optionally ``metadata`` for citation panels).
            The last entry is the user's pending question.
        enable_citations: When True, attach the selected document with
            ``"citations": {"enabled": True}`` to the latest message.
        doc_type: ``"plain_text"`` or ``"pdf"``.
        text_input: Document text used when ``doc_type == "plain_text"``.
        pdf_file: Gradio file upload used when ``doc_type == "pdf"``
            (the original ``str`` annotation was wrong — this is a file object).

    Returns:
        A list of Anthropic Messages-API message dicts.
    """
    formatted_messages = []

    # Replay the prior conversation. The Messages API requires user/assistant
    # roles to alternate, so assistant turns must be forwarded too (the
    # original code dropped them, producing consecutive "user" messages the
    # API rejects). Citation panels carry a `metadata` key and are UI-only,
    # so they are skipped to preserve alternation.
    for msg in history[:-1]:
        if msg["role"] == "user":
            formatted_messages.append({"role": "user", "content": msg["content"]})
        elif msg["role"] == "assistant" and not msg.get("metadata"):
            formatted_messages.append({"role": "assistant", "content": msg["content"]})

    # Build the latest user turn; the document (if any) precedes the question.
    latest_message = {"role": "user", "content": []}

    if enable_citations:
        if doc_type == "plain_text":
            latest_message["content"].append({
                "type": "document",
                "source": {
                    "type": "text",
                    "media_type": "text/plain",
                    "data": text_input.strip()
                },
                "title": "Text Document",
                "citations": {"enabled": True}
            })
        elif doc_type == "pdf" and pdf_file:
            pdf_data = encode_pdf_to_base64(pdf_file)
            if pdf_data:
                latest_message["content"].append({
                    "type": "document",
                    "source": {
                        "type": "base64",
                        "media_type": "application/pdf",
                        "data": pdf_data
                    },
                    "title": pdf_file.name,
                    "citations": {"enabled": True}
                })

    # Add the user's question after the document block(s).
    latest_message["content"].append({"type": "text", "text": history[-1]["content"]})

    formatted_messages.append(latest_message)
    return formatted_messages
426+
```
427+
428+
Then, let's create our bot response handler that processes citations:
429+
430+
```python
431+
def bot_response(
    history: list,
    enable_citations: bool,
    doc_type: str,
    text_input: str,
    pdf_file: Any
) -> List[Dict[str, Any]]:
    """Send the conversation to Claude and append the reply to ``history``.

    Mutates ``history`` in place (appends the assistant answer and, when
    citations are enabled and present, a second assistant message rendered by
    gr.Chatbot as a collapsible "📚 Citations" panel via ``metadata.title``)
    and returns it. On failure, appends an apology message instead.
    """
    try:
        messages = format_message_history(history, enable_citations, doc_type, text_input, pdf_file)
        response = client.messages.create(model="claude-3-5-sonnet-20241022", max_tokens=1024, messages=messages)

        # Accumulate the answer text and the unique cited passages.
        main_response = ""
        citations = []

        # Citation objects are attached per text block when citations are enabled.
        for block in response.content:
            if block.type == "text":
                main_response += block.text
                if enable_citations and hasattr(block, 'citations') and block.citations:
                    for citation in block.citations:
                        # De-duplicate passages cited more than once.
                        if citation.cited_text not in citations:
                            citations.append(citation.cited_text)

        history.append({"role": "assistant", "content": main_response})

        # The `metadata.title` key makes gr.Chatbot(type="messages") render
        # this message as a collapsible section, keeping the chat clean.
        if enable_citations and citations:
            history.append({
                "role": "assistant",
                "content": "\n".join(citations),
                "metadata": {"title": "📚 Citations"}
            })

        return history

    except Exception as e:
        # Surface the real failure in the server log instead of discarding it
        # (the original captured `e` and never used it, hiding every error).
        print(f"bot_response error: {e}")
        history.append({
            "role": "assistant",
            "content": "I apologize, but I encountered an error while processing your request."
        })
        return history
474+
```
475+
476+
Finally, let's create the Gradio interface:
477+
478+
```python
479+
def user_message(message, history, enable_citations, doc_type, text_input, pdf_file):
    """Append the user's message to the chat history and clear the input box.

    Defined here because the original snippet wired ``user_message`` into
    ``msg.submit(...)`` without ever defining it (NameError on first submit).
    The extra component values are accepted to match the submit inputs list
    but are not needed until the bot step.
    """
    if message and message.strip():
        history.append({"role": "user", "content": message})
    return "", history


with gr.Blocks() as demo:
    gr.Markdown("# Chat with Citations")

    with gr.Row(scale=1):
        with gr.Column(scale=4):
            # NOTE(review): `bubble_full_width` is deprecated in recent Gradio
            # releases — kept for parity with the original snippet; confirm
            # against the Gradio version in use.
            chatbot = gr.Chatbot(type="messages", bubble_full_width=False, show_label=False, scale=1)
            msg = gr.Textbox(placeholder="Enter your message here...", show_label=False, container=False)

        with gr.Column(scale=1):
            enable_citations = gr.Checkbox(label="Enable Citations", value=True, info="Toggle citation functionality")
            doc_type_radio = gr.Radio(choices=["plain_text", "pdf"], value="plain_text", label="Document Type", info="Choose the type of document to use")
            text_input = gr.Textbox(label="Document Content", lines=10, info="Enter the text you want to reference")
            pdf_input = gr.File(label="Upload PDF", file_types=[".pdf"], file_count="single", visible=False)

    # Show the input widget matching the selected document type. Without this
    # handler the PDF uploader (created with visible=False) was unreachable.
    doc_type_radio.change(
        lambda t: (gr.update(visible=t == "plain_text"), gr.update(visible=t == "pdf")),
        doc_type_radio,
        [text_input, pdf_input],
    )

    # Stage the user turn first, then generate the bot response into the chat.
    msg.submit(
        user_message,
        [msg, chatbot, enable_citations, doc_type_radio, text_input, pdf_input],
        [msg, chatbot]
    ).then(
        bot_response,
        [chatbot, enable_citations, doc_type_radio, text_input, pdf_input],
        chatbot
    )

demo.launch()
505+
```
506+
507+
This creates a chatbot that:
508+
- Supports both plain text and PDF documents for Claude to cite from
509+
- Displays citations in collapsible sections using our `metadata` feature
510+
- Shows source quotes directly from the given documents
511+
512+
The citations feature works particularly well with the Gradio Chatbot's `metadata` support, allowing us to create collapsible sections that keep the chat interface clean while still providing easy access to source documentation.
513+
514+
That's it! You now have a chatbot that not only responds to users but also shows its sources, creating a more transparent and trustworthy interaction. See our finished Citations demo [here](https://huggingface.co/spaces/ysharma/anthropic-citations-with-gradio-metadata-key).
515+

0 commit comments

Comments
 (0)