diff --git a/README.md b/README.md
index fea8f60..8151f2b 100644
--- a/README.md
+++ b/README.md
@@ -34,6 +34,7 @@ Each folder in this repo is a standalone project. Dive in to see how to solve re
 | Recipe | Description |
 |--------|-------------|
 | [bestbet](./bestbet) | Sports betting odds comparison tool |
+| [research-sentry](./research-sentry) | Voice-first academic research co-pilot that scans live portals for verified paper metadata and summaries |
 
 > More recipes added weekly!
 
diff --git a/research-sentry/.env.local.example b/research-sentry/.env.local.example
new file mode 100644
index 0000000..090d82b
--- /dev/null
+++ b/research-sentry/.env.local.example
@@ -0,0 +1,3 @@
+# Rename to .env.local and add your keys
+OPENAI_API_KEY=sk-your-key-here
+MINO_API_KEY=your-mino-key-here
diff --git a/research-sentry/.gitignore b/research-sentry/.gitignore
new file mode 100644
index 0000000..5f52bd7
--- /dev/null
+++ b/research-sentry/.gitignore
@@ -0,0 +1,5 @@
+node_modules
+.next
+.env*.local
+.vercel
+*.tsbuildinfo
diff --git a/research-sentry/README.md b/research-sentry/README.md
new file mode 100644
index 0000000..149aacd
--- /dev/null
+++ b/research-sentry/README.md
@@ -0,0 +1,51 @@
+# Research Sentry
+
+Live link: https://voice-research.vercel.app/
+
+## What it is
+Research Sentry is a voice-first academic research co-pilot that scans live portals (ArXiv, PubMed, Semantic Scholar, IEEE Xplore, and more) to assemble verified paper metadata and summaries. It uses the TinyFish Web Agent to automate multi-step portal navigation and extract structured results in real time.
+
+## Demo video
+https://voice-research.vercel.app/
+
+## TinyFish API usage (snippet)
+```ts
+const res = await fetch("https://mino.ai/v1/automation/run-sse", {
+  method: "POST",
+  headers: {
+    "X-API-Key": process.env.MINO_API_KEY!,
+    "Content-Type": "application/json",
+  },
+  body: JSON.stringify({
+    url,
+    goal,
+    browser_profile: stealth ? "stealth" : "lite",
+  }),
+});
+```
+
+## How to run
+1. 
Install deps: `npm install` +2. Create `.env.local`: +``` +MINO_API_KEY=your_tinyfish_key +OPENAI_API_KEY=your_openai_key +``` +3. Start dev server: `npm run dev` + +## Architecture diagram +```mermaid +graph TD + User((User)) -->|Voice/Text| UI[Search Interface] + UI -->|Intent| Parser[Intent Parser GPT-4] + Parser -->|Plan| Engine[Search Engine] + Engine -->|Dispatch| Agent1[TinyFish Agent: ArXiv] + Engine -->|Dispatch| Agent2[TinyFish Agent: PubMed] + Engine -->|Dispatch| Agent3[TinyFish Agent: Scholar] + Agent1 -->|Scraping| Web[Live Web DOM] + Agent2 -->|Scraping| Web + Agent3 -->|Scraping| Web + Web -->|Result| Aggregator[Synthesis & Deduplication] + Aggregator -->|JSON Payload| UI + UI -->|Visuals| Terminal[Live Log Terminal] +``` diff --git a/research-sentry/app/api/citations/track/route.ts b/research-sentry/app/api/citations/track/route.ts new file mode 100644 index 0000000..4586046 --- /dev/null +++ b/research-sentry/app/api/citations/track/route.ts @@ -0,0 +1,21 @@ +import { NextRequest, NextResponse } from 'next/server'; +import { analyzeCitationTrend } from '@/lib/citation-tracker'; + +export async function POST(req: NextRequest) { + try { + const { paper } = await req.json(); + + if (!paper) { + return NextResponse.json({ error: 'Paper data required' }, { status: 400 }); + } + + const trackedData = await analyzeCitationTrend(paper); + + // In a real app, we would save this to a database here + + return NextResponse.json(trackedData); + } catch (error) { + console.error('Citation Tracking API Error:', error); + return NextResponse.json({ error: 'Failed to track citation' }, { status: 500 }); + } +} diff --git a/research-sentry/app/api/compare/route.ts b/research-sentry/app/api/compare/route.ts new file mode 100644 index 0000000..5ffed6b --- /dev/null +++ b/research-sentry/app/api/compare/route.ts @@ -0,0 +1,19 @@ +import { NextRequest, NextResponse } from 'next/server'; +import { comparePapers } from '@/lib/comparator'; + +export async function 
POST(req: NextRequest) { + try { + const { papers } = await req.json(); + + if (!papers || papers.length < 2) { + return NextResponse.json({ error: 'Select at least 2 papers to compare' }, { status: 400 }); + } + + const comparison = await comparePapers(papers); + + return NextResponse.json(comparison); + } catch (error) { + console.error('Comparison API Error:', error); + return NextResponse.json({ error: 'Failed to generate comparison' }, { status: 500 }); + } +} diff --git a/research-sentry/app/api/conversation/route.ts b/research-sentry/app/api/conversation/route.ts new file mode 100644 index 0000000..f5dbe50 --- /dev/null +++ b/research-sentry/app/api/conversation/route.ts @@ -0,0 +1,21 @@ +import { NextRequest, NextResponse } from 'next/server'; +import { generateConversationResponse } from '@/lib/conversation'; + +export const maxDuration = 60; + +export async function POST(req: NextRequest) { + try { + const { history, context } = await req.json(); + + if (!history || !Array.isArray(history)) { + return NextResponse.json({ error: 'Invalid history format' }, { status: 400 }); + } + + const response = await generateConversationResponse(history, context); + + return NextResponse.json(response); + } catch (error) { + console.error('Conversation API Error:', error); + return NextResponse.json({ error: 'Failed to generate response' }, { status: 500 }); + } +} diff --git a/research-sentry/app/api/emails/extract/route.ts b/research-sentry/app/api/emails/extract/route.ts new file mode 100644 index 0000000..484881c --- /dev/null +++ b/research-sentry/app/api/emails/extract/route.ts @@ -0,0 +1,309 @@ +import { NextRequest, NextResponse } from 'next/server'; +import { runMinoAutomation } from '@/lib/mino'; +import { extractEmailsFromText } from '@/lib/email-utils'; +import { fetchPdfText } from '@/lib/pdf-utils'; + +export const maxDuration = 300; +export const runtime = 'nodejs'; + +interface AuthorInfo { + firstName: string; + lastName: string; + email: string; +} + 
+function capitalizeNamePart(part: string): string { + if (!part) return ''; + const lower = part.toLowerCase(); + return lower.charAt(0).toUpperCase() + lower.slice(1); +} + +function deriveNameFromEmail(email: string): Pick { + if (!email || !email.includes('@')) return { firstName: '', lastName: '' }; + const localRaw = email.split('@')[0] || ''; + const local = localRaw.split('+')[0]; + const tokens = local + .replace(/[^a-zA-Z]/g, ' ') + .split(/\s+/) + .filter(Boolean); + + if (tokens.length >= 2) { + return { + firstName: capitalizeNamePart(tokens[0]), + lastName: capitalizeNamePart(tokens[tokens.length - 1]), + }; + } + + if (tokens.length === 1) { + const camelParts = tokens[0].match(/[A-Z]?[a-z]+|[A-Z]+(?![a-z])/g) || []; + if (camelParts.length >= 2) { + const first = camelParts[0] ?? ''; + const last = camelParts[camelParts.length - 1] ?? ''; + return { + firstName: capitalizeNamePart(first), + lastName: capitalizeNamePart(last), + }; + } + if (camelParts.length === 1) { + const first = camelParts[0] ?? ''; + return { firstName: capitalizeNamePart(first), lastName: '' }; + } + } + + return { firstName: '', lastName: '' }; +} + +function tryParseJsonString(s: string): any | null { + try { + const clean = s.replace(/```json\n?|```/g, '').trim(); + return JSON.parse(clean); + } catch { + return null; + } +} + +function normalizeArxivPdfUrl(url: string): string { + // Accepts: + // - https://arxiv.org/abs/XXXX.XXXXX -> https://arxiv.org/pdf/XXXX.XXXXX.pdf + // - https://arxiv.org/pdf/XXXX.XXXXX -> https://arxiv.org/pdf/XXXX.XXXXX.pdf + // - https://arxiv.org/pdf/XXXX.XXXXX.pdf -> unchanged + if (!url) return url; + const u = url.trim(); + if (u.includes('arxiv.org/abs/')) { + const id = u.split('arxiv.org/abs/')[1]?.split(/[?#]/)[0]; + return id ? 
`https://arxiv.org/pdf/${id}.pdf` : u; + } + if (u.includes('arxiv.org/pdf/')) { + if (u.endsWith('.pdf')) return u; + return `${u.split(/[?#]/)[0]}.pdf`; + } + return u; +} + +function findAuthorsArray(obj: any): AuthorInfo[] { + if (!obj) return []; + + // Check if it's already an array of author objects + if (Array.isArray(obj)) { + const asAuthors = obj.filter((x) => + x && typeof x === 'object' && + ('firstName' in x || 'first_name' in x) && + ('lastName' in x || 'last_name' in x) && + ('email' in x) + ).map((x) => ({ + firstName: x.firstName || x.first_name || '', + lastName: x.lastName || x.last_name || '', + email: x.email || '' + })); + if (asAuthors.length > 0) return asAuthors; + } + + if (typeof obj === 'string') { + const parsed = tryParseJsonString(obj); + if (parsed) return findAuthorsArray(parsed); + return []; + } + + if (typeof obj !== 'object') return []; + + const keys = ['authors', 'authorInfo', 'author_info', 'authorDetails', 'author_details']; + for (const k of keys) { + if (Array.isArray(obj[k])) { + const result = findAuthorsArray(obj[k]); + if (result.length > 0) return result; + } + if (typeof obj[k] === 'string') { + const parsed = tryParseJsonString(obj[k]); + if (parsed) { + const nested = findAuthorsArray(parsed); + if (nested.length) return nested; + } + } + } + + // Recursive scan + for (const k of Object.keys(obj)) { + const v = obj[k]; + if (Array.isArray(v)) { + const result = findAuthorsArray(v); + if (result.length > 0) return result; + } else if (v && typeof v === 'object') { + const nested = findAuthorsArray(v); + if (nested.length) return nested; + } + } + + return []; +} + +function findEmailsArray(obj: any): string[] { + if (!obj) return []; + + if (Array.isArray(obj)) { + const asStrings = obj.filter((x) => typeof x === 'string') as string[]; + return asStrings.length === obj.length ? 
asStrings : []; + } + + if (typeof obj === 'string') { + const parsed = tryParseJsonString(obj); + if (parsed) return findEmailsArray(parsed); + return extractEmailsFromText(obj); + } + + if (typeof obj !== 'object') return []; + + const keys = ['emails', 'emailAddresses', 'email_addresses', 'authorEmails', 'author_emails', 'contacts']; + for (const k of keys) { + if (Array.isArray(obj[k])) return (obj[k] as any[]).filter((x) => typeof x === 'string') as string[]; + if (typeof obj[k] === 'string') { + const parsed = tryParseJsonString(obj[k]); + if (parsed) { + const nested = findEmailsArray(parsed); + if (nested.length) return nested; + } + } + } + + // Recursive scan for the first plausible emails array + for (const k of Object.keys(obj)) { + const v = obj[k]; + if (Array.isArray(v)) { + const strs = v.filter((x) => typeof x === 'string') as string[]; + if (strs.length >= 1 && strs.every((s) => s.includes('@'))) return strs; + } else if (v && typeof v === 'object') { + const nested = findEmailsArray(v); + if (nested.length) return nested; + } else if (typeof v === 'string') { + const extracted = extractEmailsFromText(v); + if (extracted.length) return extracted; + } + } + + return []; +} + +export async function POST(req: NextRequest) { + try { + const startedAt = Date.now(); + const totalBudgetMs = 5 * 60 * 1000; // 5 minutes + + const { paper } = await req.json(); + if (!paper) return NextResponse.json({ error: 'Paper data required' }, { status: 400 }); + + const candidatesRaw: string[] = [ + ...(paper.pdfUrl ? [paper.pdfUrl] : []), + ...(paper.url ? [paper.url] : []), + ].filter(Boolean); + + const candidates = candidatesRaw.map((u) => normalizeArxivPdfUrl(String(u))); + if (candidates.length === 0) return NextResponse.json({ error: 'No paper URL available', authors: [] }, { status: 400 }); + + // Fast/reliable path: attempt to download + parse PDF text from any candidate. 
+ for (const url of candidates) { + try { + const elapsed = Date.now() - startedAt; + const remaining = totalBudgetMs - elapsed; + if (remaining <= 0) break; + + const text = await fetchPdfText(url, { + timeoutMs: remaining, + maxBytes: 25_000_000, // allow larger PDFs under long budget + }); + // Emails are usually on the first page, but sometimes in footers/last page. + const head = text.slice(0, 450_000); + const tail = text.length > 200_000 ? text.slice(-200_000) : ''; + const sample = `${head}\n${tail}`; + + const emailsFromPdf = extractEmailsFromText(sample); // internal normalization handles PDF spacing + if (emailsFromPdf.length > 0) { + // Return emails in the new format (names derived when possible) + const authors = emailsFromPdf.map((email) => { + const name = deriveNameFromEmail(email); + return { + firstName: name.firstName, + lastName: name.lastName, + email: email, + }; + }); + return NextResponse.json({ authors: authors.sort((a, b) => a.email.localeCompare(b.email)) }); + } + // If we successfully parsed the PDF but found no emails, stop here (avoid slow Mino + timeouts). + return NextResponse.json({ authors: [], error: 'No emails found in the PDF text.' }); + } catch (e) { + console.warn('[EmailExtract] PDF parse attempt failed for', url, e); + } + } + + const goal = `Extract all author information from this paper. +For each author, extract their first name, last name, and email address. +If the URL is a PDF, open it and look for author information in the first pages and footers. +Return ONLY valid JSON with this exact schema: +{ "authors": [{ "firstName": string, "lastName": string, "email": string }] } +No markdown, no commentary. If you cannot find first or last name, use empty strings.`; + + // Mino fallback (best-effort): allow up to remaining budget. 
+ const elapsedBeforeMino = Date.now() - startedAt; + const remainingForMino = totalBudgetMs - elapsedBeforeMino; + if (remainingForMino <= 0) { + return NextResponse.json({ + authors: [], + error: 'Author extraction timed out after 5 minutes.', + }); + } + + const minoTarget = candidates.find((u) => u.toLowerCase().includes('pdf')) || candidates[0]; + // IMPORTANT: pass timeoutMs to ensure the underlying request is aborted (no background leak). + const raw = await runMinoAutomation(minoTarget, goal, false, { timeoutMs: remainingForMino }); + + if (!raw) { + return NextResponse.json({ authors: [], error: 'Author extraction timed out after 5 minutes.' }); + } + + const authors = findAuthorsArray(raw); + + // If we got authors with full info, return them + if (authors.length > 0) { + const normalized = authors.map(author => ({ + firstName: author.firstName.trim(), + lastName: author.lastName.trim(), + email: author.email.trim().replace(/[),.;:]+$/g, '').toLowerCase() + })).map((author) => { + if (author.email.includes('@') && (!author.firstName || !author.lastName)) { + const derived = deriveNameFromEmail(author.email); + return { + ...author, + firstName: author.firstName || derived.firstName, + lastName: author.lastName || derived.lastName, + }; + } + return author; + }).filter(author => author.email.includes('@')); + + if (normalized.length > 0) { + return NextResponse.json({ authors: normalized.sort((a, b) => a.email.localeCompare(b.email)) }); + } + } + + // Fallback: try to extract just emails + const emails = findEmailsArray(raw); + const normalizedEmails = Array.from(new Set(emails.flatMap((e) => extractEmailsFromText(e) || [e]))) + .map((e) => e.trim().replace(/[),.;:]+$/g, '').toLowerCase()) + .filter((e) => e.includes('@')); + + if (normalizedEmails.length === 0) { + return NextResponse.json({ authors: [], error: 'No author information found.' 
}); + } + + // Convert emails to author format + const authorsFromEmails = normalizedEmails.map(email => ({ + ...deriveNameFromEmail(email), + email: email, + })); + + return NextResponse.json({ authors: authorsFromEmails.sort((a, b) => a.email.localeCompare(b.email)) }); + } catch (error) { + console.error('Author Extract API Error:', error); + return NextResponse.json({ error: 'Failed to extract author information', authors: [] }, { status: 500 }); + } +} + diff --git a/research-sentry/app/api/export/bibtex/route.ts b/research-sentry/app/api/export/bibtex/route.ts new file mode 100644 index 0000000..072951a --- /dev/null +++ b/research-sentry/app/api/export/bibtex/route.ts @@ -0,0 +1,29 @@ +import { NextRequest, NextResponse } from 'next/server'; + +export async function POST(req: NextRequest) { + const body = await req.json().catch(() => ({})); + const papers = body?.papers; + if (!Array.isArray(papers)) { + return NextResponse.json({ error: 'papers[] is required' }, { status: 400 }); + } + + const escapeBibtex = (value: string) => + String(value ?? '') + .replace(/\\/g, '\\\\') + .replace(/[{}]/g, '\\$&'); + + const yearFrom = (dateValue: string) => + String(dateValue ?? '').match(/\b(19|20)\d{2}\b/)?.[0] ?? 
''; + + const bib = papers.map((p: any, i: number) => { + const key = 'paper' + i; + return '@article{' + key + + ',\n title={' + escapeBibtex(p.title) + + '},\n author={' + escapeBibtex(p.authors?.join(' and ') || '') + + '},\n year={' + yearFrom(p.publishedDate) + + '},\n url={' + escapeBibtex(p.url) + '}\n}'; + }).join('\n\n'); + return new NextResponse(bib, { + headers: { 'Content-Type': 'application/x-bibtex', 'Content-Disposition': 'attachment; filename=papers.bib' } + }); +} diff --git a/research-sentry/app/api/health/route.ts b/research-sentry/app/api/health/route.ts new file mode 100644 index 0000000..7d6e1ca --- /dev/null +++ b/research-sentry/app/api/health/route.ts @@ -0,0 +1,5 @@ +import { NextResponse } from 'next/server'; + +export async function GET() { + return NextResponse.json({ status: 'ok' }); +} diff --git a/research-sentry/app/api/search/text/route.ts b/research-sentry/app/api/search/text/route.ts new file mode 100644 index 0000000..2a34e21 --- /dev/null +++ b/research-sentry/app/api/search/text/route.ts @@ -0,0 +1,13 @@ +import { NextRequest, NextResponse } from 'next/server'; +import { parseSearchIntent } from '@/lib/intent-parser'; +import { searchResearchPapers } from '@/lib/search'; + +export const maxDuration = 300; + +export async function POST(req: NextRequest) { + const { query, sources } = await req.json(); + const criteria = await parseSearchIntent(query); + if (sources) criteria.sources = sources; + const results = await searchResearchPapers(criteria); + return NextResponse.json(results); +} diff --git a/research-sentry/app/api/search/voice/route.ts b/research-sentry/app/api/search/voice/route.ts new file mode 100644 index 0000000..cee48eb --- /dev/null +++ b/research-sentry/app/api/search/voice/route.ts @@ -0,0 +1,19 @@ +import { NextRequest, NextResponse } from 'next/server'; +import { transcribeAudio } from '@/lib/whisper'; +import { parseSearchIntent } from '@/lib/intent-parser'; +import { searchResearchPapers } from 
'@/lib/search'; + +export const maxDuration = 300; + +export async function POST(req: NextRequest) { + const form = await req.formData(); + const audio = form.get('audio'); + if (!audio || !(audio instanceof File)) { + return NextResponse.json({ error: 'audio file is required' }, { status: 400 }); + } + const buffer = Buffer.from(await audio.arrayBuffer()); + const transcript = await transcribeAudio(buffer); + const criteria = await parseSearchIntent(transcript); + const results = await searchResearchPapers(criteria); + return NextResponse.json({ ...results, transcript }); +} diff --git a/research-sentry/app/api/summarize/route.ts b/research-sentry/app/api/summarize/route.ts new file mode 100644 index 0000000..0a375be --- /dev/null +++ b/research-sentry/app/api/summarize/route.ts @@ -0,0 +1,21 @@ +import { NextRequest, NextResponse } from 'next/server'; +import { generatePaperSummary } from '@/lib/summarizer'; + +export const maxDuration = 120; // Allow time for generation + synthesis + +export async function POST(req: NextRequest) { + try { + const { paper, length } = await req.json(); + + if (!paper) { + return NextResponse.json({ error: 'Paper data required' }, { status: 400 }); + } + + const summary = await generatePaperSummary(paper, length); + return NextResponse.json({ summary }); + + } catch (error) { + console.error('Summary API Error:', error); + return NextResponse.json({ error: 'Failed to generate summary' }, { status: 500 }); + } +} diff --git a/research-sentry/app/globals.css b/research-sentry/app/globals.css new file mode 100644 index 0000000..b0f2b1b --- /dev/null +++ b/research-sentry/app/globals.css @@ -0,0 +1,104 @@ +@import url('https://fonts.googleapis.com/css2?family=Space+Grotesk:wght@300;400;500;600;700&family=Inter:wght@300;400;500;600;700;800;900&display=swap'); + +@tailwind base; +@tailwind components; +@tailwind utilities; + +* { + scroll-behavior: smooth; +} + +body { + background: radial-gradient(circle at 50% -20%, #1e293b 0%, #020617 
100%); + background-attachment: fixed; + min-height: 100vh; + font-family: 'Inter', system-ui, sans-serif; + color: #f8fafc; + position: relative; + overflow-x: hidden; +} + +/* Atmospheric glow effects */ +body::before { + content: ''; + position: fixed; + top: 0; + left: 0; + right: 0; + bottom: 0; + background: + radial-gradient(circle at 10% 20%, rgba(16, 185, 129, 0.05) 0%, transparent 40%), + radial-gradient(circle at 90% 80%, rgba(245, 158, 11, 0.05) 0%, transparent 40%); + pointer-events: none; + z-index: 0; +} + +@keyframes float { + + 0%, + 100% { + transform: translateY(0px); + } + + 50% { + transform: translateY(-10px); + } +} + +.float { + animation: float 3s ease-in-out infinite; +} + +/* Custom scrollbar - Emerald/Amber theme */ +::-webkit-scrollbar { + width: 8px; +} + +::-webkit-scrollbar-track { + background: #020617; +} + +::-webkit-scrollbar-thumb { + background: linear-gradient(to bottom, #10b981, #f59e0b); + border-radius: 4px; +} + +::-webkit-scrollbar-thumb:hover { + background: linear-gradient(to bottom, #059669, #d97706); +} + +/* Glassmorphism optimized for Obsidian theme */ +.glass { + background: rgba(15, 23, 42, 0.8); + backdrop-filter: blur(12px) saturate(150%); + border: 1px solid rgba(255, 255, 255, 0.05); +} + +.text-shimmer { + background: linear-gradient(90deg, + #10b981 0%, + #f59e0b 25%, + #34d399 50%, + #fbbf24 75%, + #10b981 100%); + background-size: 200% auto; + -webkit-background-clip: text; + -webkit-text-fill-color: transparent; + animation: shimmer 4s linear infinite; +} + +@keyframes shimmer { + to { + background-position: 200% center; + } +} + +@layer utilities { + .transition-smooth { + transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1); + } + + .glow-emerald { + box-shadow: 0 0 20px rgba(16, 185, 129, 0.2), 0 0 40px rgba(16, 185, 129, 0.1); + } +} \ No newline at end of file diff --git a/research-sentry/app/layout.tsx b/research-sentry/app/layout.tsx new file mode 100644 index 0000000..66504de --- /dev/null +++ 
b/research-sentry/app/layout.tsx @@ -0,0 +1,7 @@ +import './globals.css'; + +export const metadata = { title: 'Research Sentry' }; + +export default function RootLayout({ children }: { children: React.ReactNode }) { + return {children}; +} diff --git a/research-sentry/app/page.tsx b/research-sentry/app/page.tsx new file mode 100644 index 0000000..00b535e --- /dev/null +++ b/research-sentry/app/page.tsx @@ -0,0 +1,320 @@ +'use client'; + +import { useState } from 'react'; +import { Sparkles, Github, Zap, Mic, MessageSquare } from 'lucide-react'; +import SearchInterface from '@/components/SearchInterface'; +import ResultsGrid from '@/components/ResultsGrid'; +import ConversationInterface from '@/components/ConversationInterface'; +import LoadingSpinner from '@/components/LoadingSpinner'; +import ErrorMessage from '@/components/ErrorMessage'; +import CoPilotMode from '@/components/CoPilotMode'; +import WorkflowSelector from '@/components/WorkflowSelector'; +import CitationTracker from '@/components/CitationTracker'; +import TinyFishAgentTerminal from '@/components/TinyFishAgentTerminal'; +import { SearchResult, SourceType } from '@/lib/types'; + +export default function Home() { + const [results, setResults] = useState(null); + const [loading, setLoading] = useState(false); + const [error, setError] = useState(null); + const [selectedPapers, setSelectedPapers] = useState>(new Set()); + const [activeTab, setActiveTab] = useState<'search' | 'assistant' | 'workflows'>('search'); + const [coPilotActive, setCoPilotActive] = useState(false); + const [trackingPaperId, setTrackingPaperId] = useState(null); + const [searchTopic, setSearchTopic] = useState(''); + const [searchSources, setSearchSources] = useState([]); + + const handleTextSearch = async (query: string, sources: SourceType[]) => { + setLoading(true); + setError(null); + setSelectedPapers(new Set()); + setSearchTopic(query); + setSearchSources(sources); + + try { + const response = await fetch('/api/search/text', 
{ + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ query, sources }), + }); + + if (!response.ok) { + throw new Error(`Search failed: ${response.statusText}`); + } + + const data = await response.json(); + setResults(data); + } catch (err) { + setError(err instanceof Error ? err.message : 'An error occurred during search'); + } finally { + setLoading(false); + } + }; + + const handleVoiceSearch = async (audioBlob: Blob) => { + setLoading(true); + setError(null); + setSelectedPapers(new Set()); + setSearchTopic('Voice Discovery Pattern'); + setSearchSources(['arxiv', 'pubmed', 'semantic_scholar']); + + try { + const formData = new FormData(); + formData.append('audio', audioBlob, 'recording.webm'); + + const response = await fetch('/api/search/voice', { + method: 'POST', + body: formData, + }); + + if (!response.ok) { + throw new Error(`Voice search failed: ${response.statusText}`); + } + + const data = await response.json(); + setResults(data); + } catch (err) { + setError(err instanceof Error ? 
err.message : 'An error occurred during voice search'); + } finally { + setLoading(false); + } + }; + + const togglePaperSelection = (paperId: string) => { + setSelectedPapers(prev => { + const next = new Set(prev); + if (next.has(paperId)) { + next.delete(paperId); + } else { + next.add(paperId); + } + return next; + }); + }; + + const handleExport = async () => { + if (!results || selectedPapers.size === 0) return; + + const papersToExport = results.papers.filter(p => selectedPapers.has(p.id)); + + try { + const response = await fetch('/api/export/bibtex', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ papers: papersToExport }), + }); + + if (!response.ok) { + throw new Error('Export failed'); + } + + const blob = await response.blob(); + const url = window.URL.createObjectURL(blob); + const a = document.createElement('a'); + a.href = url; + a.download = 'papers.bib'; + document.body.appendChild(a); + a.click(); + window.URL.revokeObjectURL(url); + document.body.removeChild(a); + } catch (err) { + setError('Failed to export papers'); + } + }; + + const retrySearch = () => { + setError(null); + }; + + return ( +
+ {/* Header */} +
+
+
+ +
+
+

+ Research Sentry +

+
+

+ Your AI Research Co-Pilot +

+

+ Search academic papers using your voice or text. Powered by OpenAI, GPT-4, and TinyFish Web Agent. +

+ + {/* Features badges */} +
+
+ + 8+ Sources +
+
+ + AI Powered +
+
+ + Voice First +
+
+ + Export Ready +
+
+
+ + {/* Tabs */} +
+ + + + +
+ + {coPilotActive && results && ( + setCoPilotActive(false)} + /> + )} + + {/* Citation Tracker Modal */} + {trackingPaperId && results && ( +
+
+ + p.id === trackingPaperId)!} + /> +
+
+ )} + +
+ {activeTab === 'workflows' ? ( +
+ +
+ ) : activeTab === 'search' ? ( +
+ {/* Search Interface */} +
+ +
+ + {/* Loading State */} + {loading && ( +
+
+
+
+ AGENTIC DISCOVERY IN PROGRESS +
+

TinyFish Agent Operation

+

Real-time browser automation & cross-portal evidence extraction

+
+ + + +
+ +

Compiling findings from 8 research nodes

+
+
+ )} + + {/* Error State */} + {error && !loading && ( +
+ +
+ )} + + {/* Results */} + {!loading && results && ( +
+ setTrackingPaperId(id)} + /> +
+ )} + + {!loading && !results && !error && ( +
+

Perform a search to begin your discovery

+
+ )} +
+ ) : ( +
+ +
+ )} +
+ + {/* Footer */} +
+
+ + +
+
+
+ ); +} diff --git a/research-sentry/components/AudioPlayer.tsx b/research-sentry/components/AudioPlayer.tsx new file mode 100644 index 0000000..f42dd57 --- /dev/null +++ b/research-sentry/components/AudioPlayer.tsx @@ -0,0 +1,97 @@ +'use client'; + +import { useState, useRef, useEffect } from 'react'; +import { Play, Pause, FastForward, Loader2, Download } from 'lucide-react'; + +interface AudioPlayerProps { + src?: string; + title?: string; + onGenerate?: () => void; + isGenerating?: boolean; +} + +export default function AudioPlayer({ src, title, onGenerate, isGenerating }: AudioPlayerProps) { + const [isPlaying, setIsPlaying] = useState(false); + const [progress, setProgress] = useState(0); + const audioRef = useRef(null); + + useEffect(() => { + if (src && audioRef.current) { + audioRef.current.play().then(() => setIsPlaying(true)).catch(() => setIsPlaying(false)); + } + }, [src]); + + const togglePlay = () => { + if (!audioRef.current) return; + if (isPlaying) { + audioRef.current.pause(); + } else { + audioRef.current.play(); + } + setIsPlaying(!isPlaying); + }; + + const handleTimeUpdate = () => { + if (audioRef.current) { + const current = audioRef.current.currentTime; + const duration = audioRef.current.duration; + setProgress((current / duration) * 100); + } + }; + + const skipForward = () => { + if (audioRef.current) { + audioRef.current.currentTime += 15; + } + }; + + if (!src && !isGenerating && !onGenerate) return null; + + return ( +
+
+

{title || 'Audio Summary'}

+ {!src && !isGenerating && onGenerate && ( + + )} +
+ + {isGenerating && ( +
+ Generating AI Summary... +
+ )} + + {src && ( + <> +