From e697c1dd8c65f76c0d1783451f31a86693d751d0 Mon Sep 17 00:00:00 2001
From: VirenMohindra
Date: Wed, 11 Mar 2026 17:31:23 -0400
Subject: [PATCH] feat: add likes support, Twitter archive import, smart
re-import, and quote tweets
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
- Support importing Twitter/X likes alongside bookmarks (bookmarklet, console, file upload, Live Import API)
- Add Twitter data archive import (.js and .zip files from "Download your data")
- Smart re-import: update existing tweets when incoming data is richer (longer text, real author handles, new media), automatically re-queue for AI categorization
- Extract quoted tweet content for better categorization (a tweet quoting a tech thread no longer miscategorizes as health/wellness)
- Live Import API via Twitter's internal GraphQL endpoints for both bookmarks and likes
- Handle Twitter's user data structure changes (screen_name moved from legacy to core)
- Move Twitter bearer token to X_BEARER_TOKEN env var (was hardcoded)
- Update UI language: "bookmarks" → "tweets" where both sources are represented
- Add source filter (bookmarks/likes) to browse page
- Add 34 parser tests (vitest) covering all import formats and edge cases
- Update README and CLAUDE.md with all new features and import methods
- Add .gitignore entries for user data files and DB backups
---
.env.example | 5 +
.gitignore | 7 +-
CLAUDE.md | 8 +-
README.md | 110 +++--
app/ai-search/page.tsx | 2 +-
app/api/import/route.ts | 222 +++++----
app/api/import/twitter/route.ts | 132 +++---
app/bookmarks/page.tsx | 6 +-
app/categories/[slug]/page.tsx | 4 +-
app/categories/page.tsx | 8 +-
app/categorize/page.tsx | 6 +-
app/import/page.tsx | 168 ++++++-
app/layout.tsx | 2 +-
app/mindmap/page.tsx | 14 +-
app/page.tsx | 10 +-
app/settings/page.tsx | 16 +-
components/bookmark-card.tsx | 4 +-
lib/parser.test.ts | 474 +++++++++++++++++++
lib/parser.ts | 103 +++-
lib/twitter-api.ts | 3 +-
lib/upsert-tweet.ts | 143 ++++++
package-lock.json | 801 +++++++++++++++++++++++++++++++-
package.json | 7 +-
vitest.config.mts | 17 +
24 files changed, 2016 insertions(+), 256 deletions(-)
create mode 100644 lib/parser.test.ts
create mode 100644 lib/upsert-tweet.ts
create mode 100644 vitest.config.mts
diff --git a/.env.example b/.env.example
index 61d7e03..529ee3e 100644
--- a/.env.example
+++ b/.env.example
@@ -22,6 +22,11 @@ DATABASE_URL="file:./prisma/dev.db"
# Optional: custom API base URL (proxy or local model server)
# ANTHROPIC_BASE_URL=
+# ── Twitter/X Live Import (optional) ──────────────────────────────────
+# Required only for the /api/import/twitter Live Import endpoint.
+# This is the public app-only bearer token from the Twitter web client.
+# X_BEARER_TOKEN=your-twitter-bearer-token-here
+
# ── Access control (optional) ────────────────────────────────────────
# Set BOTH to enable HTTP Basic Auth on the entire app.
diff --git a/.gitignore b/.gitignore
index f9fa46a..a37e458 100644
--- a/.gitignore
+++ b/.gitignore
@@ -46,14 +46,19 @@ next-env.d.ts
# generated prisma client
/app/generated/prisma
-# Database files — never commit these (contain your private bookmarks)
+# Database files — never commit these (contain your private tweets)
prisma/dev.db
prisma/dev.db-journal
+prisma/dev.db.backup-*
dev.db
dev.db-journal
*.db
*.db-journal
+# User data exports — never commit
+bookmarks.json
+likes.json
+
# OS
.DS_Store
Thumbs.db
diff --git a/CLAUDE.md b/CLAUDE.md
index 29041ca..7df9bc2 100644
--- a/CLAUDE.md
+++ b/CLAUDE.md
@@ -1,6 +1,6 @@
# Siftly
-Self-hosted Twitter/X bookmark manager with AI-powered categorization, search, and visualization.
+Self-hosted Twitter/X bookmark & likes manager with AI-powered categorization, search, and visualization.
## Quick Setup
@@ -34,6 +34,7 @@ To verify it's working, hit: `GET /api/settings/cli-status`
```bash
npx next dev # Start dev server (port 3000)
npx tsc --noEmit # Type check
+npm test # Run parser tests (vitest)
npx prisma studio # Database GUI
npx prisma db push # Apply schema changes to DB
npm run build # Production build
@@ -45,7 +46,8 @@ npm run build # Production build
app/
api/
categorize/ # 4-stage AI pipeline (start/stop/status via SSE)
- import/ # Bookmark JSON import + dedup
+ import/ # Multi-format import (JSON, .js, .zip) with dedup + update-on-reimport
+ twitter/ # Live Import via Twitter GraphQL API (bookmarks + likes)
search/ai/ # FTS5 + Claude semantic search
settings/
cli-status/ # GET — returns Claude CLI auth status
@@ -68,7 +70,7 @@ lib/
vision-analyzer.ts # Image vision + semantic tagging
fts.ts # SQLite FTS5 full-text search
rawjson-extractor.ts # Entity extraction from tweet JSON
- parser.ts # Multi-format bookmark JSON parser
+ parser.ts # Multi-format parser (bookmarklet, console, Twitter archive .js/.zip)
exporter.ts # CSV / JSON / ZIP export
prisma/schema.prisma # SQLite schema (Bookmark, Category, MediaItem, Setting, ImportJob)
diff --git a/README.md b/README.md
index 14f36f9..d627307 100644
--- a/README.md
+++ b/README.md
@@ -3,7 +3,7 @@
Siftly
- Self-hosted Twitter/X bookmark manager with AI-powered organization
+ Self-hosted Twitter/X bookmark & likes manager with AI-powered organization
Import · Analyze · Categorize · Search · Explore
@@ -20,26 +20,26 @@
## What is Siftly?
-Siftly turns your Twitter/X bookmarks into a **searchable, categorized, visual knowledge base** — running entirely on your machine. No cloud, no subscriptions, no browser extensions required. Everything stays local except the AI API calls you configure.
+Siftly turns your Twitter/X **bookmarks and likes** into a **searchable, categorized, visual knowledge base** — running entirely on your machine. No cloud, no subscriptions, no browser extensions required. Everything stays local except the AI API calls you configure.
-It runs a **4-stage AI pipeline** on your bookmarks:
+It runs a **4-stage AI pipeline** on your tweets:
```
-📥 Import (built-in bookmarklet or console script — no extensions needed)
+📥 Import (bookmarklet, file upload, Twitter archive, or Live Import API)
↓
🏷️ Entity Extraction — mines hashtags, URLs, mentions, and 100+ known tools from raw tweet data (free, zero API calls)
↓
👁️ Vision Analysis — reads text, objects, and context from every image/GIF/video thumbnail (30–40 visual tags per image)
↓
-🧠 Semantic Tagging — generates 25–35 searchable tags per bookmark for AI-powered search
+🧠 Semantic Tagging — generates 25–35 searchable tags per tweet for AI-powered search
↓
-📂 Categorization — assigns each bookmark to 1–3 categories with confidence scores
+📂 Categorization — assigns each tweet to 1–3 categories with confidence scores
```
After the pipeline runs, you get:
-- **AI search** — find bookmarks by meaning, not just keywords (*"funny meme about crypto crashing"*)
-- **Interactive mindmap** — explore your entire bookmark graph visually
-- **Filtered browsing** — grid or list view, filter by category, media type, and date
+- **AI search** — find tweets by meaning, not just keywords (*"funny meme about crypto crashing"*)
+- **Interactive mindmap** — explore your entire tweet graph visually
+- **Filtered browsing** — grid or list view, filter by category, source (bookmarks/likes), media type, and date
- **Export tools** — download media, export as CSV / JSON / ZIP
---
@@ -120,33 +120,55 @@ New accounts include $5 free credit — enough for thousands of bookmarks at Hai
---
-## Importing Your Bookmarks
+## Importing Your Tweets
-Siftly has **built-in import tools** — no browser extensions required. Go to the **Import** page and choose either method:
+Siftly supports **4 import methods** for both bookmarks and likes — no browser extensions required.
### Method A — Bookmarklet *(Recommended)*
1. Go to **Import** in the Siftly sidebar
2. Drag the **"Export X Bookmarks"** link to your browser's bookmark bar
- *(or right-click the bookmark bar → Add Bookmark → paste the URL)*
-3. Go to [x.com/i/bookmarks](https://x.com/i/bookmarks) while logged in to X
-4. Click **"Export X Bookmarks"** in your bookmark bar — a purple button appears on the page
-5. Click **"▶ Auto-scroll"** — the tool scrolls through and captures all your bookmarks automatically
-6. When complete, click the purple **"Export N bookmarks"** button — `bookmarks.json` downloads
-7. Back in Siftly → **Import** → drop or upload the file
+3. Navigate to [x.com/i/bookmarks](https://x.com/i/bookmarks) (or your likes page) while logged in
+4. Click the bookmarklet — a purple capture button appears on the page
+5. Click **"▶ Auto-scroll"** — captures all visible tweets automatically
+6. Export → upload to Siftly
+
+The bookmarklet also captures **quoted tweet content** for more accurate categorization.
### Method B — Browser Console Script
-1. Go to [x.com/i/bookmarks](https://x.com/i/bookmarks) while logged in to X
-2. Open DevTools: press `F12` (Windows/Linux) or `⌘⌥J` (Mac), then go to the **Console** tab
-3. Copy the console script from the Siftly Import page, paste it into the console, and press Enter
-4. Click **"▶ Auto-scroll"** and wait for all bookmarks to be captured
-5. Click the export button — `bookmarks.json` downloads automatically
-6. Back in Siftly → **Import** → upload the file
+1. Open [x.com/i/bookmarks](https://x.com/i/bookmarks) with DevTools Console (`F12` or `⌘⌥J`)
+2. Paste the console script from the Import page, press Enter
+3. Auto-scroll → export → upload
+
+### Method C — Twitter Data Archive
+
+1. Request your data archive from Twitter: Settings → Your Account → Download an archive
+2. Extract the ZIP and find `like.js` or `bookmark.js`
+3. Upload the `.js` file (or the whole `.zip`) directly to Siftly
+
+Siftly auto-detects the source (bookmarks vs likes) from the archive filename.
+
+### Method D — Live Import API
+
+For direct API access using your Twitter session cookies:
+
+```bash
+curl -X POST http://localhost:3000/api/import/twitter \
+ -H 'Content-Type: application/json' \
+ -d '{"authToken":"YOUR_AUTH_TOKEN","ct0":"YOUR_CT0","source":"bookmark"}'
+```
+
+For likes, add `"source":"like"` and `"userId":"YOUR_USER_ID"`.
+
+> Requires `X_BEARER_TOKEN` in your `.env` file — see `.env.example`.
-### Re-importing
+### Smart Re-importing
-Re-import anytime — Siftly automatically skips duplicates and only adds new bookmarks.
+Re-import anytime — Siftly handles deduplication intelligently:
+- **Skips** tweets that already exist with equal or better data
+- **Updates** existing tweets when incoming data is richer (longer text from quoted tweets, real author handles replacing `@unknown`, new media)
+- Updated tweets are automatically re-queued for AI categorization
---
@@ -155,7 +177,7 @@ Re-import anytime — Siftly automatically skips duplicates and only adds new bo
**Categorization starts automatically as soon as you import.** You can also trigger it manually from:
- The **Import** page (after upload)
-- The **Mindmap** page (when bookmarks are uncategorized)
+- The **Mindmap** page (when tweets are uncategorized)
- The **Categorize** page in the sidebar
### The 4-Stage Pipeline
@@ -164,10 +186,14 @@ Re-import anytime — Siftly automatically skips duplicates and only adds new bo
|-------|-------------|
| **Entity Extraction** | Mines hashtags, URLs, @mentions, and 100+ known tool/product names from stored tweet JSON — free, zero API calls |
| **Vision Analysis** | Analyzes every image, GIF, and video thumbnail — OCR text, objects, scene, mood, meme templates, 30–40 visual tags per image |
-| **Semantic Tagging** | Generates 25–35 precise search tags per bookmark by combining tweet text + image context. Also extracts sentiment, people, and company names. |
-| **Categorization** | Assigns 1–3 categories per bookmark with confidence scores using all enriched data |
+| **Semantic Tagging** | Generates 25–35 precise search tags per tweet by combining tweet text + image context. Also extracts sentiment, people, and company names. |
+| **Categorization** | Assigns 1–3 categories per tweet with confidence scores using all enriched data |
-The pipeline is **incremental** — if interrupted, it picks up where it left off. Use **"Re-run everything (force all)"** to re-analyze bookmarks that were already processed.
+The pipeline is **incremental** — if interrupted, it picks up where it left off. Use **"Re-run everything (force all)"** to re-analyze tweets that were already processed.
+
+### Quote Tweets
+
+Siftly extracts quoted tweet content and appends it to the parent tweet text for better categorization. A tweet that says "The sleep debt is real" but quotes a thread about Claude Code will correctly be categorized as tech — not health.
---
@@ -175,7 +201,7 @@ The pipeline is **incremental** — if interrupted, it picks up where it left of
### 🔍 AI Search
-Natural language queries across all bookmark data:
+Natural language queries across all tweet data:
- *"funny meme about crypto crashing"*
- *"react hooks tutorial"*
@@ -186,17 +212,17 @@ Searches tweet text, image OCR, visual tags, semantic tags, and categories simul
### 🗺️ Mindmap
-Interactive force-directed graph showing all bookmarks organized by category:
+Interactive force-directed graph showing all tweets organized by category:
-- Expand/collapse any category to reveal its bookmarks
-- Click a bookmark node to open the original tweet on X
+- Expand/collapse any category to reveal its tweets
+- Click a tweet node to open the original on X
- Color-coded legend by category
-- If bookmarks aren't categorized yet, an inline **AI Categorize** button starts the pipeline without leaving the page
+- If tweets aren't categorized yet, an inline **AI Categorize** button starts the pipeline without leaving the page
### 📚 Browse & Filter
- **Grid view** (masonry layout) or **List view**
-- Filter by category, media type (photo / video), or search text
+- Filter by category, source (bookmarks / likes), media type (photo / video), or search text
- Sort by newest or oldest
- Pagination with 24 items per page
- Active filter chips — removable individually or all at once
@@ -241,6 +267,7 @@ All settings are manageable in the **Settings** page at `/settings` or via envir
| API Base URL | `ANTHROPIC_BASE_URL` | Custom endpoint for proxies or local Anthropic-compatible models |
| AI Model | Settings page only | Haiku 4.5 (default, fastest/cheapest), Sonnet 4.6, Opus 4.6 |
| OpenAI Key | Settings page only | Alternative provider if no Anthropic key is set |
+| X Bearer Token | `X_BEARER_TOKEN` | Required for Live Import API only (see `.env.example`) |
| Database | `DATABASE_URL` | SQLite file path (default: `file:./prisma/dev.db`) |
### Custom API Endpoint
@@ -266,9 +293,9 @@ siftly/
│ │ │ └── [slug]/ # Individual category operations
│ │ ├── categorize/ # 4-stage AI pipeline (start, status, stop)
│ │ ├── export/ # CSV, JSON, ZIP export
-│ │ ├── import/ # JSON file import with dedup + auto-pipeline trigger
+│ │ ├── import/ # Multi-format import with dedup + update-on-reimport
│ │ │ ├── bookmarklet/ # Bookmarklet-specific import endpoint
-│ │ │ └── twitter/ # Twitter-specific import endpoint
+│ │ │ └── twitter/ # Live Import via Twitter GraphQL API (bookmarks + likes)
│ │ ├── link-preview/ # Server-side OG metadata scraper
│ │ ├── media/ # Media proxy/download endpoint
│ │ ├── mindmap/ # Graph nodes + edges for visualization
@@ -306,7 +333,7 @@ siftly/
│ ├── image-context.ts # Shared image context builder
│ ├── fts.ts # SQLite FTS5 full-text search index
│ ├── rawjson-extractor.ts # Entity extraction from raw tweet JSON
-│ ├── parser.ts # Multi-format JSON parser
+│ ├── parser.ts # Multi-format JSON parser (bookmarklet, console, Twitter archive)
│ ├── exporter.ts # CSV, JSON, ZIP export
│ ├── types.ts # Shared TypeScript types
│ └── db.ts # Prisma client singleton
@@ -321,7 +348,7 @@ siftly/
### Database Schema
```
-Bookmark — tweet text, author, date, raw JSON, semantic tags, enrichment metadata
+Bookmark — tweet text, author, date, source (bookmark/like), raw JSON, semantic tags, enrichment metadata
├── MediaItem — images / videos / GIFs with AI-generated image tags
└── BookmarkCategory — category assignments with confidence scores (0–1)
@@ -377,6 +404,9 @@ npx next dev
# Type check
npx tsc --noEmit
+# Run tests
+npm test
+
# Open database GUI
npx prisma studio
@@ -410,7 +440,7 @@ Add domain strings to `KNOWN_TOOL_DOMAINS` in `lib/rawjson-extractor.ts` to have
- All data is stored **locally** in a SQLite file on your machine
- The only external calls are to the AI provider you configure (tweet text + image data)
- No telemetry, no tracking, no accounts required
-- Your bookmarks never touch any third-party server except your configured AI endpoint
+- Your tweets never touch any third-party server except your configured AI endpoint
---
diff --git a/app/ai-search/page.tsx b/app/ai-search/page.tsx
index e716e80..da23fb7 100644
--- a/app/ai-search/page.tsx
+++ b/app/ai-search/page.tsx
@@ -222,7 +222,7 @@ export default function AISearchPage() {
{searched && !loading && results.length === 0 && !error && (
-
No bookmarks matched that description. Try different words.
+
No tweets matched that description. Try different words.
)}
diff --git a/app/api/import/route.ts b/app/api/import/route.ts
index 9842712..fb76ed4 100644
--- a/app/api/import/route.ts
+++ b/app/api/import/route.ts
@@ -1,6 +1,48 @@
import { NextRequest, NextResponse } from 'next/server'
import prisma from '@/lib/db'
-import { parseBookmarksJson } from '@/lib/parser'
+import { parseTweetsJson, parseTweetsWithMeta } from '@/lib/parser'
+import type { ParsedTweet } from '@/lib/parser'
+import { upsertTweets, type IncomingTweet } from '@/lib/upsert-tweet'
+import JSZip from 'jszip'
+
+async function extractArchiveFiles(zipBuffer: ArrayBuffer): Promise<{ filename: string; content: string }[]> {
+ const zip = await JSZip.loadAsync(zipBuffer)
+ const results: { filename: string; content: string }[] = []
+ for (const [path, entry] of Object.entries(zip.files)) {
+ if (entry.dir) continue
+ const name = path.split('/').pop() ?? ''
+ // Match like.js, bookmark.js, and partitioned files like like.part0.js
+ if (/^(like|bookmark)(\.part\d+)?\.js$/.test(name)) {
+ const content = await entry.async('string')
+ results.push({ filename: name, content })
+ }
+ }
+ return results
+}
+
+function toIncomingTweets(bookmarks: ParsedTweet[], source: string): IncomingTweet[] {
+ return bookmarks.map((b) => ({
+ tweetId: b.tweetId,
+ text: b.text,
+ authorHandle: b.authorHandle,
+ authorName: b.authorName,
+ tweetCreatedAt: b.tweetCreatedAt,
+ rawJson: b.rawJson,
+ source,
+ media: b.media,
+ }))
+}
+
+function resolveSource(
+ sourceParam: string | undefined,
+ detectedSource: 'like' | 'bookmark' | undefined,
+ jsonSource: string | undefined,
+): string {
+ if (detectedSource) return detectedSource
+ if (sourceParam === 'like' || sourceParam === 'bookmark') return sourceParam
+ if (jsonSource === 'like') return 'like'
+ return 'bookmark'
+}
export async function POST(request: NextRequest): Promise<NextResponse> {
let formData: FormData
@@ -21,13 +63,7 @@ export async function POST(request: NextRequest): Promise {
const filename =
file instanceof File ? file.name : 'bookmarks.json'
-
- let jsonString: string
- try {
- jsonString = await file.text()
- } catch {
- return NextResponse.json({ error: 'Failed to read file content' }, { status: 400 })
- }
+ const ext = filename.split('.').pop()?.toLowerCase() ?? ''
// Create an import job to track progress
const importJob = await prisma.importJob.create({
@@ -39,9 +75,98 @@ export async function POST(request: NextRequest): Promise {
},
})
- let parsedBookmarks
try {
- parsedBookmarks = parseBookmarksJson(jsonString)
+ let totalParsed = 0
+ let importedCount = 0
+ let skippedCount = 0
+ let updatedCount = 0
+ let erroredCount = 0
+
+ if (ext === 'zip') {
+ // --- ZIP archive: extract like.js / bookmark.js files ---
+ const zipBuffer = await file.arrayBuffer()
+ const archiveFiles = await extractArchiveFiles(zipBuffer)
+
+ if (archiveFiles.length === 0) {
+ await prisma.importJob.update({
+ where: { id: importJob.id },
+ data: { status: 'error', errorMessage: 'No like.js or bookmark.js files found in ZIP' },
+ })
+ return NextResponse.json(
+ { error: 'No like.js or bookmark.js files found in ZIP' },
+ { status: 422 }
+ )
+ }
+
+ for (const archiveFile of archiveFiles) {
+ const { tweets, detectedSource } = parseTweetsWithMeta(archiveFile.content)
+ const source = resolveSource(sourceParam ?? undefined, detectedSource, undefined)
+ const result = await upsertTweets(toIncomingTweets(tweets, source))
+ totalParsed += tweets.length
+ importedCount += result.imported
+ skippedCount += result.skipped
+ updatedCount += result.updated
+ erroredCount += result.errored
+ }
+ } else if (ext === 'js') {
+ // --- Archive .js file (like.js, bookmark.js) ---
+ const content = await file.text()
+ const { tweets, detectedSource } = parseTweetsWithMeta(content)
+ const source = resolveSource(sourceParam ?? undefined, detectedSource, undefined)
+ totalParsed = tweets.length
+ const result = await upsertTweets(toIncomingTweets(tweets, source))
+ importedCount = result.imported
+ skippedCount = result.skipped
+ updatedCount = result.updated
+ erroredCount = result.errored
+ } else {
+ // --- JSON file ---
+ let jsonString: string
+ try {
+ jsonString = await file.text()
+ } catch {
+ await prisma.importJob.update({
+ where: { id: importJob.id },
+ data: { status: 'error', errorMessage: 'Failed to read file content' },
+ })
+ return NextResponse.json({ error: 'Failed to read file content' }, { status: 400 })
+ }
+
+ const parsedTweets = parseTweetsJson(jsonString)
+
+ // Detect source from JSON payload
+ let jsonSource: string | undefined
+ try {
+ const parsed = JSON.parse(jsonString)
+ if (typeof parsed?.source === 'string') jsonSource = parsed.source
+ } catch { /* already parsed above */ }
+ const source = resolveSource(sourceParam ?? undefined, undefined, jsonSource)
+
+ totalParsed = parsedTweets.length
+ const result = await upsertTweets(toIncomingTweets(parsedTweets, source))
+ importedCount = result.imported
+ skippedCount = result.skipped
+ updatedCount = result.updated
+ erroredCount = result.errored
+ }
+
+ await prisma.importJob.update({
+ where: { id: importJob.id },
+ data: {
+ status: 'done',
+ totalCount: totalParsed,
+ processedCount: importedCount,
+ },
+ })
+
+ return NextResponse.json({
+ jobId: importJob.id,
+ imported: importedCount,
+ skipped: skippedCount,
+ updated: updatedCount,
+ errored: erroredCount,
+ parsed: totalParsed,
+ })
} catch (err) {
await prisma.importJob.update({
where: { id: importJob.id },
@@ -51,83 +176,8 @@ export async function POST(request: NextRequest): Promise {
},
})
return NextResponse.json(
- { error: `Failed to parse bookmarks JSON: ${err instanceof Error ? err.message : String(err)}` },
+ { error: `Import failed: ${err instanceof Error ? err.message : String(err)}` },
{ status: 422 }
)
}
-
- // Determine source: formData param > JSON field > default "bookmark"
- let jsonSource: string | undefined
- try {
- const parsed = JSON.parse(jsonString)
- if (typeof parsed?.source === 'string') jsonSource = parsed.source
- } catch { /* already parsed above */ }
- const source = (sourceParam === 'like' || sourceParam === 'bookmark')
- ? sourceParam
- : (jsonSource === 'like' ? 'like' : 'bookmark')
-
- await prisma.importJob.update({
- where: { id: importJob.id },
- data: { totalCount: parsedBookmarks.length },
- })
-
- let importedCount = 0
- let skippedCount = 0
-
- for (const bookmark of parsedBookmarks) {
- try {
- const existing = await prisma.bookmark.findUnique({
- where: { tweetId: bookmark.tweetId },
- select: { id: true },
- })
-
- if (existing) {
- skippedCount++
- continue
- }
-
- const created = await prisma.bookmark.create({
- data: {
- tweetId: bookmark.tweetId,
- text: bookmark.text,
- authorHandle: bookmark.authorHandle,
- authorName: bookmark.authorName,
- tweetCreatedAt: bookmark.tweetCreatedAt,
- rawJson: bookmark.rawJson,
- source,
- },
- })
-
- if (bookmark.media.length > 0) {
- await prisma.mediaItem.createMany({
- data: bookmark.media.map((m) => ({
- bookmarkId: created.id,
- type: m.type,
- url: m.url,
- thumbnailUrl: m.thumbnailUrl ?? null,
- })),
- })
- }
-
- importedCount++
- } catch (err) {
- console.error(`Failed to import tweet ${bookmark.tweetId}:`, err)
- skippedCount++
- }
- }
-
- await prisma.importJob.update({
- where: { id: importJob.id },
- data: {
- status: 'done',
- processedCount: importedCount,
- },
- })
-
- return NextResponse.json({
- jobId: importJob.id,
- imported: importedCount,
- skipped: skippedCount,
- parsed: parsedBookmarks.length,
- })
}
diff --git a/app/api/import/twitter/route.ts b/app/api/import/twitter/route.ts
index 3154484..84aa008 100644
--- a/app/api/import/twitter/route.ts
+++ b/app/api/import/twitter/route.ts
@@ -1,7 +1,7 @@
import { NextRequest, NextResponse } from 'next/server'
-import prisma from '@/lib/db'
+import { upsertTweets, type IncomingTweet } from '@/lib/upsert-tweet'
-const BEARER = 'AAAAAAAAAAAAAAAAAAAAANRILgAAAAAAnNwIzUejRCOuH5E6I%2BxMb1nYFAA%3DUognEfK4ZPxYowpr4nMskopkC%2FDO'
+const BEARER = process.env.X_BEARER_TOKEN ?? ''
const FEATURES = JSON.stringify({
graphql_timeline_v2_bookmark_timeline: true,
@@ -32,7 +32,7 @@ const FEATURES = JSON.stringify({
// filter by "graphql", find the "Likes" request, and grab the ID from the URL path.
const ENDPOINTS = {
bookmark: {
- queryId: 'j5KExFXy1niL_uGnBhHNxA',
+ queryId: 'BBxBluh79axE_HJzZPcBDw',
operationName: 'Bookmarks',
referer: 'https://x.com/i/bookmarks',
    getInstructions: (d: Record<string, unknown>): unknown[] =>
@@ -40,14 +40,14 @@ const ENDPOINTS = {
(d as any)?.data?.bookmark_timeline_v2?.timeline?.instructions ?? [],
},
like: {
- // PLACEHOLDER — you must replace this with the real query ID from x.com Network tab
- queryId: 'REPLACE_ME',
+ queryId: 'zPJ36q7-jHyvvHcmx8yymg',
operationName: 'Likes',
referer: 'https://x.com',
    getInstructions: (d: Record<string, unknown>): unknown[] => {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const a = d as any
- return a?.data?.user?.result?.timeline_v2?.timeline?.instructions
+ return a?.data?.user?.result?.timeline?.timeline?.instructions
+ ?? a?.data?.user?.result?.timeline_v2?.timeline?.instructions
?? a?.data?.liked_tweets_timeline?.timeline?.instructions
?? []
},
@@ -94,16 +94,29 @@ interface ArticleResult {
content_state?: { blocks?: ArticleBlock[] }
}
+interface UserResult {
+ legacy?: UserLegacy
+ core?: UserLegacy // New Twitter structure puts screen_name/name here
+}
+
interface TweetResult {
__typename?: string
rest_id?: string
legacy?: TweetLegacy
- core?: { user_results?: { result?: { legacy?: UserLegacy } } }
+ core?: { user_results?: { result?: UserResult } }
note_tweet?: { note_tweet_results?: { result?: { text?: string } } }
article?: { article_results?: { result?: ArticleResult } }
+ quoted_status_result?: { result?: TweetResult }
tweet?: TweetResult
}
+function getUserInfo(userResult?: UserResult): { screen_name: string; name: string } {
+ return {
+ screen_name: userResult?.legacy?.screen_name ?? userResult?.core?.screen_name ?? 'unknown',
+ name: userResult?.legacy?.name ?? userResult?.core?.name ?? 'Unknown',
+ }
+}
+
async function fetchPage(authToken: string, ct0: string, source: Source, cursor?: string, userId?: string) {
const cfg = ENDPOINTS[source]
const variables = JSON.stringify({
@@ -193,24 +206,42 @@ function articleBlocksText(article: ArticleResult): string {
}
function tweetFullText(tweet: TweetResult): string {
+ let text: string
if (tweet.note_tweet?.note_tweet_results?.result?.text) {
- return decodeHtmlEntities(tweet.note_tweet.note_tweet_results.result.text)
- }
- const article = tweet.article?.article_results?.result
- if (article) {
- const parts: string[] = []
- if (article.title) parts.push(article.title)
- if (article.content) parts.push(article.content)
-
- // Fallback: some X articles ship content in content_state.blocks
- if (parts.length === 0) {
- const blocks = articleBlocksText(article)
- if (blocks) parts.push(blocks)
+ text = decodeHtmlEntities(tweet.note_tweet.note_tweet_results.result.text)
+ } else {
+ const article = tweet.article?.article_results?.result
+ if (article) {
+ const parts: string[] = []
+ if (article.title) parts.push(article.title)
+ if (article.content) parts.push(article.content)
+
+ // Fallback: some X articles ship content in content_state.blocks
+ if (parts.length === 0) {
+ const blocks = articleBlocksText(article)
+ if (blocks) parts.push(blocks)
+ }
+
+ text = parts.length > 0 ? decodeHtmlEntities(parts.join('\n\n')) : decodeHtmlEntities(tweet.legacy?.full_text ?? '')
+ } else {
+ text = decodeHtmlEntities(tweet.legacy?.full_text ?? '')
}
+ }
- if (parts.length > 0) return decodeHtmlEntities(parts.join('\n\n'))
+ // Append quoted tweet content for better categorization
+ let qt = tweet.quoted_status_result?.result
+ if (qt?.__typename === 'TweetWithVisibilityResults' && qt.tweet) {
+ qt = qt.tweet
}
- return decodeHtmlEntities(tweet.legacy?.full_text ?? '')
+ if (qt) {
+ const qtText = qt.legacy?.full_text || qt.note_tweet?.note_tweet_results?.result?.text
+ const qtAuthor = getUserInfo(qt.core?.user_results?.result).screen_name
+ if (qtText) {
+ text += `\n\n[Quote @${qtAuthor}]: ${decodeHtmlEntities(qtText)}`
+ }
+ }
+
+ return text
}
function extractMedia(tweet: TweetResult) {
@@ -254,6 +285,10 @@ export async function POST(request: NextRequest): Promise {
const source: Source = body.source === 'like' ? 'like' : 'bookmark'
const userId = body.userId?.trim()
+ if (!BEARER) {
+ return NextResponse.json({ error: 'X_BEARER_TOKEN is not configured. Add it to your .env file.' }, { status: 500 })
+ }
+
if (!authToken?.trim() || !ct0?.trim()) {
return NextResponse.json({ error: 'authToken and ct0 are required' }, { status: 400 })
}
@@ -264,6 +299,8 @@ export async function POST(request: NextRequest): Promise {
let imported = 0
let skipped = 0
+ let updated = 0
+ let errored = 0
let cursor: string | undefined
try {
@@ -271,49 +308,30 @@ export async function POST(request: NextRequest): Promise {
const data = await fetchPage(authToken.trim(), ct0.trim(), source, cursor, userId)
const { tweets, nextCursor } = parsePage(data, source)
- for (const tweet of tweets) {
- if (!tweet.rest_id) continue
-
- const exists = await prisma.bookmark.findUnique({
- where: { tweetId: tweet.rest_id },
- select: { id: true },
- })
-
- if (exists) {
- skipped++
- continue
- }
-
- const media = extractMedia(tweet)
- const userLegacy = tweet.core?.user_results?.result?.legacy ?? {}
-
- const created = await prisma.bookmark.create({
- data: {
- tweetId: tweet.rest_id,
+ // Convert raw tweets to IncomingTweet format for the shared upsert
+ const incoming: IncomingTweet[] = tweets
+ .filter((t) => t.rest_id)
+ .map((tweet) => {
+ const { screen_name, name } = getUserInfo(tweet.core?.user_results?.result)
+ return {
+ tweetId: tweet.rest_id!,
text: tweetFullText(tweet),
- authorHandle: userLegacy.screen_name ?? 'unknown',
- authorName: userLegacy.name ?? 'Unknown',
+ authorHandle: screen_name,
+ authorName: name,
tweetCreatedAt: tweet.legacy?.created_at
? new Date(tweet.legacy.created_at)
: null,
rawJson: JSON.stringify(tweet),
source,
- },
+ media: extractMedia(tweet),
+ }
})
- if (media.length > 0) {
- await prisma.mediaItem.createMany({
- data: media.map((m) => ({
- bookmarkId: created.id,
- type: m.type,
- url: m.url,
- thumbnailUrl: m.thumbnailUrl ?? null,
- })),
- })
- }
-
- imported++
- }
+ const result = await upsertTweets(incoming)
+ imported += result.imported
+ skipped += result.skipped
+ updated += result.updated
+ errored += result.errored
if (!nextCursor || tweets.length === 0) break
cursor = nextCursor
@@ -325,5 +343,5 @@ export async function POST(request: NextRequest): Promise {
)
}
- return NextResponse.json({ imported, skipped })
+ return NextResponse.json({ imported, skipped, updated, errored })
}
diff --git a/app/bookmarks/page.tsx b/app/bookmarks/page.tsx
index 260fa5e..b318bce 100644
--- a/app/bookmarks/page.tsx
+++ b/app/bookmarks/page.tsx
@@ -398,11 +398,11 @@ function BookmarksPageInner() {
{total > 0 ? (
<>
{total.toLocaleString()}
- {' '}bookmark{total !== 1 ? 's' : ''}
+ {' '}tweet{total !== 1 ? 's' : ''}
{filters.q && for "{filters.q}" }
>
) : (
- 'No bookmarks found'
+ 'No tweets found'
)}
@@ -421,7 +421,7 @@ function BookmarksPageInner() {
- No bookmarks match your filters
+ No tweets match your filters
Try adjusting your search or removing some filters.
diff --git a/app/categories/[slug]/page.tsx b/app/categories/[slug]/page.tsx
index f2b99d0..4784541 100644
--- a/app/categories/[slug]/page.tsx
+++ b/app/categories/[slug]/page.tsx
@@ -130,7 +130,7 @@ export default function CategoryPage() {
{category.description && (
{category.description}
)}
- {total.toLocaleString()} bookmark{total !== 1 ? 's' : ''}
+ {total.toLocaleString()} tweet{total !== 1 ? 's' : ''}
- No bookmarks in this category
+ No tweets in this category
)}
diff --git a/app/categories/page.tsx b/app/categories/page.tsx
index 7dff5ff..b9da79a 100644
--- a/app/categories/page.tsx
+++ b/app/categories/page.tsx
@@ -271,8 +271,8 @@ export default function CategoriesPage() {
{loading
? 'Loading your categories...'
: categories.length > 0
- ? `${totalBookmarks.toLocaleString()} bookmarks across ${categories.length} categories`
- : 'Organize your bookmarks by topic'}
+ ? `${totalBookmarks.toLocaleString()} tweets across ${categories.length} categories`
+ : 'Organize your tweets by topic'}
No categories yet
- Create your first category to start organizing your bookmarks by topic.
+ Create your first category to start organizing your tweets by topic.
setModalOpen(true)}
@@ -331,7 +331,7 @@ export default function CategoriesPage() {
AI Categorize
{' '}
- to automatically assign bookmarks to your categories.
+ to automatically assign tweets to your categories.
)}
diff --git a/app/categorize/page.tsx b/app/categorize/page.tsx
index 3670010..0344824 100644
--- a/app/categorize/page.tsx
+++ b/app/categorize/page.tsx
@@ -38,12 +38,12 @@ const STAGE_INFO: Record, { label: string; icon: React.ReactN
enrichment: {
label: 'Generating semantic tags',
icon: ,
- desc: 'Creating 30-50 searchable tags per bookmark for AI search',
+ desc: 'Creating 30-50 searchable tags per tweet for AI search',
},
categorize: {
label: 'Categorizing',
icon: ,
- desc: 'Assigning each bookmark to the most relevant categories',
+ desc: 'Assigning each tweet to the most relevant categories',
},
parallel: {
label: 'Processing all stages in parallel',
@@ -139,7 +139,7 @@ export default function CategorizePage() {
AI Categorization
- Categorize Bookmarks
+ Categorize Tweets
4-stage AI pipeline: vision analysis → entity extraction → semantic tagging → categorization.
diff --git a/app/import/page.tsx b/app/import/page.tsx
index 255bf46..646c1df 100644
--- a/app/import/page.tsx
+++ b/app/import/page.tsx
@@ -2,11 +2,11 @@
import { useState, useRef, useCallback, useEffect } from 'react'
import Link from 'next/link'
-import { Upload, CheckCircle, ChevronRight, Loader2, Copy, Check, ExternalLink, Sparkles, Eye, Tag, Brain, Layers, StopCircle, RefreshCw, Clock, KeyRound, Trash2, AlertCircle, User, LogOut } from 'lucide-react'
+import { Upload, CheckCircle, ChevronRight, Loader2, Copy, Check, ExternalLink, Sparkles, Eye, Tag, Brain, Layers, StopCircle, RefreshCw, Clock, AlertCircle, User, LogOut } from 'lucide-react'
import * as Progress from '@radix-ui/react-progress'
type Step = 1 | 2 | 3
-type Method = 'bookmarklet' | 'console' | 'live'
+type Method = 'bookmarklet' | 'console' | 'live' | 'archive'
interface ImportResult {
imported: number
@@ -48,12 +48,12 @@ const STAGE_INFO: Record, { label: string; icon: React.ReactN
enrichment: {
label: 'Generating semantic tags',
icon: ,
- desc: 'Creating 30-50 searchable tags per bookmark for AI search',
+ desc: 'Creating 30-50 searchable tags per tweet for AI search',
},
categorize: {
label: 'Categorizing',
icon: ,
- desc: 'Assigning each bookmark to the most relevant categories',
+ desc: 'Assigning each tweet to the most relevant categories',
},
parallel: {
label: 'Processing all stages in parallel',
@@ -106,11 +106,17 @@ const BOOKMARKLET_SCRIPT = `(async function(){
}
return thumb?{type:'photo',url:thumb}:null;
}).filter(Boolean);
+ var qt=t.quoted_status_result&&t.quoted_status_result.result;
+ var qtLeg=qt&&(qt.legacy||{});
+ var qtUsr=qt&&qt.core&&qt.core.user_results&&qt.core.user_results.result&&qt.core.user_results.result.legacy||{};
+ var quotedText=qtLeg&&qtLeg.full_text?qtLeg.full_text:'';
+ var quotedAuthor=qtUsr.screen_name||'';
all.push({id:t.rest_id,author:usr.name||'Unknown',handle:'@'+(usr.screen_name||'unknown'),
avatar:usr.profile_image_url_https||'',timestamp:leg.created_at||'',
text:leg.full_text||leg.text||'',media:media,
hashtags:(leg.entities&&leg.entities.hashtags||[]).map(function(h){return h.text;}),
- urls:(leg.entities&&leg.entities.urls||[]).map(function(u){return u.expanded_url;}).filter(Boolean)});
+ urls:(leg.entities&&leg.entities.urls||[]).map(function(u){return u.expanded_url;}).filter(Boolean),
+ quotedText:quotedText,quotedAuthor:quotedAuthor});
btn.textContent='Export '+all.length+' '+label+' \u2192';
}
function processEntry(e){
@@ -234,11 +240,17 @@ const CONSOLE_SCRIPT = `(async function() {
}
return thumb ? { type: 'photo', url: thumb } : null;
}).filter(Boolean);
+ const qt = t.quoted_status_result?.result;
+ const qtLeg = qt?.legacy ?? {};
+ const qtUsr = qt?.core?.user_results?.result?.legacy ?? {};
+ const quotedText = qtLeg.full_text ?? '';
+ const quotedAuthor = qtUsr.screen_name ?? '';
all.push({
id: t.rest_id, author: usr.name ?? 'Unknown', handle: '@' + (usr.screen_name ?? 'unknown'),
timestamp: leg.created_at ?? '', text: leg.full_text ?? leg.text ?? '', media,
hashtags: (leg.entities?.hashtags ?? []).map(h => h.text),
- urls: (leg.entities?.urls ?? []).map(u => u.expanded_url).filter(Boolean)
+ urls: (leg.entities?.urls ?? []).map(u => u.expanded_url).filter(Boolean),
+ quotedText, quotedAuthor
});
btn.textContent = \`Export \${all.length} \${label} →\`;
}
@@ -448,7 +460,7 @@ function UploadZone({ onFile }: { onFile: (file: File) => void }) {
e.preventDefault()
setDragging(false)
const file = e.dataTransfer.files[0]
- if (file && file.name.endsWith('.json')) onFile(file)
+ if (file && (file.name.endsWith('.json') || file.name.endsWith('.js') || file.name.endsWith('.zip'))) onFile(file)
}, [onFile])
const handleFileChange = useCallback((e: React.ChangeEvent) => {
@@ -467,9 +479,9 @@ function UploadZone({ onFile }: { onFile: (file: File) => void }) {
}`}
>
- Drop your JSON file here
+ Drop your JSON, JS, or ZIP file here
or click to browse
-
+
)
}
@@ -879,15 +891,106 @@ function LiveImportTab({ onSynced }: { onSynced: (result: ImportResult) => void
)
}
-function InstructionsStep({ onFile, importSource, onLiveSynced }: { onFile: (file: File) => void; importSource: 'bookmark' | 'like'; onLiveSynced: (result: ImportResult) => void }) {
+function ArchiveTab({ onFile }: { onFile: (file: File) => void }) {
+ const steps = [
+ {
+ num: 1,
+ title: (
+
+ Go to{' '}
+
+ x.com → Settings → Download an archive of your data
+
+
+ ),
+ content: (
+
+ You may need to re-verify your password. Click Request archive if you haven't already.
+
+ ),
+ },
+ {
+ num: 2,
+ title: 'Wait for the email from X, then download and extract the ZIP',
+ content: (
+
+ X will email you when your archive is ready. This can take anywhere from a few minutes to 24+ hours. Download and unzip the file.
+
+ ),
+ },
+ {
+ num: 3,
+ title: (
+
+ Upload like.js or{' '}
+ bookmark.js from the{' '}
+ data/ folder
+
+ ),
+ content: (
+
+
+ Inside the extracted archive, look for data/like.js (your likes) or{' '}
+ data/bookmark.js (your bookmarks).
+
+
You can also upload the whole ZIP file directly — Siftly will extract the right files automatically.
+
+ ),
+ },
+ ]
+
+ return (
+
+
+
+
+ Twitter/X Archive Import
+
+
Import from your official X data archive. This includes all your likes and bookmarks — even very old ones that may no longer load on the website.
+
The source (likes vs bookmarks) is detected automatically from the file.
+
+
+
+ {steps.map((step, i) => (
+
+
+ {step.num}
+
+
+
{step.title}
+ {step.content}
+
+
+ ))}
+
+
+
+
Upload your archive file
+
+
+
+ )
+}
+
+function InstructionsStep({ onFile, importSource, onLiveSynced, onMethodChange }: { onFile: (file: File) => void; importSource: 'bookmark' | 'like'; onLiveSynced: (result: ImportResult) => void; onMethodChange?: (method: Method) => void }) {
const [method, setMethod] = useState('bookmarklet')
+ function changeMethod(m: Method) {
+ setMethod(m)
+ onMethodChange?.(m)
+ }
+
return (
{/* Method tabs */}
setMethod('live')}
+ onClick={() => changeMethod('live')}
className={`flex-1 px-3 py-2 rounded-lg text-sm font-medium transition-all ${
method === 'live'
? 'bg-zinc-900 text-zinc-100 shadow-sm'
@@ -899,7 +1002,7 @@ function InstructionsStep({ onFile, importSource, onLiveSynced }: { onFile: (fil
Recommended
setMethod('bookmarklet')}
+ onClick={() => changeMethod('bookmarklet')}
className={`flex-1 px-3 py-2 rounded-lg text-sm font-medium transition-all ${
method === 'bookmarklet'
? 'bg-zinc-900 text-zinc-100 shadow-sm'
@@ -909,7 +1012,7 @@ function InstructionsStep({ onFile, importSource, onLiveSynced }: { onFile: (fil
Bookmarklet
setMethod('console')}
+ onClick={() => changeMethod('console')}
className={`flex-1 px-3 py-2 rounded-lg text-sm font-medium transition-all ${
method === 'console'
? 'bg-zinc-900 text-zinc-100 shadow-sm'
@@ -918,10 +1021,23 @@ function InstructionsStep({ onFile, importSource, onLiveSynced }: { onFile: (fil
>
{'>'} Console
+ changeMethod('archive')}
+ className={`flex-1 px-3 py-2 rounded-lg text-sm font-medium transition-all ${
+ method === 'archive'
+ ? 'bg-zinc-900 text-zinc-100 shadow-sm'
+ : 'text-zinc-500 hover:text-zinc-300'
+ }`}
+ >
+
+ Archive
+
{method === 'live' ? (
+ ) : method === 'archive' ? (
+
) : method === 'bookmarklet' ? (
) : (
@@ -1204,14 +1320,14 @@ function CategorizeStep({ importedCount, force = false }: { importedCount: numbe
Already up to date
-
All bookmarks in this file were already imported
+
All tweets in this file were already imported
- View your bookmarks
+ View your tweets
- {uncategorized.toLocaleString()} bookmarks not yet processed
+ {uncategorized.toLocaleString()} tweets not yet processed
- Re-analyze all {totalBookmarks.toLocaleString()} bookmarks from scratch
+ Re-analyze all {totalBookmarks.toLocaleString()} tweets from scratch
('bookmarklet')
// Auto-resume to step 3 if the pipeline is already running (e.g. user navigated away and back)
useEffect(() => {
@@ -1317,9 +1434,16 @@ export default function ImportPage() {
setImportError('')
try {
+ // Detect archive files (.js like like.js/bookmark.js) and auto-set source
+ const isArchiveJs = file.name.endsWith('.js')
+ if (isArchiveJs) {
+ if (file.name.startsWith('like')) setImportSource('like')
+ else if (file.name.startsWith('bookmark')) setImportSource('bookmark')
+ }
+
const formData = new FormData()
formData.append('file', file)
- formData.append('source', importSource)
+ formData.append('source', isArchiveJs && file.name.startsWith('like') ? 'like' : importSource)
const res = await fetch('/api/import', { method: 'POST', body: formData })
const data = await res.json()
@@ -1339,7 +1463,7 @@ export default function ImportPage() {
if (parsed === 0) {
// Parser couldn't extract any bookmarks — likely wrong format
- throw new Error('Could not parse any bookmarks from this file. Make sure you\'re uploading a Twitter/X bookmarks JSON export.')
+ throw new Error('Could not parse any bookmarks from this file. Make sure you\'re uploading a Twitter/X bookmarks or likes export.')
}
// Auto-advance to categorization after a brief moment to show the result
@@ -1360,8 +1484,8 @@ export default function ImportPage() {
Export your X/Twitter {importSource === 'like' ? 'likes' : 'bookmarks'} as JSON, then upload below.
- {/* Source selector */}
- {step === 1 && (
+ {/* Source selector — hidden for archive (source is auto-detected from file) */}
+ {step === 1 && currentMethod !== 'archive' && (
setImportSource('bookmark')}
@@ -1397,7 +1521,7 @@ export default function ImportPage() {
- {step === 1 && }
+ {step === 1 && }
{step === 2 && (
, string> = {
entities: 'Extracting entities…',
vision: 'Analyzing images…',
enrichment: 'Generating semantic tags…',
- categorize: 'Categorizing bookmarks…',
- parallel: 'Processing bookmarks in parallel…',
+ categorize: 'Categorizing tweets…',
+ parallel: 'Processing tweets in parallel…',
}
function UncategorizedState({ totalBookmarks }: { totalBookmarks: number }) {
@@ -172,9 +172,9 @@ function UncategorizedState({ totalBookmarks }: { totalBookmarks: number }) {
-
Bookmarks not categorized yet
+
Tweets not categorized yet
- You have {totalBookmarks.toLocaleString()} bookmarks imported.
+ You have {totalBookmarks.toLocaleString()} tweets imported.
Run AI categorization to populate the mindmap.
@@ -289,9 +289,9 @@ function MindmapOverlay({
-
Bookmarks Not Categorized Yet
+
Tweets Not Categorized Yet
- You have {totalBookmarks.toLocaleString()} bookmarks imported.
+ You have {totalBookmarks.toLocaleString()} tweets imported.
The mindmap will fill in once AI categorization completes.
@@ -373,7 +373,7 @@ export default function MindmapPage() {
) : (
No data to display
-
Import and categorize bookmarks first.
+
Import and categorize tweets first.
)}
diff --git a/app/page.tsx b/app/page.tsx
index b7e5ee5..1fef421 100644
--- a/app/page.tsx
+++ b/app/page.tsx
@@ -210,7 +210,7 @@ export default async function DashboardPage() {
{/* Stat Cards */}
0 ? `${data.bookmarkSourceCount.toLocaleString()} bookmarks · ${data.likeSourceCount.toLocaleString()} likes` : 'Total Bookmarks'}
+ label={data.likeSourceCount > 0 ? `${data.bookmarkSourceCount.toLocaleString()} bookmarks · ${data.likeSourceCount.toLocaleString()} likes` : 'Total Tweets'}
value={data.totalBookmarks}
icon={BookmarkIcon}
iconColor="text-indigo-400"
@@ -318,10 +318,10 @@ function EmptyState() {
- No bookmarks yet
+ No tweets saved yet
- Import your Twitter bookmarks to get started. Once imported, use AI to automatically
- categorize and organize them.
+ Import your Twitter/X bookmarks or likes to get started. Once imported, use AI to
+ automatically categorize and organize them.
- Import bookmarks
+ Import tweets
void }) {
>
-
Clear all bookmarks
-
Permanently delete all imported bookmarks
+
Clear all tweets
+
Permanently delete all imported tweets
{cleared ? (
@@ -774,12 +774,12 @@ function AboutSection() {
}
return (
-
+
Siftly is a self-hosted app for
- organizing your Twitter/X bookmarks. Use the built-in bookmarklet or console script to import,
- then run the 4-stage AI pipeline to analyze images, extract entities, generate semantic tags, and
- auto-categorize — then explore connections through the interactive mindmap.
+ organizing your Twitter/X bookmarks and likes. Import via bookmarklet, console script, file upload,
+ or the Live Import API — then run the 4-stage AI pipeline to analyze images, extract entities,
+ generate semantic tags, and auto-categorize. Explore connections through the interactive mindmap.
{/* Builder + support row */}
@@ -904,7 +904,7 @@ function XOAuthSection({ onToast }: { onToast: (t: Toast) => void }) {
{savedId ? (
diff --git a/components/bookmark-card.tsx b/components/bookmark-card.tsx
index be48a7a..4be80d9 100644
--- a/components/bookmark-card.tsx
+++ b/components/bookmark-card.tsx
@@ -630,11 +630,11 @@ export default function BookmarkCard({ bookmark }: BookmarkCardProps) {
(firstMedia.type === 'photo' || isVideoUrl(firstMedia.url))
return (
-
+
{/* Top media — full bleed, no padding */}
{firstMedia && (
-
+
)}
diff --git a/lib/parser.test.ts b/lib/parser.test.ts
new file mode 100644
index 0000000..e4f2e76
--- /dev/null
+++ b/lib/parser.test.ts
@@ -0,0 +1,474 @@
+import { describe, it, expect } from 'vitest'
+import { parseTweetsJson, parseTweetsWithMeta } from './parser'
+
+// ---------------------------------------------------------------------------
+// Helpers – build fixture strings
+// ---------------------------------------------------------------------------
+
+function likesArchive(items: object[]): string {
+ return `window.YTD.like.part0 = ${JSON.stringify(items)}`
+}
+
+function bookmarksArchive(items: object[]): string {
+ return `window.YTD.bookmark.part0 = ${JSON.stringify(items)}`
+}
+
+// A minimal raw tweet object (the standard API-style format)
+function rawTweet(overrides: Record
= {}) {
+ return {
+ id_str: '999',
+ full_text: 'default tweet text',
+ created_at: 'Wed Oct 10 20:19:24 +0000 2018',
+ user: { screen_name: 'testuser', name: 'Test User' },
+ entities: { hashtags: [], urls: [], media: [] },
+ ...overrides,
+ }
+}
+
+// ---------------------------------------------------------------------------
+// 1. parseTweetsJson with archive .js content
+// ---------------------------------------------------------------------------
+describe('parseTweetsJson – archive .js content', () => {
+ it('strips window.YTD.like prefix and parses correctly', () => {
+ const content = likesArchive([
+ { like: { tweetId: '123', fullText: 'hello world', expandedUrl: 'https://x.com/user/status/123' } },
+ ])
+
+ const result = parseTweetsJson(content)
+ expect(result).toHaveLength(1)
+ expect(result[0].tweetId).toBe('123')
+ expect(result[0].text).toBe('hello world')
+ expect(result[0].urls).toEqual(['https://x.com/user/status/123'])
+ })
+
+ it('strips window.YTD.bookmark prefix and parses correctly', () => {
+ const content = bookmarksArchive([
+ { bookmark: { tweetId: '456', fullText: 'saved tweet' } },
+ ])
+
+ const result = parseTweetsJson(content)
+ expect(result).toHaveLength(1)
+ expect(result[0].tweetId).toBe('456')
+ expect(result[0].text).toBe('saved tweet')
+ })
+})
+
+// ---------------------------------------------------------------------------
+// 2. parseTweetsWithMeta – returns tweets AND detectedSource
+// ---------------------------------------------------------------------------
+describe('parseTweetsWithMeta', () => {
+ it('returns detectedSource "like" for likes archive', () => {
+ const content = likesArchive([
+ { like: { tweetId: '100', fullText: 'liked tweet' } },
+ ])
+
+ const result = parseTweetsWithMeta(content)
+ expect(result.detectedSource).toBe('like')
+ expect(result.tweets).toHaveLength(1)
+ expect(result.tweets[0].tweetId).toBe('100')
+ })
+
+ it('returns detectedSource "bookmark" for bookmarks archive', () => {
+ const content = bookmarksArchive([
+ { bookmark: { tweetId: '200', fullText: 'bookmarked tweet' } },
+ ])
+
+ const result = parseTweetsWithMeta(content)
+ expect(result.detectedSource).toBe('bookmark')
+ expect(result.tweets).toHaveLength(1)
+ })
+
+ it('returns undefined detectedSource for non-archive JSON', () => {
+ const content = JSON.stringify([rawTweet({ id_str: '300' })])
+
+ const result = parseTweetsWithMeta(content)
+ expect(result.detectedSource).toBeUndefined()
+ expect(result.tweets).toHaveLength(1)
+ expect(result.tweets[0].tweetId).toBe('300')
+ })
+})
+
+// ---------------------------------------------------------------------------
+// 3. Likes archive format
+// ---------------------------------------------------------------------------
+describe('likes archive format', () => {
+ it('parses a likes archive item with all fields', () => {
+ const content = likesArchive([
+ {
+ like: {
+ tweetId: '123',
+ fullText: 'hello world',
+ expandedUrl: 'https://x.com/user/status/123',
+ },
+ },
+ ])
+
+ const result = parseTweetsJson(content)
+ expect(result).toHaveLength(1)
+ expect(result[0]).toMatchObject({
+ tweetId: '123',
+ text: 'hello world',
+ urls: ['https://x.com/user/status/123'],
+ authorHandle: 'unknown',
+ authorName: 'Unknown',
+ })
+ })
+})
+
+// ---------------------------------------------------------------------------
+// 4. Bookmarks archive format
+// ---------------------------------------------------------------------------
+describe('bookmarks archive format', () => {
+ it('parses a bookmarks archive item with tweetId and fullText', () => {
+ const content = bookmarksArchive([
+ { bookmark: { tweetId: '456', fullText: 'saved tweet' } },
+ ])
+
+ const result = parseTweetsJson(content)
+ expect(result).toHaveLength(1)
+ expect(result[0]).toMatchObject({
+ tweetId: '456',
+ text: 'saved tweet',
+ authorHandle: 'unknown',
+ authorName: 'Unknown',
+ })
+ })
+
+ it('includes expandedUrl when present', () => {
+ const content = bookmarksArchive([
+ {
+ bookmark: {
+ tweetId: '456',
+ fullText: 'saved tweet',
+ expandedUrl: 'https://example.com',
+ },
+ },
+ ])
+
+ const result = parseTweetsJson(content)
+ expect(result[0].urls).toEqual(['https://example.com'])
+ })
+})
+
+// ---------------------------------------------------------------------------
+// 5. Sparse data – archive items with only tweetId
+// ---------------------------------------------------------------------------
+describe('sparse archive data', () => {
+ it('parses archive item with only tweetId (no fullText, no expandedUrl)', () => {
+ const content = likesArchive([
+ { like: { tweetId: '789' } },
+ ])
+
+ const result = parseTweetsJson(content)
+ expect(result).toHaveLength(1)
+ expect(result[0].tweetId).toBe('789')
+ expect(result[0].text).toBe('')
+ expect(result[0].urls).toEqual([])
+ })
+
+ it('skips archive items with no tweetId at all', () => {
+ const content = likesArchive([
+ { like: {} },
+ ])
+
+ const result = parseTweetsJson(content)
+ expect(result).toHaveLength(0)
+ })
+
+ it('skips archive items with empty inner object', () => {
+ const content = likesArchive([
+ { like: { tweetId: undefined } },
+ ])
+
+ const result = parseTweetsJson(content)
+ expect(result).toHaveLength(0)
+ })
+})
+
+// ---------------------------------------------------------------------------
+// 6. Empty archive
+// ---------------------------------------------------------------------------
+describe('empty archive', () => {
+ it('returns empty array for empty likes archive', () => {
+ const content = 'window.YTD.like.part0 = []'
+ const result = parseTweetsJson(content)
+ expect(result).toEqual([])
+ })
+
+ it('returns empty array for empty bookmarks archive', () => {
+ const content = 'window.YTD.bookmark.part0 = []'
+ const result = parseTweetsJson(content)
+ expect(result).toEqual([])
+ })
+
+ it('parseTweetsWithMeta returns empty tweets and no detectedSource for empty likes', () => {
+ const content = 'window.YTD.like.part0 = []'
+ const result = parseTweetsWithMeta(content)
+ expect(result.tweets).toEqual([])
+ // archiveType is still detected from the prefix even with empty array
+ expect(result.detectedSource).toBe('like')
+ })
+})
+
+// ---------------------------------------------------------------------------
+// 7. Multiple items
+// ---------------------------------------------------------------------------
+describe('multiple archive items', () => {
+ it('parses all items from a likes archive', () => {
+ const content = likesArchive([
+ { like: { tweetId: '1', fullText: 'first' } },
+ { like: { tweetId: '2', fullText: 'second' } },
+ { like: { tweetId: '3', fullText: 'third' } },
+ ])
+
+ const result = parseTweetsJson(content)
+ expect(result).toHaveLength(3)
+ expect(result.map((b) => b.tweetId)).toEqual(['1', '2', '3'])
+ expect(result.map((b) => b.text)).toEqual(['first', 'second', 'third'])
+ })
+
+ it('parses all items from a bookmarks archive', () => {
+ const content = bookmarksArchive([
+ { bookmark: { tweetId: '10', fullText: 'a' } },
+ { bookmark: { tweetId: '20', fullText: 'b' } },
+ ])
+
+ const result = parseTweetsJson(content)
+ expect(result).toHaveLength(2)
+ expect(result.map((b) => b.tweetId)).toEqual(['10', '20'])
+ })
+
+ it('skips invalid items among valid ones', () => {
+ const content = likesArchive([
+ { like: { tweetId: '1', fullText: 'valid' } },
+ { like: {} },
+ { like: { tweetId: '3', fullText: 'also valid' } },
+ ])
+
+ const result = parseTweetsJson(content)
+ expect(result).toHaveLength(2)
+ expect(result.map((b) => b.tweetId)).toEqual(['1', '3'])
+ })
+})
+
+// ---------------------------------------------------------------------------
+// 8. Backward compatibility – existing JSON formats still work
+// ---------------------------------------------------------------------------
+describe('backward compatibility – existing formats', () => {
+ it('parses raw tweet array (API-style)', () => {
+ const content = JSON.stringify([
+ rawTweet({ id_str: '111', full_text: 'api tweet', user: { screen_name: 'alice', name: 'Alice' } }),
+ ])
+
+ const result = parseTweetsJson(content)
+ expect(result).toHaveLength(1)
+ expect(result[0]).toMatchObject({
+ tweetId: '111',
+ text: 'api tweet',
+ authorHandle: 'alice',
+ authorName: 'Alice',
+ })
+ })
+
+ it('parses console export format', () => {
+ const content = JSON.stringify({
+ exportDate: '2024-01-01',
+ totalBookmarks: 1,
+ bookmarks: [
+ {
+ id: '222',
+ text: 'console tweet',
+ author: 'Bob',
+ handle: '@bob',
+ timestamp: '2024-01-01T00:00:00Z',
+ },
+ ],
+ })
+
+ const result = parseTweetsJson(content)
+ expect(result).toHaveLength(1)
+ expect(result[0]).toMatchObject({
+ tweetId: '222',
+ text: 'console tweet',
+ authorHandle: 'bob',
+ authorName: 'Bob',
+ })
+ })
+
+ it('appends quoted tweet text in console export format', () => {
+ const content = JSON.stringify({
+ bookmarks: [
+ {
+ id: '223',
+ text: 'check this out',
+ author: 'Alice',
+ handle: '@alice',
+ quotedText: 'the original thought',
+ quotedAuthor: 'carol',
+ },
+ ],
+ })
+
+ const result = parseTweetsJson(content)
+ expect(result).toHaveLength(1)
+ expect(result[0].tweetId).toBe('223')
+ expect(result[0].text).toBe(
+ 'check this out\n\n[Quote @carol]: the original thought'
+ )
+ expect(result[0].authorHandle).toBe('alice')
+ })
+
+ it('uses "unknown" for quotedAuthor when missing in console export format', () => {
+ const content = JSON.stringify({
+ bookmarks: [
+ {
+ id: '224',
+ text: 'interesting',
+ handle: '@dave',
+ quotedText: 'some quoted content',
+ },
+ ],
+ })
+
+ const result = parseTweetsJson(content)
+ expect(result).toHaveLength(1)
+ expect(result[0].text).toBe(
+ 'interesting\n\n[Quote @unknown]: some quoted content'
+ )
+ })
+
+ it('handles empty main text with quoted tweet in console export format', () => {
+ const content = JSON.stringify({
+ bookmarks: [
+ {
+ id: '225',
+ handle: '@eve',
+ quotedText: 'just the quote',
+ quotedAuthor: 'frank',
+ },
+ ],
+ })
+
+ const result = parseTweetsJson(content)
+ expect(result).toHaveLength(1)
+ expect(result[0].text).toBe(
+ '\n\n[Quote @frank]: just the quote'
+ )
+ })
+
+ it('parses flat export format (CSV-style)', () => {
+ const content = JSON.stringify([
+ {
+ 'Tweet Id': '333',
+ 'Full Text': 'flat tweet',
+ 'User Screen Name': 'charlie',
+ 'User Name': 'Charlie',
+ 'Created At': '2024-06-15T12:00:00Z',
+ },
+ ])
+
+ const result = parseTweetsJson(content)
+ expect(result).toHaveLength(1)
+ expect(result[0]).toMatchObject({
+ tweetId: '333',
+ text: 'flat tweet',
+ authorHandle: 'charlie',
+ authorName: 'Charlie',
+ })
+ })
+
+ it('parses Siftly re-export format', () => {
+ const content = JSON.stringify([
+ {
+ tweetId: '444',
+ text: 'siftly tweet',
+ authorHandle: 'dave',
+ authorName: 'Dave',
+ },
+ ])
+
+ const result = parseTweetsJson(content)
+ expect(result).toHaveLength(1)
+ expect(result[0]).toMatchObject({
+ tweetId: '444',
+ text: 'siftly tweet',
+ authorHandle: 'dave',
+ authorName: 'Dave',
+ })
+ })
+
+ it('parses twitter-web-exporter format (object with array value)', () => {
+ const content = JSON.stringify({
+ tweets: [
+ rawTweet({ id_str: '555', full_text: 'wrapped tweet' }),
+ ],
+ })
+
+ const result = parseTweetsJson(content)
+ expect(result).toHaveLength(1)
+ expect(result[0].tweetId).toBe('555')
+ })
+})
+
+// ---------------------------------------------------------------------------
+// 9. Mixed / archive detection doesn't break other formats
+// ---------------------------------------------------------------------------
+describe('archive detection does not break other formats', () => {
+ it('regular JSON array without window.YTD prefix works', () => {
+ const content = JSON.stringify([rawTweet({ id_str: '600' })])
+
+ const result = parseTweetsJson(content)
+ expect(result).toHaveLength(1)
+ expect(result[0].tweetId).toBe('600')
+ })
+
+ it('parseTweetsWithMeta correctly returns no source for regular JSON', () => {
+ const content = JSON.stringify([rawTweet({ id_str: '601' })])
+
+ const meta = parseTweetsWithMeta(content)
+ expect(meta.detectedSource).toBeUndefined()
+ expect(meta.tweets).toHaveLength(1)
+ })
+
+ it('content starting with whitespace before window.YTD prefix still works', () => {
+ const content = ` \n window.YTD.like.part0 = [{"like":{"tweetId":"700","fullText":"padded"}}]`
+
+ const result = parseTweetsJson(content)
+ expect(result).toHaveLength(1)
+ expect(result[0].tweetId).toBe('700')
+ expect(result[0].text).toBe('padded')
+ })
+
+ it('handles part numbers > 0', () => {
+ const content = `window.YTD.like.part3 = [{"like":{"tweetId":"800","fullText":"part three"}}]`
+
+ const result = parseTweetsJson(content)
+ expect(result).toHaveLength(1)
+ expect(result[0].tweetId).toBe('800')
+ })
+})
+
+// ---------------------------------------------------------------------------
+// Edge cases / error handling
+// ---------------------------------------------------------------------------
+describe('error handling', () => {
+ it('throws on empty string', () => {
+ expect(() => parseTweetsJson('')).toThrow('Empty JSON string provided')
+ })
+
+ it('throws on whitespace-only string', () => {
+ expect(() => parseTweetsJson(' ')).toThrow('Empty JSON string provided')
+ })
+
+ it('throws on invalid JSON', () => {
+ expect(() => parseTweetsJson('not json at all')).toThrow('Invalid JSON')
+ })
+
+ it('throws on invalid JSON after stripping archive prefix', () => {
+ expect(() => parseTweetsJson('window.YTD.like.part0 = {broken')).toThrow('Invalid JSON')
+ })
+
+ it('parseTweetsWithMeta throws on empty content', () => {
+ expect(() => parseTweetsWithMeta('')).toThrow('Empty content provided')
+ })
+})
diff --git a/lib/parser.ts b/lib/parser.ts
index e536722..d8cf170 100644
--- a/lib/parser.ts
+++ b/lib/parser.ts
@@ -4,7 +4,7 @@ export interface ParsedMedia {
thumbnailUrl?: string
}
-export interface ParsedBookmark {
+export interface ParsedTweet {
tweetId: string
text: string
authorHandle: string
@@ -144,7 +144,7 @@ function extractMedia(tweet: RawTweet): ParsedMedia[] {
.filter((m): m is ParsedMedia => m !== null)
}
-function parseSingleTweet(tweet: RawTweet): ParsedBookmark | null {
+function parseSingleTweet(tweet: RawTweet): ParsedTweet | null {
const tweetId = extractTweetId(tweet)
if (!tweetId) return null
@@ -213,6 +213,8 @@ interface ConsoleExportBookmark {
media?: { type?: string; url?: string }[]
hashtags?: string[]
urls?: string[]
+ quotedText?: string
+ quotedAuthor?: string
}
function isConsoleExportFormat(obj: unknown): obj is { bookmarks: ConsoleExportBookmark[] } {
@@ -237,9 +239,13 @@ function convertConsoleExportRow(row: ConsoleExportBookmark): RawTweet {
const handle = (row.handle ?? '').replace(/^@/, '')
+ const fullText = row.quotedText
+ ? `${row.text || ''}\n\n[Quote @${row.quotedAuthor || 'unknown'}]: ${row.quotedText}`
+ : row.text
+
return {
id_str: row.id,
- full_text: row.text,
+ full_text: fullText,
created_at: row.timestamp,
user: { screen_name: handle || 'unknown', name: row.author || handle || 'Unknown' },
entities: {
@@ -251,6 +257,50 @@ function convertConsoleExportRow(row: ConsoleExportBookmark): RawTweet {
}
}
+interface TwitterArchiveItem {
+ like?: { tweetId?: string; fullText?: string; expandedUrl?: string }
+ bookmark?: { tweetId?: string; fullText?: string; expandedUrl?: string }
+}
+
+function stripArchivePrefix(content: string): { json: string; archiveType: 'like' | 'bookmark' | null } {
+ const trimmed = content.trim()
+ if (!trimmed.startsWith('window.YTD.')) {
+ return { json: trimmed, archiveType: null }
+ }
+ const match = trimmed.match(/^window\.YTD\.(\w+)\.part\d+\s*=\s*/)
+ if (!match) {
+ return { json: trimmed, archiveType: null }
+ }
+ const rawType = match[1]
+ const archiveType: 'like' | 'bookmark' | null =
+ rawType === 'like' ? 'like' : rawType === 'bookmark' ? 'bookmark' : null
+ const json = trimmed.slice(match[0].length)
+ return { json, archiveType }
+}
+
+function isTwitterArchiveFormat(items: unknown[]): boolean {
+ if (items.length === 0) return false
+ const first = items[0]
+ if (typeof first !== 'object' || first === null) return false
+ return 'like' in first || 'bookmark' in first
+}
+
+function convertArchiveItem(item: TwitterArchiveItem): RawTweet {
+ const inner = item.like ?? item.bookmark
+ if (!inner) {
+ return {}
+ }
+ const urls: TwitterUrlEntity[] = inner.expandedUrl
+ ? [{ expanded_url: inner.expandedUrl }]
+ : []
+ return {
+ id_str: inner.tweetId,
+ full_text: inner.fullText,
+ user: { screen_name: 'unknown', name: 'Unknown' },
+ entities: { urls },
+ }
+}
+
interface SiftlyExportItem {
tweetId?: string
text?: string
@@ -301,6 +351,10 @@ function normalizeTweetArray(parsed: unknown): RawTweet[] {
if (parsed.length > 0 && isSiftlyExportFormat(parsed[0])) {
return parsed.map((row) => convertSiftlyExportRow(row as SiftlyExportItem))
}
+ // Twitter/X data archive format: [{ like: {...} }] or [{ bookmark: {...} }]
+ if (parsed.length > 0 && isTwitterArchiveFormat(parsed)) {
+ return parsed.map((item) => convertArchiveItem(item as TwitterArchiveItem))
+ }
return parsed as RawTweet[]
}
@@ -319,21 +373,23 @@ function normalizeTweetArray(parsed: unknown): RawTweet[] {
return []
}
-export function parseBookmarksJson(jsonString: string): ParsedBookmark[] {
+export function parseTweetsJson(jsonString: string): ParsedTweet[] {
if (!jsonString || jsonString.trim() === '') {
throw new Error('Empty JSON string provided')
}
+ const { json } = stripArchivePrefix(jsonString)
+
let parsed: unknown
try {
- parsed = JSON.parse(jsonString)
+ parsed = JSON.parse(json)
} catch (err) {
throw new Error(`Invalid JSON: ${err instanceof Error ? err.message : String(err)}`)
}
const tweets = normalizeTweetArray(parsed)
- const results: ParsedBookmark[] = []
+ const results: ParsedTweet[] = []
for (const tweet of tweets) {
const bookmark = parseSingleTweet(tweet)
if (bookmark !== null) {
@@ -343,3 +399,38 @@ export function parseBookmarksJson(jsonString: string): ParsedBookmark[] {
return results
}
+
+export interface ParseResult {
+ tweets: ParsedTweet[]
+ detectedSource?: 'like' | 'bookmark'
+}
+
+export function parseTweetsWithMeta(content: string): ParseResult {
+ if (!content || content.trim() === '') {
+ throw new Error('Empty content provided')
+ }
+
+ const { json, archiveType } = stripArchivePrefix(content)
+
+ let parsed: unknown
+ try {
+ parsed = JSON.parse(json)
+ } catch (err) {
+ throw new Error(`Invalid JSON: ${err instanceof Error ? err.message : String(err)}`)
+ }
+
+ const tweets = normalizeTweetArray(parsed)
+
+ const results: ParsedTweet[] = []
+ for (const tweet of tweets) {
+ const parsed = parseSingleTweet(tweet)
+ if (parsed !== null) {
+ results.push(parsed)
+ }
+ }
+
+ return {
+ tweets: results,
+ detectedSource: archiveType ?? undefined,
+ }
+}
diff --git a/lib/twitter-api.ts b/lib/twitter-api.ts
index cc36082..bd28816 100644
--- a/lib/twitter-api.ts
+++ b/lib/twitter-api.ts
@@ -2,8 +2,7 @@ import prisma from '@/lib/db'
// ── Constants ─────────────────────────────────────────────────────────────────
-const BEARER =
- 'AAAAAAAAAAAAAAAAAAAAANRILgAAAAAAnNwIzUejRCOuH5E6I8xnZz4puTs%3D1Zv7ttfk8LF81IUq16cHjhLTvJu4FA33AGWWjCpTnA'
+const BEARER = process.env.X_BEARER_TOKEN ?? ''
const FEATURES = JSON.stringify({
graphql_timeline_v2_bookmark_timeline: true,
diff --git a/lib/upsert-tweet.ts b/lib/upsert-tweet.ts
new file mode 100644
index 0000000..5957700
--- /dev/null
+++ b/lib/upsert-tweet.ts
@@ -0,0 +1,143 @@
+import prisma from '@/lib/db'
+
+export interface IncomingTweet {
+ tweetId: string
+ text: string
+ authorHandle: string
+ authorName: string
+ tweetCreatedAt: Date | null
+ rawJson: string | null
+ source: string
+ media: { type: string; url: string; thumbnailUrl?: string | null }[]
+}
+
+export interface UpsertResult {
+ imported: number
+ skipped: number
+ updated: number
+ errored: number
+}
+
+/**
+ * Checks whether incoming tweet data is richer than what's already stored.
+ * Returns true if the incoming data has longer text, a real author handle
+ * where we had 'unknown', or new media where we had none.
+ */
+function isIncomingRicher(
+ incoming: IncomingTweet,
+ existing: { text: string | null; authorHandle: string | null; _count: { mediaItems: number } },
+): boolean {
+ return (
+ (incoming.text !== (existing.text ?? '') && incoming.text.length >= (existing.text?.length ?? 0)) ||
+ (existing.authorHandle === 'unknown' && incoming.authorHandle !== 'unknown') ||
+ (existing._count.mediaItems === 0 && incoming.media.length > 0)
+ )
+}
+
+/**
+ * Upserts a single tweet inside a transaction:
+ * - New tweet → create + media
+ * - Existing but incoming is richer → update + clear enrichment + replace media
+ * - Existing and equal/worse → skip
+ */
+async function upsertOne(
+ tweet: IncomingTweet,
+): Promise<'imported' | 'updated' | 'skipped'> {
+ const existing = await prisma.bookmark.findUnique({
+ where: { tweetId: tweet.tweetId },
+ select: {
+ id: true,
+ text: true,
+ authorHandle: true,
+ tweetCreatedAt: true,
+ _count: { select: { mediaItems: true } },
+ },
+ })
+
+ if (existing) {
+ if (!isIncomingRicher(tweet, existing)) return 'skipped'
+
+ const addMedia = existing._count.mediaItems === 0 && tweet.media.length > 0
+
+ await prisma.$transaction([
+ prisma.bookmark.update({
+ where: { id: existing.id },
+ data: {
+ text: tweet.text,
+ authorHandle: tweet.authorHandle,
+ authorName: tweet.authorName,
+ rawJson: tweet.rawJson ?? undefined,
+ tweetCreatedAt:
+ existing.tweetCreatedAt == null ? tweet.tweetCreatedAt : undefined,
+ enrichedAt: null,
+ semanticTags: null,
+ entities: null,
+ enrichmentMeta: null,
+ },
+ }),
+ prisma.bookmarkCategory.deleteMany({ where: { bookmarkId: existing.id } }),
+ ...(addMedia
+ ? [
+ prisma.mediaItem.createMany({
+ data: tweet.media.map((m) => ({
+ bookmarkId: existing.id,
+ type: m.type,
+ url: m.url,
+ thumbnailUrl: m.thumbnailUrl ?? null,
+ })),
+ }),
+ ]
+ : []),
+ ])
+
+ return 'updated'
+ }
+
+ // New tweet — create in a transaction
+ await prisma.$transaction(async (tx) => {
+ const created = await tx.bookmark.create({
+ data: {
+ tweetId: tweet.tweetId,
+ text: tweet.text,
+ authorHandle: tweet.authorHandle,
+ authorName: tweet.authorName,
+ tweetCreatedAt: tweet.tweetCreatedAt,
+ rawJson: tweet.rawJson ?? '',
+ source: tweet.source,
+ },
+ })
+
+ if (tweet.media.length > 0) {
+ await tx.mediaItem.createMany({
+ data: tweet.media.map((m) => ({
+ bookmarkId: created.id,
+ type: m.type,
+ url: m.url,
+ thumbnailUrl: m.thumbnailUrl ?? null,
+ })),
+ })
+ }
+ })
+
+ return 'imported'
+}
+
+/**
+ * Upserts a batch of tweets. Each tweet is processed individually so a single
+ * failure doesn't roll back the entire import. Returns aggregate counts.
+ */
+export async function upsertTweets(tweets: IncomingTweet[]): Promise<UpsertResult> {
+ const result: UpsertResult = { imported: 0, skipped: 0, updated: 0, errored: 0 }
+
+ for (const tweet of tweets) {
+ try {
+ const outcome = await upsertOne(tweet)
+ result[outcome]++
+ } catch (err) {
+ console.error(`Failed to import tweet ${tweet.tweetId}:`, err)
+ result.errored++
+ }
+ }
+
+ return result
+}
diff --git a/package-lock.json b/package-lock.json
index cccba22..262a75e 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -22,6 +22,7 @@
"jszip": "^3.10.1",
"lucide-react": "^0.576.0",
"next": "16.1.6",
+ "openai": "^6.27.0",
"react": "19.2.3",
"react-dom": "19.2.3",
"tailwind-merge": "^3.5.0"
@@ -38,7 +39,8 @@
"prisma": "^7.4.2",
"tailwindcss": "^4",
"tsx": "^4.21.0",
- "typescript": "^5"
+ "typescript": "^5",
+ "vitest": "^4.0.18"
}
},
"node_modules/@alloc/quick-lru": {
@@ -2835,6 +2837,331 @@
"integrity": "sha512-HPwpGIzkl28mWyZqG52jiqDJ12waP11Pa1lGoiyUkIEuMLBP0oeK/C89esbXrxsky5we7dfd8U58nm0SgAWpVw==",
"license": "MIT"
},
+ "node_modules/@rollup/rollup-android-arm-eabi": {
+ "version": "4.59.0",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.59.0.tgz",
+ "integrity": "sha512-upnNBkA6ZH2VKGcBj9Fyl9IGNPULcjXRlg0LLeaioQWueH30p6IXtJEbKAgvyv+mJaMxSm1l6xwDXYjpEMiLMg==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "android"
+ ]
+ },
+ "node_modules/@rollup/rollup-android-arm64": {
+ "version": "4.59.0",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.59.0.tgz",
+ "integrity": "sha512-hZ+Zxj3SySm4A/DylsDKZAeVg0mvi++0PYVceVyX7hemkw7OreKdCvW2oQ3T1FMZvCaQXqOTHb8qmBShoqk69Q==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "android"
+ ]
+ },
+ "node_modules/@rollup/rollup-darwin-arm64": {
+ "version": "4.59.0",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.59.0.tgz",
+ "integrity": "sha512-W2Psnbh1J8ZJw0xKAd8zdNgF9HRLkdWwwdWqubSVk0pUuQkoHnv7rx4GiF9rT4t5DIZGAsConRE3AxCdJ4m8rg==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "darwin"
+ ]
+ },
+ "node_modules/@rollup/rollup-darwin-x64": {
+ "version": "4.59.0",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.59.0.tgz",
+ "integrity": "sha512-ZW2KkwlS4lwTv7ZVsYDiARfFCnSGhzYPdiOU4IM2fDbL+QGlyAbjgSFuqNRbSthybLbIJ915UtZBtmuLrQAT/w==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "darwin"
+ ]
+ },
+ "node_modules/@rollup/rollup-freebsd-arm64": {
+ "version": "4.59.0",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.59.0.tgz",
+ "integrity": "sha512-EsKaJ5ytAu9jI3lonzn3BgG8iRBjV4LxZexygcQbpiU0wU0ATxhNVEpXKfUa0pS05gTcSDMKpn3Sx+QB9RlTTA==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "freebsd"
+ ]
+ },
+ "node_modules/@rollup/rollup-freebsd-x64": {
+ "version": "4.59.0",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.59.0.tgz",
+ "integrity": "sha512-d3DuZi2KzTMjImrxoHIAODUZYoUUMsuUiY4SRRcJy6NJoZ6iIqWnJu9IScV9jXysyGMVuW+KNzZvBLOcpdl3Vg==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "freebsd"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-arm-gnueabihf": {
+ "version": "4.59.0",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.59.0.tgz",
+ "integrity": "sha512-t4ONHboXi/3E0rT6OZl1pKbl2Vgxf9vJfWgmUoCEVQVxhW6Cw/c8I6hbbu7DAvgp82RKiH7TpLwxnJeKv2pbsw==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-arm-musleabihf": {
+ "version": "4.59.0",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.59.0.tgz",
+ "integrity": "sha512-CikFT7aYPA2ufMD086cVORBYGHffBo4K8MQ4uPS/ZnY54GKj36i196u8U+aDVT2LX4eSMbyHtyOh7D7Zvk2VvA==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-arm64-gnu": {
+ "version": "4.59.0",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.59.0.tgz",
+ "integrity": "sha512-jYgUGk5aLd1nUb1CtQ8E+t5JhLc9x5WdBKew9ZgAXg7DBk0ZHErLHdXM24rfX+bKrFe+Xp5YuJo54I5HFjGDAA==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-arm64-musl": {
+ "version": "4.59.0",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.59.0.tgz",
+ "integrity": "sha512-peZRVEdnFWZ5Bh2KeumKG9ty7aCXzzEsHShOZEFiCQlDEepP1dpUl/SrUNXNg13UmZl+gzVDPsiCwnV1uI0RUA==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-loong64-gnu": {
+ "version": "4.59.0",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.59.0.tgz",
+ "integrity": "sha512-gbUSW/97f7+r4gHy3Jlup8zDG190AuodsWnNiXErp9mT90iCy9NKKU0Xwx5k8VlRAIV2uU9CsMnEFg/xXaOfXg==",
+ "cpu": [
+ "loong64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-loong64-musl": {
+ "version": "4.59.0",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-musl/-/rollup-linux-loong64-musl-4.59.0.tgz",
+ "integrity": "sha512-yTRONe79E+o0FWFijasoTjtzG9EBedFXJMl888NBEDCDV9I2wGbFFfJQQe63OijbFCUZqxpHz1GzpbtSFikJ4Q==",
+ "cpu": [
+ "loong64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-ppc64-gnu": {
+ "version": "4.59.0",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.59.0.tgz",
+ "integrity": "sha512-sw1o3tfyk12k3OEpRddF68a1unZ5VCN7zoTNtSn2KndUE+ea3m3ROOKRCZxEpmT9nsGnogpFP9x6mnLTCaoLkA==",
+ "cpu": [
+ "ppc64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-ppc64-musl": {
+ "version": "4.59.0",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-musl/-/rollup-linux-ppc64-musl-4.59.0.tgz",
+ "integrity": "sha512-+2kLtQ4xT3AiIxkzFVFXfsmlZiG5FXYW7ZyIIvGA7Bdeuh9Z0aN4hVyXS/G1E9bTP/vqszNIN/pUKCk/BTHsKA==",
+ "cpu": [
+ "ppc64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-riscv64-gnu": {
+ "version": "4.59.0",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.59.0.tgz",
+ "integrity": "sha512-NDYMpsXYJJaj+I7UdwIuHHNxXZ/b/N2hR15NyH3m2qAtb/hHPA4g4SuuvrdxetTdndfj9b1WOmy73kcPRoERUg==",
+ "cpu": [
+ "riscv64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-riscv64-musl": {
+ "version": "4.59.0",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.59.0.tgz",
+ "integrity": "sha512-nLckB8WOqHIf1bhymk+oHxvM9D3tyPndZH8i8+35p/1YiVoVswPid2yLzgX7ZJP0KQvnkhM4H6QZ5m0LzbyIAg==",
+ "cpu": [
+ "riscv64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-s390x-gnu": {
+ "version": "4.59.0",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.59.0.tgz",
+ "integrity": "sha512-oF87Ie3uAIvORFBpwnCvUzdeYUqi2wY6jRFWJAy1qus/udHFYIkplYRW+wo+GRUP4sKzYdmE1Y3+rY5Gc4ZO+w==",
+ "cpu": [
+ "s390x"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-x64-gnu": {
+ "version": "4.59.0",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.59.0.tgz",
+ "integrity": "sha512-3AHmtQq/ppNuUspKAlvA8HtLybkDflkMuLK4DPo77DfthRb71V84/c4MlWJXixZz4uruIH4uaa07IqoAkG64fg==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-x64-musl": {
+ "version": "4.59.0",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.59.0.tgz",
+ "integrity": "sha512-2UdiwS/9cTAx7qIUZB/fWtToJwvt0Vbo0zmnYt7ED35KPg13Q0ym1g442THLC7VyI6JfYTP4PiSOWyoMdV2/xg==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-openbsd-x64": {
+ "version": "4.59.0",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-openbsd-x64/-/rollup-openbsd-x64-4.59.0.tgz",
+ "integrity": "sha512-M3bLRAVk6GOwFlPTIxVBSYKUaqfLrn8l0psKinkCFxl4lQvOSz8ZrKDz2gxcBwHFpci0B6rttydI4IpS4IS/jQ==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "openbsd"
+ ]
+ },
+ "node_modules/@rollup/rollup-openharmony-arm64": {
+ "version": "4.59.0",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.59.0.tgz",
+ "integrity": "sha512-tt9KBJqaqp5i5HUZzoafHZX8b5Q2Fe7UjYERADll83O4fGqJ49O1FsL6LpdzVFQcpwvnyd0i+K/VSwu/o/nWlA==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "openharmony"
+ ]
+ },
+ "node_modules/@rollup/rollup-win32-arm64-msvc": {
+ "version": "4.59.0",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.59.0.tgz",
+ "integrity": "sha512-V5B6mG7OrGTwnxaNUzZTDTjDS7F75PO1ae6MJYdiMu60sq0CqN5CVeVsbhPxalupvTX8gXVSU9gq+Rx1/hvu6A==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "win32"
+ ]
+ },
+ "node_modules/@rollup/rollup-win32-ia32-msvc": {
+ "version": "4.59.0",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.59.0.tgz",
+ "integrity": "sha512-UKFMHPuM9R0iBegwzKF4y0C4J9u8C6MEJgFuXTBerMk7EJ92GFVFYBfOZaSGLu6COf7FxpQNqhNS4c4icUPqxA==",
+ "cpu": [
+ "ia32"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "win32"
+ ]
+ },
+ "node_modules/@rollup/rollup-win32-x64-gnu": {
+ "version": "4.59.0",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.59.0.tgz",
+ "integrity": "sha512-laBkYlSS1n2L8fSo1thDNGrCTQMmxjYY5G0WFWjFFYZkKPjsMBsgJfGf4TLxXrF6RyhI60L8TMOjBMvXiTcxeA==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "win32"
+ ]
+ },
+ "node_modules/@rollup/rollup-win32-x64-msvc": {
+ "version": "4.59.0",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.59.0.tgz",
+ "integrity": "sha512-2HRCml6OztYXyJXAvdDXPKcawukWY2GpR5/nxKp4iBgiO3wcoEGkAaqctIbZcNB6KlUQBIqt8VYkNSj2397EfA==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "win32"
+ ]
+ },
"node_modules/@rtsao/scc": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@rtsao/scc/-/scc-1.1.0.tgz",
@@ -3150,6 +3477,16 @@
"@types/node": "*"
}
},
+ "node_modules/@types/chai": {
+ "version": "5.2.3",
+ "resolved": "https://registry.npmjs.org/@types/chai/-/chai-5.2.3.tgz",
+ "integrity": "sha512-Mw558oeA9fFbv65/y4mHtXDs9bPnFMZAL/jxdPFUpOHHIXX91mcgEHbS5Lahr+pwZFR8A7GQleRWeI6cGFC2UA==",
+ "dev": true,
+ "dependencies": {
+ "@types/deep-eql": "*",
+ "assertion-error": "^2.0.1"
+ }
+ },
"node_modules/@types/d3-color": {
"version": "3.1.3",
"resolved": "https://registry.npmjs.org/@types/d3-color/-/d3-color-3.1.3.tgz",
@@ -3199,6 +3536,12 @@
"@types/d3-selection": "*"
}
},
+ "node_modules/@types/deep-eql": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/@types/deep-eql/-/deep-eql-4.0.2.tgz",
+ "integrity": "sha512-c9h9dVVMigMPc4bwTvC5dxqtqJZwQPePsWjPlpSOnojbor6pGqdk541lfA7AqFQr5pB1BRdq0juY9db81BwyFw==",
+ "dev": true
+ },
"node_modules/@types/estree": {
"version": "1.0.8",
"resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz",
@@ -3814,6 +4157,110 @@
"win32"
]
},
+ "node_modules/@vitest/expect": {
+ "version": "4.0.18",
+ "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-4.0.18.tgz",
+ "integrity": "sha512-8sCWUyckXXYvx4opfzVY03EOiYVxyNrHS5QxX3DAIi5dpJAAkyJezHCP77VMX4HKA2LDT/Jpfo8i2r5BE3GnQQ==",
+ "dev": true,
+ "dependencies": {
+ "@standard-schema/spec": "^1.0.0",
+ "@types/chai": "^5.2.2",
+ "@vitest/spy": "4.0.18",
+ "@vitest/utils": "4.0.18",
+ "chai": "^6.2.1",
+ "tinyrainbow": "^3.0.3"
+ },
+ "funding": {
+ "url": "https://opencollective.com/vitest"
+ }
+ },
+ "node_modules/@vitest/mocker": {
+ "version": "4.0.18",
+ "resolved": "https://registry.npmjs.org/@vitest/mocker/-/mocker-4.0.18.tgz",
+ "integrity": "sha512-HhVd0MDnzzsgevnOWCBj5Otnzobjy5wLBe4EdeeFGv8luMsGcYqDuFRMcttKWZA5vVO8RFjexVovXvAM4JoJDQ==",
+ "dev": true,
+ "dependencies": {
+ "@vitest/spy": "4.0.18",
+ "estree-walker": "^3.0.3",
+ "magic-string": "^0.30.21"
+ },
+ "funding": {
+ "url": "https://opencollective.com/vitest"
+ },
+ "peerDependencies": {
+ "msw": "^2.4.9",
+ "vite": "^6.0.0 || ^7.0.0-0"
+ },
+ "peerDependenciesMeta": {
+ "msw": {
+ "optional": true
+ },
+ "vite": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@vitest/pretty-format": {
+ "version": "4.0.18",
+ "resolved": "https://registry.npmjs.org/@vitest/pretty-format/-/pretty-format-4.0.18.tgz",
+ "integrity": "sha512-P24GK3GulZWC5tz87ux0m8OADrQIUVDPIjjj65vBXYG17ZeU3qD7r+MNZ1RNv4l8CGU2vtTRqixrOi9fYk/yKw==",
+ "dev": true,
+ "dependencies": {
+ "tinyrainbow": "^3.0.3"
+ },
+ "funding": {
+ "url": "https://opencollective.com/vitest"
+ }
+ },
+ "node_modules/@vitest/runner": {
+ "version": "4.0.18",
+ "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-4.0.18.tgz",
+ "integrity": "sha512-rpk9y12PGa22Jg6g5M3UVVnTS7+zycIGk9ZNGN+m6tZHKQb7jrP7/77WfZy13Y/EUDd52NDsLRQhYKtv7XfPQw==",
+ "dev": true,
+ "dependencies": {
+ "@vitest/utils": "4.0.18",
+ "pathe": "^2.0.3"
+ },
+ "funding": {
+ "url": "https://opencollective.com/vitest"
+ }
+ },
+ "node_modules/@vitest/snapshot": {
+ "version": "4.0.18",
+ "resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-4.0.18.tgz",
+ "integrity": "sha512-PCiV0rcl7jKQjbgYqjtakly6T1uwv/5BQ9SwBLekVg/EaYeQFPiXcgrC2Y7vDMA8dM1SUEAEV82kgSQIlXNMvA==",
+ "dev": true,
+ "dependencies": {
+ "@vitest/pretty-format": "4.0.18",
+ "magic-string": "^0.30.21",
+ "pathe": "^2.0.3"
+ },
+ "funding": {
+ "url": "https://opencollective.com/vitest"
+ }
+ },
+ "node_modules/@vitest/spy": {
+ "version": "4.0.18",
+ "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-4.0.18.tgz",
+ "integrity": "sha512-cbQt3PTSD7P2OARdVW3qWER5EGq7PHlvE+QfzSC0lbwO+xnt7+XH06ZzFjFRgzUX//JmpxrCu92VdwvEPlWSNw==",
+ "dev": true,
+ "funding": {
+ "url": "https://opencollective.com/vitest"
+ }
+ },
+ "node_modules/@vitest/utils": {
+ "version": "4.0.18",
+ "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-4.0.18.tgz",
+ "integrity": "sha512-msMRKLMVLWygpK3u2Hybgi4MNjcYJvwTb0Ru09+fOyCXIgT5raYP041DRRdiJiI3k/2U6SEbAETB3YtBrUkCFA==",
+ "dev": true,
+ "dependencies": {
+ "@vitest/pretty-format": "4.0.18",
+ "tinyrainbow": "^3.0.3"
+ },
+ "funding": {
+ "url": "https://opencollective.com/vitest"
+ }
+ },
"node_modules/@xyflow/react": {
"version": "12.10.1",
"resolved": "https://registry.npmjs.org/@xyflow/react/-/react-12.10.1.tgz",
@@ -4091,6 +4538,15 @@
"url": "https://github.com/sponsors/ljharb"
}
},
+ "node_modules/assertion-error": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-2.0.1.tgz",
+ "integrity": "sha512-Izi8RQcffqCeNVgFigKli1ssklIbpHnCYc6AknXGYoB6grJqyeby7jv12JUQgmTAnIDnbck1uxksT4dzN3PWBA==",
+ "dev": true,
+ "engines": {
+ "node": ">=12"
+ }
+ },
"node_modules/ast-types-flow": {
"version": "0.0.8",
"resolved": "https://registry.npmjs.org/ast-types-flow/-/ast-types-flow-0.0.8.tgz",
@@ -4432,6 +4888,15 @@
],
"license": "CC-BY-4.0"
},
+ "node_modules/chai": {
+ "version": "6.2.2",
+ "resolved": "https://registry.npmjs.org/chai/-/chai-6.2.2.tgz",
+ "integrity": "sha512-NUPRluOfOiTKBKvWPtSD4PhFvWCqOi0BGStNWs57X9js7XGTprSmFoz5F0tWhR4WPjNeR9jXqdC7/UpSJTnlRg==",
+ "dev": true,
+ "engines": {
+ "node": ">=18"
+ }
+ },
"node_modules/chalk": {
"version": "4.1.2",
"resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz",
@@ -5124,6 +5589,12 @@
"node": ">= 0.4"
}
},
+ "node_modules/es-module-lexer": {
+ "version": "1.7.0",
+ "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-1.7.0.tgz",
+ "integrity": "sha512-jEQoCwk8hyb2AZziIOLhDqpm5+2ww5uIE6lkO/6jcOCusfk6LhMHpXXfBLXTZ7Ydyt0j4VoUQv6uGNYbdW+kBA==",
+ "dev": true
+ },
"node_modules/es-object-atoms": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz",
@@ -5669,6 +6140,15 @@
"node": ">=4.0"
}
},
+ "node_modules/estree-walker": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz",
+ "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==",
+ "dev": true,
+ "dependencies": {
+ "@types/estree": "^1.0.0"
+ }
+ },
"node_modules/esutils": {
"version": "2.0.3",
"resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz",
@@ -5688,6 +6168,15 @@
"node": ">=6"
}
},
+ "node_modules/expect-type": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/expect-type/-/expect-type-1.3.0.tgz",
+ "integrity": "sha512-knvyeauYhqjOYvQ66MznSMs83wmHrCycNEN6Ao+2AeYEfxUIkuiVxdEa1qlGEPK+We3n0THiDciYSsCcgW/DoA==",
+ "dev": true,
+ "engines": {
+ "node": ">=12.0.0"
+ }
+ },
"node_modules/exsolve": {
"version": "1.0.8",
"resolved": "https://registry.npmjs.org/exsolve/-/exsolve-1.0.8.tgz",
@@ -7835,6 +8324,16 @@
"url": "https://github.com/sponsors/ljharb"
}
},
+ "node_modules/obug": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/obug/-/obug-2.1.1.tgz",
+ "integrity": "sha512-uTqF9MuPraAQ+IsnPf366RG4cP9RtUi7MLO1N3KEc+wb0a6yKpeL0lmk2IB1jY5KHPAlTc6T/JRdC/YqxHNwkQ==",
+ "dev": true,
+ "funding": [
+ "https://github.com/sponsors/sxzz",
+ "https://opencollective.com/debug"
+ ]
+ },
"node_modules/ohash": {
"version": "2.0.11",
"resolved": "https://registry.npmjs.org/ohash/-/ohash-2.0.11.tgz",
@@ -7851,6 +8350,27 @@
"wrappy": "1"
}
},
+ "node_modules/openai": {
+ "version": "6.27.0",
+ "resolved": "https://registry.npmjs.org/openai/-/openai-6.27.0.tgz",
+ "integrity": "sha512-osTKySlrdYrLYTt0zjhY8yp0JUBmWDCN+Q+QxsV4xMQnnoVFpylgKGgxwN8sSdTNw0G4y+WUXs4eCMWpyDNWZQ==",
+ "license": "Apache-2.0",
+ "bin": {
+ "openai": "bin/cli"
+ },
+ "peerDependencies": {
+ "ws": "^8.18.0",
+ "zod": "^3.25 || ^4.0"
+ },
+ "peerDependenciesMeta": {
+ "ws": {
+ "optional": true
+ },
+ "zod": {
+ "optional": true
+ }
+ }
+ },
"node_modules/optionator": {
"version": "0.9.4",
"resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.4.tgz",
@@ -8519,6 +9039,50 @@
"node": ">=0.10.0"
}
},
+ "node_modules/rollup": {
+ "version": "4.59.0",
+ "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.59.0.tgz",
+ "integrity": "sha512-2oMpl67a3zCH9H79LeMcbDhXW/UmWG/y2zuqnF2jQq5uq9TbM9TVyXvA4+t+ne2IIkBdrLpAaRQAvo7YI/Yyeg==",
+ "dev": true,
+ "dependencies": {
+ "@types/estree": "1.0.8"
+ },
+ "bin": {
+ "rollup": "dist/bin/rollup"
+ },
+ "engines": {
+ "node": ">=18.0.0",
+ "npm": ">=8.0.0"
+ },
+ "optionalDependencies": {
+ "@rollup/rollup-android-arm-eabi": "4.59.0",
+ "@rollup/rollup-android-arm64": "4.59.0",
+ "@rollup/rollup-darwin-arm64": "4.59.0",
+ "@rollup/rollup-darwin-x64": "4.59.0",
+ "@rollup/rollup-freebsd-arm64": "4.59.0",
+ "@rollup/rollup-freebsd-x64": "4.59.0",
+ "@rollup/rollup-linux-arm-gnueabihf": "4.59.0",
+ "@rollup/rollup-linux-arm-musleabihf": "4.59.0",
+ "@rollup/rollup-linux-arm64-gnu": "4.59.0",
+ "@rollup/rollup-linux-arm64-musl": "4.59.0",
+ "@rollup/rollup-linux-loong64-gnu": "4.59.0",
+ "@rollup/rollup-linux-loong64-musl": "4.59.0",
+ "@rollup/rollup-linux-ppc64-gnu": "4.59.0",
+ "@rollup/rollup-linux-ppc64-musl": "4.59.0",
+ "@rollup/rollup-linux-riscv64-gnu": "4.59.0",
+ "@rollup/rollup-linux-riscv64-musl": "4.59.0",
+ "@rollup/rollup-linux-s390x-gnu": "4.59.0",
+ "@rollup/rollup-linux-x64-gnu": "4.59.0",
+ "@rollup/rollup-linux-x64-musl": "4.59.0",
+ "@rollup/rollup-openbsd-x64": "4.59.0",
+ "@rollup/rollup-openharmony-arm64": "4.59.0",
+ "@rollup/rollup-win32-arm64-msvc": "4.59.0",
+ "@rollup/rollup-win32-ia32-msvc": "4.59.0",
+ "@rollup/rollup-win32-x64-gnu": "4.59.0",
+ "@rollup/rollup-win32-x64-msvc": "4.59.0",
+ "fsevents": "~2.3.2"
+ }
+ },
"node_modules/run-parallel": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz",
@@ -8845,6 +9409,12 @@
"url": "https://github.com/sponsors/ljharb"
}
},
+ "node_modules/siginfo": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/siginfo/-/siginfo-2.0.0.tgz",
+ "integrity": "sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==",
+ "dev": true
+ },
"node_modules/signal-exit": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz",
@@ -8929,6 +9499,12 @@
"dev": true,
"license": "MIT"
},
+ "node_modules/stackback": {
+ "version": "0.0.2",
+ "resolved": "https://registry.npmjs.org/stackback/-/stackback-0.0.2.tgz",
+ "integrity": "sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==",
+ "dev": true
+ },
"node_modules/std-env": {
"version": "3.10.0",
"resolved": "https://registry.npmjs.org/std-env/-/std-env-3.10.0.tgz",
@@ -9217,6 +9793,12 @@
"node": ">= 6"
}
},
+ "node_modules/tinybench": {
+ "version": "2.9.0",
+ "resolved": "https://registry.npmjs.org/tinybench/-/tinybench-2.9.0.tgz",
+ "integrity": "sha512-0+DUvqWMValLmha6lr4kD8iAMK1HzV0/aKnCtWb9v9641TnP/MFb7Pc2bxoxQjTXAErryXVgUOfv2YqNllqGeg==",
+ "dev": true
+ },
"node_modules/tinyexec": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/tinyexec/-/tinyexec-1.0.2.tgz",
@@ -9275,6 +9857,15 @@
"url": "https://github.com/sponsors/jonschlinkert"
}
},
+ "node_modules/tinyrainbow": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/tinyrainbow/-/tinyrainbow-3.0.3.tgz",
+ "integrity": "sha512-PSkbLUoxOFRzJYjjxHJt9xro7D+iilgMX/C9lawzVuYiIdcihh9DXmVibBe8lmcFrRi/VzlPjBxbN7rH24q8/Q==",
+ "dev": true,
+ "engines": {
+ "node": ">=14.0.0"
+ }
+ },
"node_modules/to-regex-range": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz",
@@ -9675,6 +10266,198 @@
}
}
},
+ "node_modules/vite": {
+ "version": "7.3.1",
+ "resolved": "https://registry.npmjs.org/vite/-/vite-7.3.1.tgz",
+ "integrity": "sha512-w+N7Hifpc3gRjZ63vYBXA56dvvRlNWRczTdmCBBa+CotUzAPf5b7YMdMR/8CQoeYE5LX3W4wj6RYTgonm1b9DA==",
+ "dev": true,
+ "dependencies": {
+ "esbuild": "^0.27.0",
+ "fdir": "^6.5.0",
+ "picomatch": "^4.0.3",
+ "postcss": "^8.5.6",
+ "rollup": "^4.43.0",
+ "tinyglobby": "^0.2.15"
+ },
+ "bin": {
+ "vite": "bin/vite.js"
+ },
+ "engines": {
+ "node": "^20.19.0 || >=22.12.0"
+ },
+ "funding": {
+ "url": "https://github.com/vitejs/vite?sponsor=1"
+ },
+ "optionalDependencies": {
+ "fsevents": "~2.3.3"
+ },
+ "peerDependencies": {
+ "@types/node": "^20.19.0 || >=22.12.0",
+ "jiti": ">=1.21.0",
+ "less": "^4.0.0",
+ "lightningcss": "^1.21.0",
+ "sass": "^1.70.0",
+ "sass-embedded": "^1.70.0",
+ "stylus": ">=0.54.8",
+ "sugarss": "^5.0.0",
+ "terser": "^5.16.0",
+ "tsx": "^4.8.1",
+ "yaml": "^2.4.2"
+ },
+ "peerDependenciesMeta": {
+ "@types/node": {
+ "optional": true
+ },
+ "jiti": {
+ "optional": true
+ },
+ "less": {
+ "optional": true
+ },
+ "lightningcss": {
+ "optional": true
+ },
+ "sass": {
+ "optional": true
+ },
+ "sass-embedded": {
+ "optional": true
+ },
+ "stylus": {
+ "optional": true
+ },
+ "sugarss": {
+ "optional": true
+ },
+ "terser": {
+ "optional": true
+ },
+ "tsx": {
+ "optional": true
+ },
+ "yaml": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/vite/node_modules/fdir": {
+ "version": "6.5.0",
+ "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz",
+ "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==",
+ "dev": true,
+ "engines": {
+ "node": ">=12.0.0"
+ },
+ "peerDependencies": {
+ "picomatch": "^3 || ^4"
+ },
+ "peerDependenciesMeta": {
+ "picomatch": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/vite/node_modules/picomatch": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz",
+ "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==",
+ "dev": true,
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/jonschlinkert"
+ }
+ },
+ "node_modules/vitest": {
+ "version": "4.0.18",
+ "resolved": "https://registry.npmjs.org/vitest/-/vitest-4.0.18.tgz",
+ "integrity": "sha512-hOQuK7h0FGKgBAas7v0mSAsnvrIgAvWmRFjmzpJ7SwFHH3g1k2u37JtYwOwmEKhK6ZO3v9ggDBBm0La1LCK4uQ==",
+ "dev": true,
+ "dependencies": {
+ "@vitest/expect": "4.0.18",
+ "@vitest/mocker": "4.0.18",
+ "@vitest/pretty-format": "4.0.18",
+ "@vitest/runner": "4.0.18",
+ "@vitest/snapshot": "4.0.18",
+ "@vitest/spy": "4.0.18",
+ "@vitest/utils": "4.0.18",
+ "es-module-lexer": "^1.7.0",
+ "expect-type": "^1.2.2",
+ "magic-string": "^0.30.21",
+ "obug": "^2.1.1",
+ "pathe": "^2.0.3",
+ "picomatch": "^4.0.3",
+ "std-env": "^3.10.0",
+ "tinybench": "^2.9.0",
+ "tinyexec": "^1.0.2",
+ "tinyglobby": "^0.2.15",
+ "tinyrainbow": "^3.0.3",
+ "vite": "^6.0.0 || ^7.0.0",
+ "why-is-node-running": "^2.3.0"
+ },
+ "bin": {
+ "vitest": "vitest.mjs"
+ },
+ "engines": {
+ "node": "^20.0.0 || ^22.0.0 || >=24.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/vitest"
+ },
+ "peerDependencies": {
+ "@edge-runtime/vm": "*",
+ "@opentelemetry/api": "^1.9.0",
+ "@types/node": "^20.0.0 || ^22.0.0 || >=24.0.0",
+ "@vitest/browser-playwright": "4.0.18",
+ "@vitest/browser-preview": "4.0.18",
+ "@vitest/browser-webdriverio": "4.0.18",
+ "@vitest/ui": "4.0.18",
+ "happy-dom": "*",
+ "jsdom": "*"
+ },
+ "peerDependenciesMeta": {
+ "@edge-runtime/vm": {
+ "optional": true
+ },
+ "@opentelemetry/api": {
+ "optional": true
+ },
+ "@types/node": {
+ "optional": true
+ },
+ "@vitest/browser-playwright": {
+ "optional": true
+ },
+ "@vitest/browser-preview": {
+ "optional": true
+ },
+ "@vitest/browser-webdriverio": {
+ "optional": true
+ },
+ "@vitest/ui": {
+ "optional": true
+ },
+ "happy-dom": {
+ "optional": true
+ },
+ "jsdom": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/vitest/node_modules/picomatch": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz",
+ "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==",
+ "dev": true,
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/jonschlinkert"
+ }
+ },
"node_modules/which": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz",
@@ -9780,6 +10563,22 @@
"url": "https://github.com/sponsors/ljharb"
}
},
+ "node_modules/why-is-node-running": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/why-is-node-running/-/why-is-node-running-2.3.0.tgz",
+ "integrity": "sha512-hUrmaWBdVDcxvYqnyh09zunKzROWjbZTiNy8dBEjkS7ehEDQibXJ7XvlmtbwuTclUiIyN+CyXQD4Vmko8fNm8w==",
+ "dev": true,
+ "dependencies": {
+ "siginfo": "^2.0.0",
+ "stackback": "0.0.2"
+ },
+ "bin": {
+ "why-is-node-running": "cli.js"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
"node_modules/word-wrap": {
"version": "1.2.5",
"resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.5.tgz",
diff --git a/package.json b/package.json
index f141c9f..d6af708 100644
--- a/package.json
+++ b/package.json
@@ -12,7 +12,8 @@
"build": "next build",
"start": "next start",
"lint": "eslint",
- "siftly": "tsx cli/siftly.ts"
+ "siftly": "tsx cli/siftly.ts",
+ "test": "vitest run"
},
"dependencies": {
"@anthropic-ai/sdk": "^0.78.0",
@@ -29,6 +30,7 @@
"jszip": "^3.10.1",
"lucide-react": "^0.576.0",
"next": "16.1.6",
+ "openai": "^6.27.0",
"react": "19.2.3",
"react-dom": "19.2.3",
"tailwind-merge": "^3.5.0"
@@ -45,6 +47,7 @@
"prisma": "^7.4.2",
"tailwindcss": "^4",
"tsx": "^4.21.0",
- "typescript": "^5"
+ "typescript": "^5",
+ "vitest": "^4.0.18"
}
}
diff --git a/vitest.config.mts b/vitest.config.mts
new file mode 100644
index 0000000..ed80896
--- /dev/null
+++ b/vitest.config.mts
@@ -0,0 +1,17 @@
+import { defineConfig } from 'vitest/config'
+import path from 'path'
+import { fileURLToPath } from 'url'
+
+const __dirname = path.dirname(fileURLToPath(import.meta.url))
+
+export default defineConfig({
+ resolve: {
+ alias: {
+ '@': path.resolve(__dirname, '.'),
+ },
+ },
+ test: {
+ include: ['**/*.test.ts'],
+ exclude: ['node_modules'],
+ },
+})