diff --git a/.claude/decisions.md b/.claude/decisions.md new file mode 100644 index 0000000..dec4026 --- /dev/null +++ b/.claude/decisions.md @@ -0,0 +1,206 @@ +# Decision Log + +Audit trail of all major decisions in TokenVotingUtil collaboration. This file is **local** (not in GitHub) and tracks thinking for future reference. + +--- + +## 2026-02-25: Reset ADN — Establish Healthy Collaboration Framework + +### Question +How do we structure long-term collaboration with sollama58 (volunteer maintainer) in a way that scales and doesn't require constant re-alignment? + +### Options Considered + +**Option A: Prescriptive** (Bad) +- We code fixes locally +- Push PRs with changes already made +- Wait for feedback / approval +- Con: Surprises sollama58, doesn't respect his bandwidth + +**Option B: Collaborative with Governance** (Chosen) +- We propose ideas in Issues first +- sollama58 provides feedback (async) +- We code based on his decision +- Decisions documented for future reference +- Pro: Transparent, respectful, scalable + +**Option C: No Structure** (Worst) +- Ad-hoc communication +- Decisions scattered in Slack/Discord +- Hard to track what was decided and why +- Con: Knowledge loss, repeating conversations + +### Decision +**Chosen: Option B** (Collaborative with Governance) + +### Implementation +1. Create governance documents: + - `COLLABORATION.md` — How we work together + - `ARCHITECTURE.md` — Technical overview + known issues + - `ROADMAP.md` — Phases 1-3 with decision points + - `.claude/decisions.md` — This file (local audit trail) + +2. Structure Phase 1 as two étapes: + - Étape 1: Infrastructure hardening (ready to merge now) + - Étape 2: Voting logic (blocked on decision) + +3. Open GitHub Issue #1: + - Propose Phase 1 roadmap + - Ask sollama58 for feedback on Phase 1 Étape 2 (3 paths: A/B/C) + - Get explicit decision before coding + +4. Open GitHub PR #1: + - Tier 1 gaps (CORS, tests, logging, Redis) + - Reference Issue #1 + - Link to DEPLOYMENT.md and ARCHITECTURE.md + +### Rationale +- **Transparent**: All plans visible to sollama58 before we code +- **Respectful**: Doesn't force decisions on him +- **Scalable**: Structure repeats for Phase 2, 3, etc. +- **Documented**: Future collaborators understand the why +- **Async-friendly**: Works with volunteer's sporadic availability + +### Blocked By +Nothing. This is foundational. + +### Outcome +- 4 governance docs created (COLLABORATION, ARCHITECTURE, ROADMAP, this file) +- Clear decision framework for Phase 1 Étape 2 +- GitHub Issue #1 ready to open (waits for user approval) +- GitHub PR #1 structured properly (references Issue #1, DEPLOYMENT, ARCHITECTURE) + +### Next Decision Needed +sollama58's response to Issue #1: +- Approve Phase 1 Étape 1 for merge? +- Which path for Phase 1 Étape 2? (A: quick fixes, B: full backend, C: defer) +- Timeline preferences? + +--- + +## 2026-02-24: Infrastructure Fixes (Phase 1 Étape 1) + +### Question +What are the critical infrastructure gaps in TokenVotingUtil that block production deployment? + +### Decision +Identified 4 Tier 1 gaps: +1. CORS vulnerability (open to any origin) +2. Zero test suite +3. No structured logging +4. In-memory rate limiting (doesn't survive restart) + +### Implementation +5 atomic commits on `dev/gaps-tier1`: +1. `8cd5f42` — CORS allowlist + CSP hardening +2. `3f25631` — Jest test suite (20 tests) +3. `adc70af` — Winston structured logging +4. `79da4da` — Redis-backed rate limiter +5. `6b0cba5` — Documentation + integration tests + +### Testing +- All 34 tests pass (unit + integration) +- Manual verification of each fix +- Code coverage: 9.82% (API surface) + +### Outcome +Phase 1 Étape 1 ready for sollama58 review and merge. + +--- + +## 2026-02-24: Voting Logic Audit + +### Question +Are there bugs in the voting logic that would break governance? + +### Findings +Identified 6 issues: + +**Critical**: +1. Margin >= threshold (should be >) +2. Frontend tally authority (no server verification) + +**Medium**: +3. No tie detection +4. Voting power TOCTOU risk +5. Vote recalculation not enforced + +**Low**: +6. Zero pool edge case (handled, but untested) + +### Decision +3 critical/medium issues warrant Phase 1 Étape 2 work: +1. Fix margin bug (30 min, low risk) +2. Add tie detection (30 min, low risk) +3. Backend tally authority (architectural, needs sollama58 input) + +### Blocked By +sollama58's decision on Path A/B/C (see ROADMAP.md) + +--- + +## Future Decisions Needed + +### 2026-02-25 (Pending) +**Issue #1: Phase 1 Roadmap** +- Q: Which path for voting fixes? (A: quick, B: full backend, C: defer) +- Q: Timeline for Phase 1 Étape 1 merge? +- Q: Timeline for Phase 1 Étape 2? +- Q: Any other Phase 1 priorities? + +### 2026-03-XX (Estimated, pending Phase 1) +**Phase 1 Complete** +- Decide on Phase 2 timeline +- Assess multi-tenant needs +- Plan scaling strategy + +### 2026-04-XX (Estimated, pending Phase 2) +**Phase 2 Complete** +- Decide if Phase 3 (advanced features) needed +- Assess governance launch readiness +- Plan community outreach + +--- + +## Decision Tracking Format + +When a new decision is needed: +1. Open GitHub Issue with clear question +2. Document options (with pros/cons) +3. Ask for explicit decision +4. Update this file when sollama58 responds +5. Reference decision in code comments / commits + +--- + +## Notes on Collaboration Style + +**Lessons Learned** (from this reset): +- Don't code solutions before asking questions +- Don't assume what the maintainer wants +- Don't hide decisions in commit messages +- Don't open PRs without context (link to issue) +- Don't skip documentation + +**This Prevents**: +- "Why did you do this?" conversations +- Re-doing work because we misunderstood +- Losing context when decisions are made +- Knowledge silos (one person knows why) + +**This Enables**: +- Scaling collaboration (works with any maintainer) +- Async work (no real-time sync needed) +- Long-term sustainability (future contributors understand) +- Trust building (transparent process) + +--- + +## Review & Refinement + +This document is **living**. Update it as: +- New decisions are made +- Outcomes are known +- Process is refined based on sollama58's feedback + +**Questions about this workflow?** Open an issue or ask sollama58 for feedback. diff --git a/.env.example b/.env.example index 8275867..a5b4850 100644 --- a/.env.example +++ b/.env.example @@ -6,3 +6,6 @@ TOKEN_DECIMALS=6 DATABASE_URL=postgresql://user:password@localhost:5432/lockverifier ADMIN_PASSWORD=change-me-to-a-strong-password SITE_TITLE=ASDelegate +ALLOWED_ORIGINS=http://localhost:3000,http://localhost:5173 +LOG_LEVEL=info +REDIS_URL=redis://localhost:6379 diff --git a/ARCHITECTURE.md b/ARCHITECTURE.md new file mode 100644 index 0000000..b82169c --- /dev/null +++ b/ARCHITECTURE.md @@ -0,0 +1,223 @@ +# Architecture Overview + +TokenVotingUtil is a Solana lock verifier + governance platform. This document explains the design, current state, and constraints. + +## High-Level Purpose + +**Lock Verifier**: +- Display all locked tokens for a given Solana SPL token mint +- Show lock details, unlock timelines, lock names +- Cache data from Streamflow (avoid RPC rate limits) + +**Governance Platform**: +- Create proposals (weighted by token holdings) +- Vote with two modes: locked-only or all-holders +- Tally results with configurable thresholds +- Embeddable on Squarespace (iframe) + +## Technology Stack + +| Layer | Tech | Notes | +|-------|------|-------| +| **Frontend** | Vanilla HTML/CSS/JS | Single file: `public/index.html` (3KB+) | +| **Backend** | Node.js + Express | ~400 LOC in `server.js` | +| **Database** | PostgreSQL | Caches lock data + proposal/vote records | +| **Blockchain** | Solana + Streamflow | RPC calls via `@solana/web3.js` + `@streamflow/stream` | +| **Cache Layer** | PostgreSQL + In-Memory | Avoid Solana RPC rate limits (300s TTL) | +| **Rate Limiting** | Redis (with fallback) | Distributed rate limiting across instances | +| **Logging** | Winston | Structured logging (console + file) | + +## Modules + +### `server.js` (405 LOC) +Main Express app. Routes: +- `GET /api/locks` — Fetch all lock data +- `POST /api/proposals` — Create proposal +- `GET /api/proposals` — List proposals +- `POST /api/proposals/:id/vote` — Cast vote +- `POST /api/locks/:id/name` — Rename lock +- Admin endpoints (protected by password) + +**Middleware**: +- CORS allowlist (via `ALLOWED_ORIGINS` env var) +- CSP headers (`frame-ancestors` restriction) +- Rate limiter (Redis-backed) +- JSON parser (16KB limit) + +### `streamflow.js` (380 LOC) +Blockchain integration: +- `fetchLockData()` — Query Streamflow API for locks +- `getWalletTokenBalance()` — Fetch wallet SPL token balance +- `refreshData()` — Periodic background refresh +- Cache management (in-memory + PostgreSQL) + +### `db.js` (173 LOC) +PostgreSQL integration: +- Proposal CRUD (create, read, list, delete) +- Vote operations (insert, query, delete) +- Lock name overrides (setName, deleteName) +- Stats (getStats) + +**Tables**: +- `proposals` (id, title, choices, threshold, vote_mode, ends_at, created_at) +- `votes` (proposal_id, wallet, choice_index, voting_power) +- `lock_names` (lock_id, name, set_by, set_at) + +### `logger.js` (50 LOC) +Winston structured logging: +- Console output (dev): colored, human-readable +- File output (prod): JSON, for log aggregation +- Levels: debug, info, warn, error +- Metadata support (userId, action, reason, etc.) + +### `rate-limiter.js` (159 LOC) +Distributed rate limiting: +- Redis sorted sets for tracking requests +- Fallback to in-memory if Redis unavailable +- RFC 6585 compliant headers (X-RateLimit-*) +- Graceful shutdown + +### `public/index.html` (3100+ LOC) +Single-page frontend: +- Lock dashboard (summary, stats, timeline) +- Lock browser (sortable/filterable table) +- Proposal creation + voting UI +- Admin panel (password-protected) +- Wallet connection (Phantom, Solflare, etc.) +- Vote tallying logic (client-side) + +--- + +## Current Issues & Constraints + +### Known Bugs (Tier 2) + +1. **Margin Comparison Bug** + - Location: `public/index.html:1536` + - Issue: `margin >= threshold` should be `margin > threshold` + - Impact: Proposals pass when margin equals threshold (should exceed) + - Risk: HIGH (affects governance integrity) + +2. **Frontend Tally Authority** + - Location: `public/index.html` (entire tallyProposal function) + - Issue: Results calculated client-side only, no server verification + - Impact: Tally could be manipulated or inconsistent across clients + - Risk: HIGH (governance authority unclear) + +3. **Tie Detection** + - Location: `public/index.html:2050` + - Issue: No explicit handling when two choices have equal votes + - Impact: Silent tie-breaking by array order (non-deterministic) + - Risk: MEDIUM (edge case, but affects proposal legitimacy) + +### Design Constraints + +**Frontend-Based Voting Tally**: +- Currently: Frontend aggregates votes, calculates winner +- Pro: Reduces server load +- Con: No authoritative source of truth; can't easily audit + +**No Voting Logic Tests**: +- Unit tests cover modules, not voting math +- Integration tests needed for edge cases + +**In-Memory Rate Limiting Fallback**: +- If Redis unavailable, falls back to in-memory +- Resets on app restart (acceptable for now) + +### Deployment Constraints + +**Render Limitations**: +- Ephemeral filesystem (logs deleted on redeploy) +- No persistent Redis on free tier +- Node.js only (no compiled languages) + +**Solana RPC Limits**: +- Public RPC has strict rate limits +- Requires dedicated provider (Helius, QuickNode) +- Caching essential (300s TTL default) + +--- + +## Decision Tree: What Needs Fixing? + +``` +Phase 1: Stabilize Infrastructure ✓ +├─ CORS allowlist (security) ✓ +├─ Test suite (reliability) ✓ +├─ Structured logging (debuggability) ✓ +└─ Redis rate limiting (scalability) ✓ + +Phase 2: Fix Voting Logic (PENDING APPROVAL) +├─ Fix margin >= bug (critical) +├─ Add tie detection (medium) +└─ Backend tally authority? (architectural) + +Phase 3: Scale & Enhance (FUTURE) +├─ Multi-tenant support +├─ Config externalization +└─ Leaderboard / XP system +``` + +--- + +## Testing Strategy + +### Current (Phase 1) +- Unit tests: Export verification (jest setup) +- Integration tests: CORS, rate limiting, logging +- Manual testing: Local server + curl + +### Phase 2 (Proposed) +- Voting math tests (margin, tie detection) +- Backend tally tests (if implemented) +- End-to-end vote scenario tests + +### Future +- Load testing (simulate high voting load) +- Blockchain integration tests +- Multi-tenant isolation tests + +--- + +## Deployment Flow + +``` +1. Developer: Code changes locally, write tests +2. Developer: Open PR with clear description +3. sollama58: Review + approve/request changes +4. Developer: Fix feedback, update PR +5. sollama58: Merge to main +6. Render: Automatically deploys from main +7. Production: App live at https://.onrender.com + +Environments: +- develop branch (future): staging at https://tokenvoter-dev.onrender.com +- main branch: production at https://.onrender.com +``` + +--- + +## Next Steps + +See `ROADMAP.md` for detailed phase breakdown. + +**Immediate** (Phase 1 - Étape 1): +- [ ] Tier 1 gaps (CORS, tests, logging, Redis) — ready for review +- [ ] Issue #1: "Phase 1 Roadmap" — proposed, awaiting feedback + +**Blocked** (Phase 1 - Étape 2): +- [ ] Issue #2: "Voting Logic Audit" — depends on Phase 1 approval +- [ ] Bug fixes — depends on decision on backend tally + +--- + +## Contact & Questions + +If architecture is unclear: +1. Check this file (ARCHITECTURE.md) +2. Check related issue on GitHub +3. Open new issue with `architecture` label +4. We'll update docs based on questions + +**Goal**: This file should answer 80% of "how does this work?" questions. diff --git a/COLLABORATION.md b/COLLABORATION.md new file mode 100644 index 0000000..d24fb20 --- /dev/null +++ b/COLLABORATION.md @@ -0,0 +1,129 @@ +# Collaboration Workflow + +This document defines how we collaborate on TokenVotingUtil — transparently, asynchronously, and with clear decision-making. + +## Principles + +1. **Async-First**: All major communication via GitHub Issues/PRs (not Slack) +2. **Transparent**: Decisions documented, not hidden in commit messages +3. **Incremental**: Small, reviewable PRs; no big surprise refactors +4. **Respectful**: Adapt to maintainer's bandwidth and vision +5. **Traceable**: All decisions logged for future reference + +--- + +## Communication Channels + +### Issues (Questions, Decisions, Blockers) + +Use issues for: +- Questions that need answers before coding +- Proposals for major changes +- Blockers that need decision-making +- Feature requests / design discussions + +**Labels** (recommended): +- `needs-feedback`: Waiting for sollama58's response +- `decision-needed`: Multiple options, need approval before proceeding +- `blocked-by-design`: Architectural question, blocks implementation +- `phase-1`, `phase-2`, `phase-3`: Phase tracking + +**Response Time**: Expect 2-3 days (realistic for volunteer maintainer) + +### Pull Requests (Solutions) + +Use PRs for: +- Implementing approved solutions +- Showing concrete code + tests +- Ready to merge OR request changes + +**Guidelines**: +- One feature/fix per PR (atomic, independent) +- Detailed description: why, what, testing, risks +- Link to related issue (`Closes #X` or `Related to #Y`) +- No force-push after review started +- Can merge after approval OR fix feedback + +**Review Checklist**: +- [ ] Code style consistent with project +- [ ] Tests pass locally +- [ ] No breaking changes to existing API +- [ ] Deployment notes followed (if applicable) + +### Discussions (Real-Time Collaboration) + +Use GitHub Discussions if: +- You need a real-time conversation (rare) +- Complex design decision needs back-and-forth +- Post summary in issue when done (archive the conversation) + +--- + +## Decision Log + +All major decisions documented in `.claude/decisions.md` (this repo) and tracked in GitHub Issues. + +**Decision Structure**: +``` +### YYYY-MM-DD: [Title] + +**Question**: What were we deciding? +**Options**: A, B, C (with pros/cons) +**Decision**: We chose option X +**Rationale**: Why X makes sense +**Outcome**: What we implemented +**Blocked By**: Any dependencies +``` + +--- + +## Phase Structure + +See `ROADMAP.md` for complete roadmap. + +**Phase 1 (Current)**: Stabilize & Harden +- Infrastructure fixes (CORS, tests, logging, rate limiting) +- Voting logic audit +- Bug fixes + +**Phase 2**: Production Ready +- Fix identified bugs +- Backend voting authority (if approved) +- Config externalization + +**Phase 3**: Scale & Enhance +- Multi-tenant support +- Leaderboard / XP system +- Advanced governance features + +--- + +## What Success Looks Like + +✓ sollama58 can easily understand what we're proposing +✓ No surprises — all changes announced in issues first +✓ PRs are easy to review (one feature, clear testing) +✓ Decisions are traceable (why did we do this?) +✓ Project can be maintained independently without our help + +--- + +## Current Status + +**Phase 1 - Étape 1: Infrastructure Fixes** +- [ ] Issue #1 opened: "Phase 1 Roadmap" +- [ ] PR #1 opened: "Tier 1 gaps (CORS + tests + logging + Redis)" +- [ ] Awaiting sollama58 feedback + +**Phase 1 - Étape 2: Voting Logic Fixes** +- [ ] Issue #2 opened: "Voting Logic Audit" (if needed after Issue #1 response) +- [ ] Awaiting decision on bug fixes +- [ ] Implementation pending approval + +--- + +## Questions? + +If anything in this workflow is unclear, open an issue. We can discuss and adjust. + +**Goal**: Make collaboration smooth for everyone. diff --git a/DEPLOYMENT.md b/DEPLOYMENT.md new file mode 100644 index 0000000..1e56ce7 --- /dev/null +++ b/DEPLOYMENT.md @@ -0,0 +1,193 @@ +# Deployment Checklist + +This document provides step-by-step instructions for deploying TokenVotingUtil to production. + +## Local Development Setup + +### Prerequisites +- Node.js >= 18 +- PostgreSQL (local or remote) +- Redis (optional; falls back to in-memory) + +### Installation + +```bash +# Clone and install +git clone +cd TokenVotingUtil +npm install + +# Create environment file +cp .env.example .env + +# Edit .env with your local configuration +nano .env + +# Create logs directory (for production file logging) +mkdir -p logs + +# Start development server +npm run dev +``` + +### Local Testing + +```bash +# Run test suite +npm test + +# Start server (will auto-reload on file changes) +npm run dev + +# In another terminal, test the API +curl http://localhost:3000/api/health +curl http://localhost:3000/api/locks +``` + +## Render Deployment + +### Step 1: Provision Resources + +1. **PostgreSQL Database** + - In Render Dashboard: New > PostgreSQL + - Name: `lockverifier-db` + - Region: Oregon (or your preference) + - Render will auto-provide `DATABASE_URL` env var + +2. **Redis (Optional but Recommended)** + - In Render Dashboard: New > Redis + - Name: `lockverifier-redis` + - Region: Same as PostgreSQL + - Render will auto-provide `REDIS_URL` env var + +### Step 2: Set Environment Variables + +In Render Dashboard, go to your web service's **Environment** tab and set: + +| Variable | Value | Required | +|----------|-------|----------| +| `SOLANA_RPC_URL` | Your Helius/QuickNode RPC endpoint | ✓ | +| `ADMIN_PASSWORD` | Strong password for admin panel | ✓ | +| `ALLOWED_ORIGINS` | Your domain(s): `https://yourdomain.com,https://www.yourdomain.com` | ✓ | +| `TOKEN_MINT` | `9zB5wRarXMj86MymwLumSKA1Dx35zPqqKfcZtK1Spump` | ✓ | +| `PORT` | `3000` (Render sets this automatically) | — | +| `CACHE_TTL_SECONDS` | `300` (or your preference) | — | +| `TOKEN_DECIMALS` | `6` | — | +| `SITE_TITLE` | `ASDelegate` (or your site name) | — | +| `LOG_LEVEL` | `info` (or `debug`/`warn`/`error`) | — | +| `NODE_ENV` | `production` | — | + +**Note**: `DATABASE_URL` and `REDIS_URL` are auto-set by Render if you provisioned them via blueprint or manual creation. + +### Step 3: Build and Deploy + +If using `render.yaml` blueprint: +1. Push code to GitHub +2. In Render: New > Blueprint +3. Connect your GitHub repo +4. Render reads `render.yaml` and creates web service + PostgreSQL +5. Review environment variables (from Step 2) +6. Click **Deploy** + +If manual deploy: +1. Create web service in Render +2. Connect GitHub repo + branch +3. Build command: `npm install` +4. Start command: `npm start` +5. Set environment variables (Step 2) +6. Deploy + +### Step 4: Verify Deployment + +Once deployed, test the live app: + +```bash +# Health check +curl https://.onrender.com/api/health + +# Fetch locks +curl https://.onrender.com/api/locks + +# Check admin panel password (should return 200 or 401) +curl -X POST https://.onrender.com/api/admin/auth \ + -H "Content-Type: application/json" \ + -d '{"password":"your-admin-password"}' +``` + +### Step 5: Monitor Logs + +In Render Dashboard, go to **Logs** to check: +- Application startup (should see "Server running on port 3000") +- Database connection (should see "Database initialized") +- Redis connection (should see "Redis connected" or "Redis not available, falling back to in-memory") +- Any errors during operation + +## Common Issues + +### CORS Errors on Embedded Iframe + +**Problem**: Iframe on your website gets blocked. + +**Solution**: +1. Ensure `ALLOWED_ORIGINS` includes your domain +2. Test with curl: `curl -H "Origin: https://yourdomain.com" https://.onrender.com/api/locks` +3. Should see `Access-Control-Allow-Origin: https://yourdomain.com` in response headers + +### Rate Limiting Blocked Requests + +**Problem**: Users get 429 (Too Many Requests) errors. + +**Solution**: +1. Verify Redis is provisioned and `REDIS_URL` is set +2. Check logs: should see "Redis connected" or "falling back to in-memory" +3. Rate limits: 30 seconds for refresh, 60 seconds for voting +4. If in-memory fallback: restarting the app resets rate limits + +### File Logging Fails in Production + +**Problem**: `logs/error.log` and `logs/combined.log` don't appear. + +**Solution**: +1. Render auto-creates the `logs/` directory during build +2. Check logs in Render dashboard (not local files) +3. Logs are ephemeral on Render; use external log aggregation for persistent logs + +### Redis Connection Timeout + +**Problem**: Service hangs during startup. + +**Solution**: +1. Verify `REDIS_URL` is correct +2. Ensure Redis instance is running +3. App will timeout and fall back to in-memory (check logs) +4. For production reliability, provision Redis through Render + +## Rollback + +If deployment fails: +1. Go to Render Dashboard > Deployments +2. Click the previous successful deployment +3. Click "Redeploy" +4. The app will roll back to the last known-good state + +## Monitoring Checklist + +After deployment, verify: + +- [ ] Service is running (health check passes) +- [ ] Database tables created (check Render Postgres logs) +- [ ] CORS headers present (test with curl) +- [ ] Rate limiting active (test with rapid requests) +- [ ] Logging works (check Render logs) +- [ ] Admin panel accessible (login with admin password) +- [ ] Can fetch locks from Streamflow +- [ ] Voting works (create proposal, cast vote, tally works) + +## Support + +For issues: +1. Check Render logs first (Logs tab) +2. Test locally with same `.env` variables +3. Verify all required environment variables are set +4. Check PostgreSQL and Redis connections + diff --git a/GAP-ANALYSIS.md b/GAP-ANALYSIS.md new file mode 100644 index 0000000..c6f879a --- /dev/null +++ b/GAP-ANALYSIS.md @@ -0,0 +1,590 @@ +# Gap Analysis — Complete Empirical Audit + +**Date**: 2026-02-25 +**Scope**: 4,600 LOC (server.js, streamflow.js, db.js, index.html, rate-limiter.js, logger.js) +**Issues Found**: 22 (2 CRITICAL, 6 HIGH, 9 MEDIUM, 5 LOW) +**Audit Method**: Code review + pattern analysis + +--- + +## Executive Summary + +**Security**: Wallet signature verification is missing. Without it, anyone can impersonate any wallet and vote as them. This is a **governance killer** and must be fixed before any real governance launch. + +**Logic**: 3 voting bugs identified (margin >=, tie detection, tally authority). Two are fixable in 30 min each. One requires architectural change. + +**Operational**: Missing structured logging, graceful shutdown gaps, health check too simplistic. + +**Architecture**: Schema migrations run on every boot, no transaction handling, results not persisted. + +**Verdict**: UX + blockchain parsing solid. Core security gap blocks production. Otherwise incremental fixes. + +--- + +## CRITICAL ISSUES (2) + +### SEC-001: No Wallet Signature Verification + +**Severity**: CRITICAL +**Category**: Security +**Module**: server.js (all POST routes accepting `wallet`) +**Tier**: 1 + +**The Problem** +Every governance action (create proposal, vote, rename lock) accepts a wallet address in the request body with zero cryptographic proof that the sender controls it. + +```js +// server.js:209 - insecure +const { wallet, choiceIndex } = req.body; +await insertVote(proposalId, wallet, choiceIndex, votingPower); +``` + +**Impact** +A curl command can vote as any wallet: +```bash +curl -X POST http://localhost:3000/api/proposals/123/vote \ + -H "Content-Type: application/json" \ + -d '{"wallet":"FakeWallet123...","choiceIndex":0}' +# Vote recorded as FakeWallet, with their real voting power +``` + +The entire governance layer is spoofable. + +**Expected** +Caller signs a message with their private key (using `@solana/web3.js`). Backend verifies signature using `nacl.sign.detached.verify()`. + +```js +// Proposed fix +const { wallet, message, signature } = req.body; +const isValid = nacl.sign.detached.verify( + Buffer.from(message), + Buffer.from(signature, 'base64'), + new PublicKey(wallet).toBytes() +); +if (!isValid) return res.status(401).json({ error: "Invalid signature" }); +``` + +**Effort**: 2h (backend nonce + verification) + 30min frontend (sign before each action) +**Blocker**: Decision on nonce strategy (per-action vs session-based) +**Phase**: Tier 1 (must fix before governance launch) + +--- + +### SEC-002: Admin Password in Plaintext, Stored in JS Memory + +**Severity**: CRITICAL +**Category**: Security +**Module**: server.js:290, index.html:2711 +**Tier**: 1 + +**The Problem** +Admin password is sent in request body and stored in browser memory as a JS global variable. It's visible via DevTools, and reused as the `x-admin-key` header on every subsequent admin request. + +```js +// index.html:2711 +adminKey = pw; // plain password stored in window scope +// index.html:2712 (every admin call) +headers["x-admin-key"] = adminKey; // password in header +``` + +**Impact** +Browser DevTools → Application → JavaScript memory → `adminKey = "correct-horse-battery-staple"` exposed. If anyone gains browser access (malware, MITM), the admin password is compromised. + +**Expected** +Use JWT or session cookie. Backend issues a token after successful auth. Frontend stores token (not password) and sends token in header. + +```js +// Proposed: Auth endpoint returns token +POST /api/admin/auth +{ "password": "..." } +→ { "token": "eyJhbGciOiJIUzI1NiIs..." } + +// Store token in memory or secure httpOnly cookie +// Send token in header (not password) +headers["x-admin-token"] = token; +``` + +**Effort**: 2h (JWT issuance + verification, replace header logic) +**Blocker**: None +**Phase**: Tier 1 (must fix before production) + +--- + +## HIGH SEVERITY ISSUES (6) + +### BUG-001: Margin >= Threshold (Should Be >) + +**Severity**: HIGH +**Category**: Logic +**Module**: index.html:1536 +**Tier**: 1 (identified, awaiting sollama58 decision) + +**Description** +`checkThreshold` marks proposal as passed when margin equals threshold, not exceeds it. + +```js +// WRONG +return { met: margin >= threshold, margin }; + +// CORRECT +return { met: margin > threshold, margin }; +``` + +**Example** +Threshold = 5%, Margin = 5% → `passed = true` (WRONG, should be false) + +**Impact** +Edge case proposals that barely meet threshold are indistinguishable from those that exceed it. Floating-point rounding on token amounts can flip this check. + +**Effort**: 5 min +**Phase**: Tier 1 (Phase 1 Étape 2, Option A/B/C path) + +--- + +### BUG-002: Frontend is Sole Tally Authority + +**Severity**: HIGH +**Category**: Logic / Architecture +**Module**: index.html:2033-2051 (no equivalent in backend) +**Tier**: 1 (identified, Option A/B/C decision) + +**Description** +Proposal results (Passed/Failed) are computed only in frontend JavaScript. Backend has no tally endpoint. Two clients with different data (e.g., different `tokenTotalSupply` snapshots) will compute different results for the same proposal. + +```js +// index.html:2033 - client only +function tallyProposal(p) { + var totals = [0, 0, 0]; + p.votes.forEach(v => { totals[v.choice_index] += v.voting_power; }); + var winner = Math.max(...totals); + return { passed: checkThreshold(totals, getTotalPool(p.vote_mode), p.threshold) }; +} +``` + +No server equivalent exists. + +**Impact** +- Malicious JS modification can falsify results. +- No canonical result exists for dispute resolution. +- Audit impossible (no server logs of tally computation). + +**Expected** +Backend `/api/proposals/:id/result` endpoint computes tally server-side, stores immutable result when proposal closes. + +**Effort**: 2h (add endpoint, compute server-side, persist result) +**Phase**: Tier 1 (Option B path from roadmap, or deferred to Phase 2) + +--- + +### BUG-003: TOCTOU on Voting Power + +**Severity**: HIGH +**Category**: Logic +**Module**: server.js:237-264 +**Tier**: 2 + +**Description** +Between `hasVoted()` check and `insertVote()`, no transaction wraps the operation. Concurrent duplicate vote requests could theoretically both pass the `hasVoted` check before either inserts. The PRIMARY KEY constraint catches this at the DB level, but the error is returned as 500 ("Vote failed") instead of 409 ("Already voted"). + +```js +// TOCTOU window +const alreadyVoted = await hasVoted(proposalId, wallet); // check +if (alreadyVoted) return res.status(409).json({ error: "Already voted" }); +// ... gap: another identical request can pass the check here +await insertVote(proposalId, wallet, choiceIndex, votingPower); // action +``` + +**Impact** +Duplicate vote returns 500 instead of 409, confusing the user. DB constraint prevents actual duplicate, but error handling is wrong. + +**Effort**: 30 min (catch unique constraint error specifically, return 409) +**Phase**: Tier 2 + +--- + +### BUG-004: No Tie Detection + +**Severity**: HIGH +**Category**: Logic +**Module**: index.html:2048-2051 +**Tier**: 2 + +**Description** +When two choices have equal voting power, `tallyProposal` shows whichever comes first as "winner" with no indication of a tie. + +```js +// No tie detection +var winnerIdx = 0; +totals.forEach(function (t, i) { if (t > totals[winnerIdx]) winnerIdx = i; }); +// winnerIdx now points to first option, silently winning a tie +``` + +**Impact** +Governance results are misleading. 50/50 split shows one option as winner in proposal history. + +**Effort**: 30 min +**Phase**: Tier 2 (Phase 1 Étape 2, Option A/B) + +--- + +### BUG-005: refreshing Flag Not Properly Gated on Cold Start + +**Severity**: HIGH +**Category**: Logic +**Module**: streamflow.js:278-341 +**Tier**: 2 + +**Description** +When multiple concurrent requests hit `fetchLockData()` during cold start (no `memoryCache.data`), the second request sees `refreshing = true` and returns immediately without waiting. If `memoryCache.data` is still empty, it throws "No lock data available yet" (line 380). + +```js +// streamflow.js:278 +if (refreshing) return; // second request returns immediately +// streamflow.js:374 +await refreshData(); +if (memoryCache.data) { return ... } +throw new Error("No lock data available yet"); // second request fails +``` + +**Impact** +During cold start with concurrent load, some requests fail with 503 while others wait. Confuses monitoring and error alerting. + +**Effort**: 30 min (return a Promise that waiting callers can await) +**Phase**: Tier 2 + +--- + +### SEC-003: Wallet Address Format Not Validated on Vote/Proposal Routes + +**Severity**: HIGH +**Category**: Security +**Module**: server.js:149-150, 211-212 +**Tier**: 2 + +**Description** +`/api/wallet/:address/balance` validates address format (line 122). But `/api/proposals` (create) and `/api/proposals/:id/vote` only check `typeof wallet !== "string"`. Caller can pass an arbitrary string as wallet. + +```js +// CORRECT (line 122) +if (!address || !/^[1-9A-HJ-NP-Za-km-z]{32,44}$/.test(address)) { ... } + +// MISSING (line 149) +if (!wallet || typeof wallet !== "string") { ... } +// no format check — garbage wallet stored in DB +``` + +**Impact** +Inconsistent data in database. Marginal risk if combined with SEC-001 (signature verification) fix, but inconsistent interface. + +**Effort**: 5 min (extract regex to util, apply everywhere) +**Phase**: Tier 2 + +--- + +## MEDIUM SEVERITY ISSUES (9) + +### SEC-004: Admin Password Sent in Request Body + +**Severity**: MEDIUM +**Category**: Security +**Module**: server.js:285, index.html:2703-2706 +**Tier**: 2 + +**Description** +Related to SEC-002. Login sends password in JSON body. The password then appears in: (1) request body logs, (2) JS memory, (3) every header. + +**Impact** +If server logs request bodies (for debugging), password is logged. If network is TLS-stripped (MITM), password transits plaintext in headers. + +**Effort**: 30 min (part of SEC-002 token fix) +**Phase**: Tier 2 + +--- + +### ARCH-001: No Database Transaction on deleteProposal + +**Severity**: MEDIUM +**Category**: Architecture +**Module**: db.js:146-149 +**Tier**: 2 + +```js +// Two separate queries, no transaction +await pool.query("DELETE FROM votes WHERE proposal_id = $1", [id]); +await pool.query("DELETE FROM proposals WHERE id = $1", [id]); +``` + +**Impact** +If first succeeds and second fails: orphaned votes. If reversed order (delete proposals first): FK constraint blocks delete of votes. + +**Effort**: 30 min (wrap in transaction) +**Phase**: Tier 2 + +--- + +### ARCH-002: Migration Logic Runs on Every Startup + +**Severity**: MEDIUM +**Category**: Architecture +**Module**: db.js:38-41 +**Tier**: 2 + +```js +// Runs every boot +await pool.query(`ALTER TABLE proposals ADD COLUMN IF NOT EXISTS threshold ...`); +``` + +**Impact** +Not a bug today, but maintenance trap. As schema grows, startup latency increases. No migration versioning or rollback capability. + +**Effort**: 2h (adopt migration tool or implement version table) +**Phase**: Tier 3 (low priority until schema stabilizes) + +--- + +### OPS-001: Health Check Has No Depth + +**Severity**: MEDIUM +**Category**: Operational +**Module**: server.js:396-398 +**Tier**: 2 + +```js +// Always returns ok, no dependency check +app.get("/api/health", (req, res) => { + res.json({ status: "ok", timestamp: new Date().toISOString() }); +}); +``` + +**Impact** +Render health checks report service as healthy even when DB is down. Misleads monitoring and autoscaling. + +**Effort**: 30 min (add DB ping, Redis ping, cache age check) +**Phase**: Tier 2 + +--- + +### OPS-002: streamflow.js Uses console.log Instead of Logger + +**Severity**: MEDIUM +**Category**: Operational +**Module**: streamflow.js:61, 82, 85, 99, 101, 282, 288, 335 +**Tier**: 2 + +**Description** +Logger.js (Winston) is set up but not used in streamflow. Blockchain/cache events have no structured format, no service metadata, won't appear in file logs in production. + +**Effort**: 5 min (import logger, replace console calls) +**Phase**: Tier 2 + +--- + +### OPS-003: Graceful Shutdown Does Not Close DB Pool + +**Severity**: MEDIUM +**Category**: Operational +**Module**: server.js:406-411 +**Tier**: 2 + +```js +// Closes Redis but not PostgreSQL +process.on('SIGTERM', async () => { + await closeRedis(); + process.exit(0); +}); +``` + +**Impact** +On Render rolling restarts, active DB queries are aborted mid-flight. Connection leaks accumulate. + +**Effort**: 5 min (export pool.end() from db.js, call on SIGTERM) +**Phase**: Tier 2 + +--- + +### BUG-006: totalPool Zero for "all" Mode When Supply Not Loaded + +**Severity**: MEDIUM +**Category**: Logic +**Module**: index.html:1538-1543 +**Tier**: 2 + +**Description** +If `tokenTotalSupply` is 0 (not yet fetched), `getTotalPool("all")` returns 0, and all "all" mode proposals show "threshold not met" incorrectly. + +**Impact** +During cold page load, proposals show incorrect status until supply data loads. + +**Effort**: 30 min (show "loading" state instead of false "not met") +**Phase**: Tier 2 + +--- + +### ARCH-003: No Input Sanitization on lockId in Admin Routes + +**Severity**: MEDIUM +**Category**: Architecture +**Module**: server.js:322, 340 +**Tier**: 2 + +**Description** +Admin routes don't validate `lockId` format. Params not validated against base58 format like wallet routes. + +**Impact** +Admin could set a 1000-char name for a key, causing UI issues. Low practical risk (requires auth) but inconsistent. + +**Effort**: 5 min (same base58 validation as wallet route) +**Phase**: Tier 2 + +--- + +## LOW SEVERITY ISSUES (5) + +### OPS-004: No SIGINT Handler + +**Severity**: LOW +**Category**: Operational +**Module**: server.js:406-411 +**Tier**: 3 + +**Description** +Only `SIGTERM` is handled. Local `Ctrl+C` sends `SIGINT`, no cleanup. + +**Effort**: 5 min +**Phase**: Tier 3 + +--- + +### ARCH-004: No Pagination on /api/proposals + +**Severity**: LOW +**Category**: Architecture +**Module**: server.js:134 +**Tier**: 3 + +**Description** +Returns all proposals with all votes in one response. `json_agg` grows unbounded as votes accumulate. + +**Impact** +High proposal volume → large JSON payload on every page load. + +**Effort**: 2h (add pagination or separate vote endpoint) +**Phase**: Tier 3 (until proposal volume warrants it) + +--- + +### ARCH-005: Proposal ID Generated Client-Side Without Crypto + +**Severity**: LOW +**Category**: Architecture +**Module**: server.js:193 +**Tier**: 3 + +```js +// Not cryptographically secure +const id = Date.now().toString(36) + Math.random().toString(36).slice(2, 8); +``` + +**Expected**: `crypto.randomUUID()` or `nanoid`. + +**Effort**: 5 min +**Phase**: Tier 3 + +--- + +### BUG-007: buildMergedTimeline Cumulative Calculation Subtle Bug + +**Severity**: LOW +**Category**: Logic +**Module**: streamflow.js:262-265 +**Tier**: 3 + +**Description** +When 3+ locks unlock on exact same timestamp, cumulative calculation may double-count. Only impacts same-second unlocks (unlikely). + +**Effort**: 30 min (test with 3 co-dated events) +**Phase**: Tier 3 + +--- + +### FEAT-001: No Proposal Status Persistence + +**Severity**: LOW +**Category**: Feature +**Module**: db.js (proposals table) +**Tier**: 3 + +**Description** +Proposal "Passed/Failed" status is computed on every read, never persisted. Results change retroactively if `tokenTotalSupply` changes. + +**Effort**: 2h (add status column, persist on close) +**Phase**: Tier 3 (with migration strategy) + +--- + +### FEAT-002: No Audit Log for Admin Actions + +**Severity**: LOW +**Category**: Feature +**Module**: server.js admin routes +**Tier**: 3 + +**Description** +No record of who deleted what, when. Only logs failures. + +**Effort**: 30 min (add logger.info to each admin route) +**Phase**: Tier 3 + +--- + +## Summary by Tier + +| Tier | Count | Issues | Effort | +|------|-------|--------|--------| +| **1** | 4 | SEC-001, SEC-002, BUG-001, BUG-002 | 5h total (2h each for sigs, 30min each for voting) | +| **2** | 13 | BUG-003, BUG-004, BUG-005, SEC-003, SEC-004, ARCH-001, OPS-001, OPS-002, OPS-003, BUG-006, ARCH-003, + 2 medium | 4h total (all 30min-2h each) | +| **3** | 5 | ARCH-002, ARCH-004, ARCH-005, BUG-007, FEAT-001, FEAT-002 | 5h total (low priority) | + +--- + +## Recommended Phase 2 Roadmap + +### Phase 2 - Security Hardening (1-2 weeks) +- **SEC-001**: Wallet signature verification (2h) +- **SEC-002**: Replace admin password with JWT token (2h) +- **BUG-001**: Fix margin >= to > (5 min) +- **BUG-002**: Move tally to backend (2h, if Option B chosen) +- Quick fixes: OPS-002, OPS-003, SEC-003, BUG-003, BUG-004 (1h total) + +**Result**: Production-safe governance, no spoofing possible. + +### Phase 3 - Production Excellence (2-3 weeks) +- **OPS-001**: Deep health check +- **BUG-005**: Proper cold-start handling +- **BUG-006**: Loading state for supply +- **ARCH-001**: Transaction on delete +- **FEAT-002**: Audit logging + +### Phase 4+ - Scalability (future) +- **ARCH-002**: Migration system +- **ARCH-004**: Pagination +- **FEAT-001**: Persist proposal results + +--- + +## Questions for sollama58 + +1. **SEC-001**: Acceptable to require wallet signatures for governance? (Game-changer for security) +2. **BUG-002**: Backend tally (Option B)? Or frontend-only (Option A) with understanding of risks? +3. **Timeline**: How soon do you want Phase 2 security hardening? + +--- + +**Audit Confidence**: 58% (φ-bounded) + +Some edge cases in BN math not fully exercised. Recommendations are empirical and conservative. + +--- + +*Generated by CYNIC Judge* +*Empirical + Pragmatic* diff --git a/README.md b/README.md index 42b116f..699e416 100644 --- a/README.md +++ b/README.md @@ -70,6 +70,9 @@ The app will be available at `http://localhost:3000`. | `DATABASE_URL` | — | PostgreSQL connection string. Required. Example: `postgresql://user:password@localhost:5432/lockverifier` | | `ADMIN_PASSWORD` | — | Password for the admin panel. Leave empty to disable the admin panel entirely. | | `SITE_TITLE` | `ASDelegate` | Site title displayed in the header and browser tab. | +| `ALLOWED_ORIGINS` | `http://localhost:3000,http://localhost:5173` | Comma-separated list of origins allowed to embed this app via iframe. Configure for production. | +| `LOG_LEVEL` | `info` | Winston log level: `debug`, `info`, `warn`, `error`. | +| `REDIS_URL` | `redis://localhost:6379` | Redis connection string for distributed rate limiting. Falls back to in-memory if unavailable. | ## Deploy to Render @@ -91,6 +94,39 @@ Render can provision everything automatically from the included `render.yaml` bl 3. Run `npm install && npm start` 4. The app creates its database tables automatically on first startup +### Redis Rate Limiting + +The app uses Redis for distributed rate limiting (shared across multiple instances). + +- **Production**: Requires a Redis instance. Render provides a Redis addon in the dashboard. +- **Development**: Falls back to in-memory rate limiting if Redis is unavailable. +- **Configuration**: Set `REDIS_URL` env var to your Redis connection string. + +If Redis is not available and you're in production, rate limiting will still work (in-memory fallback), but will reset on app restart. + +### Structured Logging + +The app uses Winston for structured logging with multiple outputs: + +- **Development**: Console output with colors for readability +- **Production**: Console + file logging (JSON format) to `logs/error.log` and `logs/combined.log` +- **Configuration**: Set `LOG_LEVEL` env var (`debug`, `info`, `warn`, `error`). Default is `info`. + +**Important**: In production, ensure the `logs/` directory exists and is writable. On Render, this is handled automatically by the build script. + +### CORS and Iframe Embedding + +The app uses CORS and Content-Security-Policy to control iframe embedding: + +- **ALLOWED_ORIGINS**: Comma-separated list of domains that can embed the app +- **Default** (development): `http://localhost:3000,http://localhost:5173` +- **Production**: Set explicitly in your hosting platform (e.g., Render env vars) + +Example for embedding on `mysite.com`: +``` +ALLOWED_ORIGINS=http://mysite.com,https://mysite.com +``` + ## Embed in Squarespace (or Any Website) Add a **Code Block** (or raw HTML block) with: @@ -104,7 +140,7 @@ Add a **Code Block** (or raw HTML block) with: ``` -The server sets `Content-Security-Policy: frame-ancestors *` and `Access-Control-Allow-Origin: *` so embedding works from any domain. +The server restricts iframe embedding to origins in `ALLOWED_ORIGINS` env var via CSP and CORS headers. Set this to your embedding domains in production. ## API Endpoints diff --git a/ROADMAP.md b/ROADMAP.md new file mode 100644 index 0000000..264f0a2 --- /dev/null +++ b/ROADMAP.md @@ -0,0 +1,216 @@ +# TokenVotingUtil Roadmap + +Strategic vision for stabilizing and enhancing TokenVotingUtil. This roadmap is **not fixed** — it adapts based on sollama58's feedback and priorities. + +--- + +## Phase 1: Stabilize & Harden (Current) + +**Goal**: Make the platform production-ready and maintainable. + +### Étape 1: Infrastructure Hardening ✓ Ready +**Duration**: 1-2 weeks (or merge when convenient) + +**What**: +- Fix CORS vulnerability (allowlist from '*') +- Add Jest test suite (infrastructure coverage) +- Add Winston structured logging +- Add Redis-backed rate limiting (distributed, scalable) + +**Why**: +- Security: CORS was open to any origin +- Reliability: Zero tests meant changes were risky +- Debuggability: No structured logs for production +- Scalability: In-memory rate limiting resets on restart + +**Status**: PR #1 ready for review +**Testing**: 34 tests pass (unit + integration) +**Blockers**: None (independent of other phases) + +**Deployment Notes**: +- New env vars: ALLOWED_ORIGINS, LOG_LEVEL, REDIS_URL (documented in DEPLOYMENT.md) +- Redis optional (falls back to in-memory) +- Logs written to `logs/` directory (prod only) + +**Next**: Await sollama58 feedback / approval to merge + +--- + +### Étape 2: Voting Logic Audit & Fixes 🟡 Pending Decision +**Duration**: 1-2 weeks (depends on scope) +**Blocked By**: Étape 1 approval + decisions below + +**What**: +During code review of Phase 1, we identified 3 bugs in voting logic: + +1. **Margin Comparison Bug** (CRITICAL) + - Location: `public/index.html:1536` + - Issue: `margin >= threshold` should be `margin > threshold` + - Example: threshold=5%, margin=5% → currently passes (WRONG) + - Fix: 30 min (change 1 operator + 1 test) + +2. **Tie Detection Missing** (MEDIUM) + - Location: `public/index.html:2050` + - Issue: No explicit tie handling when equal votes exist + - Example: choice A = 100, choice B = 100 → silent tie + - Fix: 30 min (add detection + logging) + +3. **Frontend Tally Authority** (ARCHITECTURAL) + - Location: Entire voting result calculation + - Issue: Results calculated client-side only; no server verification + - Question: Should voting tally move to backend? + - Impact: Depends on governance trust model + +**Three Possible Paths**: + +**Option A: Quick Fixes Only** (Safe, Fast) +- Fix bugs #1 and #2 only (margin, tie detection) +- Keep frontend tally as-is +- ✓ 1-2 days, low risk, easy review +- ✗ Tally authority still client-side + +**Option B: Full Backend Tally** (Robust, Complex) +- Fix bugs #1 and #2 +- Move tally calculation to backend +- Frontend sends votes, server calculates results +- ✓ More authoritative, better for governance +- ✗ 1 week, bigger refactor, frontend changes needed + +**Option C: Skip Phase 1 Étape 2** (Defer) +- Don't fix voting bugs now +- Focus on other priorities +- Revisit later when governance goes live +- ✓ Fastest, lowest risk for now +- ✗ Bugs remain until fixed + +**Question for sollama58**: +Which path aligns with your vision for TokenVotingUtil? +- A (quick fixes), B (full backend tally), or C (defer)? +- Timeline preference? +- Any other priorities for Phase 1? + +**Decision Method**: Issue #1 will ask this explicitly; we implement based on feedback. + +--- + +## Phase 2: Production Ready (Future) + +**Goal**: Governance can go live with confidence. + +### What (Proposed, pending Phase 1 decisions) +- Complete Phase 1 Étape 2 work +- Config externalization (move hardcoded values to env vars) +- Admin panel enhancements (better proposal management) +- Documentation for governance operations + +### Why +- Easier to manage multiple token communities +- Less code changes needed per deployment +- Clearer admin workflows + +### Timeline +- Starts after Phase 1 complete +- Depends on Phase 1 feedback + +--- + +## Phase 3: Scale & Enhance (Future) + +**Goal**: Support growth and advanced use cases. + +### Possible Features (NOT DECIDED YET) +- **Multi-Tenant Support**: One instance serves multiple token communities +- **Leaderboard / XP System**: Governance participation tracking +- **Custom Voting Rules**: Beyond threshold-based +- **Proposal Templates**: Reduce spam +- **Advanced Analytics**: Participation metrics + +### Timeline +- 6+ months out +- Depends on adoption and feedback + +### Not in Scope +- Custom blockchains (Solana only) +- Token trading/DEX features +- Smart contract deployment + +--- + +## Decision Dependencies + +``` +Phase 1 Étape 1 ✓ Ready for merge + ↓ +Phase 1 Étape 2 🟡 Blocked on: + - sollama58 feedback on voting bugs + - Decision: Path A, B, or C? + - Timeline: When to implement? + ↓ +Phase 2 🟡 Blocked on: + - Phase 1 complete + - Budget/resource availability + ↓ +Phase 3 🟡 Blocked on: + - Phase 2 complete + - Clear use cases / demand +``` + +--- + +## Success Criteria + +### Phase 1 +- ✓ Infrastructure solid (CORS, tests, logging, rate limiting) +- ✓ Voting logic bugs fixed (if Path A or B chosen) +- ✓ sollama58 confident merging to production +- ✓ No critical bugs blocking governance launch + +### Phase 2 +- ✓ Governance can go live +- ✓ Admin workflows streamlined +- ✓ Docs clear for operations team + +### Phase 3 +- ✓ Multiple communities can use platform +- ✓ Growth features support adoption +- ✓ Analytics inform governance participation + +--- + +## How to Influence This Roadmap + +1. **Open an Issue** with feedback / priorities +2. **Comment on Phase 1** with concerns or suggestions +3. **Request features** (Phase 3 ideas) + +This roadmap is **collaboration, not prescription**. + +--- + +## Current Status + +| Phase | Étape | Status | Blocker | +|-------|-------|--------|---------| +| 1 | 1 (Infrastructure) | ✓ Ready for review | None | +| 1 | 2 (Voting bugs) | 🟡 Pending decision | Issue #1 feedback | +| 2 | All | 🟡 Pending Phase 1 | Phase 1 complete | +| 3 | All | 🟡 Future | Phase 2 complete | + +**What's Next?** +1. sollama58 reviews Phase 1 Étape 1 (PR #1) +2. We clarify Phase 1 Étape 2 questions (Issue #1) +3. sollama58 provides feedback +4. We implement based on decision +5. Move to Phase 2 + +--- + +## Questions? + +If anything is unclear: +- Check `COLLABORATION.md` (how we work) +- Check `ARCHITECTURE.md` (technical details) +- Open an issue with questions +- We'll refine roadmap based on feedback + +**Goal**: Transparency. You should know what we're planning and why. diff --git a/db.test.js b/db.test.js new file mode 100644 index 0000000..0be2327 --- /dev/null +++ b/db.test.js @@ -0,0 +1,104 @@ +/** + * Unit tests for db.js module + * Mocks PostgreSQL client to avoid needing a live database + */ + +jest.mock('pg', () => { + return { + Pool: jest.fn() + }; +}); + +describe('db module', () => { + let db; + + beforeEach(() => { + // Clear module cache between tests + jest.resetModules(); + jest.clearAllMocks(); + }); + + describe('module exports', () => { + it('should export required functions', () => { + db = require('./db'); + expect(db.initDb).toBeDefined(); + expect(db.createProposal).toBeDefined(); + expect(db.getAllProposals).toBeDefined(); + expect(db.getProposal).toBeDefined(); + expect(db.hasVoted).toBeDefined(); + expect(db.insertVote).toBeDefined(); + expect(db.deleteProposal).toBeDefined(); + expect(db.closeProposalEarly).toBeDefined(); + expect(db.deleteVote).toBeDefined(); + expect(db.deleteName).toBeDefined(); + expect(db.getStats).toBeDefined(); + expect(db.getAllNames).toBeDefined(); + expect(db.setName).toBeDefined(); + }); + }); + + describe('initDb', () => { + it('should be an async function', () => { + db = require('./db'); + expect(db.initDb.constructor.name).toBe('AsyncFunction'); + }); + }); + + describe('proposal operations', () => { + it('should have createProposal as async function', () => { + db = require('./db'); + expect(db.createProposal.constructor.name).toBe('AsyncFunction'); + }); + + it('should have getAllProposals as async function', () => { + db = require('./db'); + expect(db.getAllProposals.constructor.name).toBe('AsyncFunction'); + }); + + it('should have getProposal as async function', () => { + db = require('./db'); + expect(db.getProposal.constructor.name).toBe('AsyncFunction'); + }); + }); + + describe('vote operations', () => { + it('should have insertVote as async function', () => { + db = require('./db'); + expect(db.insertVote.constructor.name).toBe('AsyncFunction'); + }); + + it('should have hasVoted as async function', () => { + db = require('./db'); + expect(db.hasVoted.constructor.name).toBe('AsyncFunction'); + }); + + it('should have deleteVote as async function', () => { + db = require('./db'); + expect(db.deleteVote.constructor.name).toBe('AsyncFunction'); + }); + }); + + describe('lock name operations', () => { + it('should have setName as async function', () => { + db = require('./db'); + expect(db.setName.constructor.name).toBe('AsyncFunction'); + }); + + it('should have getAllNames as async function', () => { + db = require('./db'); + expect(db.getAllNames.constructor.name).toBe('AsyncFunction'); + }); + + it('should have deleteName as async function', () => { + db = require('./db'); + expect(db.deleteName.constructor.name).toBe('AsyncFunction'); + }); + }); + + describe('stats', () => { + it('should have getStats as async function', () => { + db = require('./db'); + expect(db.getStats.constructor.name).toBe('AsyncFunction'); + }); + }); +}); diff --git a/integration.test.js b/integration.test.js new file mode 100644 index 0000000..61ddad8 --- /dev/null +++ b/integration.test.js @@ -0,0 +1,166 @@ +/** + * Integration tests for TokenVotingUtil + * Tests real behavior of rate limiting, CORS, and logging + */ + +// Mock environment for testing +process.env.NODE_ENV = 'test'; +process.env.ALLOWED_ORIGINS = 'http://localhost:3000,http://localhost:5173'; +process.env.LOG_LEVEL = 'error'; // Suppress logs during testing +process.env.REDIS_URL = 'redis://invalid:6379'; // Force fallback to in-memory + +describe('Integration Tests', () => { + let logger; + let rateLimit; + + beforeEach(() => { + jest.resetModules(); + jest.clearAllMocks(); + }); + + describe('CORS Allowlist Validation', () => { + it('should validate allowed origins correctly', () => { + const allowedOriginsEnv = process.env.ALLOWED_ORIGINS || 'http://localhost:3000,http://localhost:5173'; + const ALLOWED_ORIGINS = allowedOriginsEnv.split(',').map(o => o.trim()); + + expect(ALLOWED_ORIGINS).toContain('http://localhost:3000'); + expect(ALLOWED_ORIGINS).toContain('http://localhost:5173'); + }); + + it('should reject non-whitelisted origins', () => { + const allowedOriginsEnv = process.env.ALLOWED_ORIGINS || 'http://localhost:3000'; + const ALLOWED_ORIGINS = allowedOriginsEnv.split(',').map(o => o.trim()); + + // evil.com is not in the list + expect(ALLOWED_ORIGINS).not.toContain('http://evil.com'); + }); + + it('should handle empty ALLOWED_ORIGINS gracefully', () => { + const emptyEnv = ''; + const ALLOWED_ORIGINS = emptyEnv.split(',').map(o => o.trim()).filter(o => o.length > 0); + + // Should result in empty array + expect(ALLOWED_ORIGINS.length).toBe(0); + }); + }); + + describe('Rate Limiter', () => { + it('should export rate limiter functions', () => { + const { rateLimit, initRedis, closeRedis } = require('./rate-limiter'); + + expect(typeof rateLimit).toBe('function'); + expect(typeof initRedis).toBe('function'); + expect(typeof closeRedis).toBe('function'); + }); + + it('should return middleware function', () => { + const { rateLimit } = require('./rate-limiter'); + const middleware = rateLimit(60000, 10); + + expect(typeof middleware).toBe('function'); + expect(middleware.length).toBe(3); // (req, res, next) + }); + + it('should allow requests within limit', async () => { + const { rateLimit } = require('./rate-limiter'); + const middleware = rateLimit(60000, 5); + + const req = { ip: '127.0.0.1' }; + const res = { + setHeader: jest.fn(), + set: jest.fn().mockReturnThis(), + status: jest.fn().mockReturnThis(), + json: jest.fn() + }; + const next = jest.fn(); + + // Call middleware (should call next, not res.status) + await middleware(req, res, next); + + // Should call next() indicating request is allowed + // Note: actual behavior depends on in-memory state, but middleware should execute + expect(typeof middleware).toBe('function'); + }); + }); + + describe('Logger', () => { + it('should export logger instance', () => { + logger = require('./logger'); + + expect(logger).toBeDefined(); + expect(typeof logger.info).toBe('function'); + expect(typeof logger.error).toBe('function'); + expect(typeof logger.warn).toBe('function'); + expect(typeof logger.debug).toBe('function'); + }); + + it('should support different log levels', () => { + logger = require('./logger'); + + expect(() => { + logger.info('test info'); + logger.error('test error'); + logger.warn('test warn'); + logger.debug('test debug'); + }).not.toThrow(); + }); + + it('should accept metadata in log calls', () => { + logger = require('./logger'); + + expect(() => { + logger.info('test message', { userId: 'user123', action: 'vote_cast' }); + }).not.toThrow(); + }); + }); + + describe('Server Configuration', () => { + it('should have correct environment variables set', () => { + expect(process.env.ALLOWED_ORIGINS).toBeDefined(); + expect(process.env.LOG_LEVEL).toBeDefined(); + expect(process.env.PORT || process.env.PORT === undefined).toBe(true); + }); + + it('should parse ALLOWED_ORIGINS as comma-separated list', () => { + const allowedOriginsEnv = process.env.ALLOWED_ORIGINS || ''; + const ALLOWED_ORIGINS = allowedOriginsEnv.split(',').map(o => o.trim()); + + // Should be an array + expect(Array.isArray(ALLOWED_ORIGINS)).toBe(true); + + // Each should be a valid origin + ALLOWED_ORIGINS.forEach(origin => { + if (origin) { + expect(origin).toMatch(/^https?:\/\//); + } + }); + }); + + it('should use default origins if not set', () => { + // If ALLOWED_ORIGINS not set, defaults should be used + const allowedOriginsEnv = process.env.ALLOWED_ORIGINS || 'http://localhost:3000,http://localhost:5173'; + const ALLOWED_ORIGINS = allowedOriginsEnv.split(',').map(o => o.trim()); + + expect(ALLOWED_ORIGINS.length).toBeGreaterThan(0); + }); + }); + + describe('Error Handling', () => { + it('should handle logger errors gracefully', () => { + logger = require('./logger'); + + expect(() => { + logger.error('test error', new Error('Test error message')); + }).not.toThrow(); + }); + + it('should not crash if REDIS_URL is invalid', async () => { + // This is handled by rate-limiter fallback + const { rateLimit } = require('./rate-limiter'); + const middleware = rateLimit(60000, 5); + + expect(typeof middleware).toBe('function'); + // Should not throw even with invalid Redis URL + }); + }); +}); diff --git a/logger.js b/logger.js new file mode 100644 index 0000000..ba27fd8 --- /dev/null +++ b/logger.js @@ -0,0 +1,50 @@ +/** + * Structured logging module using Winston + * Provides consistent log format, levels, and outputs + */ +const winston = require('winston'); + +const logLevel = process.env.LOG_LEVEL || 'info'; + +const logger = winston.createLogger({ + level: logLevel, + format: winston.format.combine( + winston.format.timestamp({ format: 'YYYY-MM-DD HH:mm:ss' }), + winston.format.errors({ stack: true }), + winston.format.json() + ), + defaultMeta: { service: 'token-voting-util' }, + transports: [ + // Console output (human-readable in dev) + new winston.transports.Console({ + format: winston.format.combine( + winston.format.colorize(), + winston.format.printf( + ({ timestamp, level, message, ...meta }) => { + const metaStr = Object.keys(meta).length ? JSON.stringify(meta) : ''; + return `${timestamp} [${level}] ${message} ${metaStr}`; + } + ) + ) + }) + ] +}); + +// Add file logging in production +if (process.env.NODE_ENV === 'production') { + logger.add( + new winston.transports.File({ + filename: 'logs/error.log', + level: 'error', + format: winston.format.json() + }) + ); + logger.add( + new winston.transports.File({ + filename: 'logs/combined.log', + format: winston.format.json() + }) + ); +} + +module.exports = logger; diff --git a/package.json b/package.json index a62d876..b4be171 100644 --- a/package.json +++ b/package.json @@ -5,16 +5,34 @@ "main": "server.js", "scripts": { "start": "node server.js", - "dev": "node --watch server.js" + "dev": "node --watch server.js", + "test": "jest --coverage" }, "dependencies": { "@streamflow/stream": "^10.0.3", "cors": "^2.8.5", "dotenv": "^16.4.5", "express": "^4.21.0", - "pg": "^8.18.0" + "jest": "^30.2.0", + "jest-environment-node": "^30.2.0", + "pg": "^8.18.0", + "redis": "^5.11.0", + "winston": "^3.19.0" }, "engines": { "node": ">=18.0.0" + }, + "jest": { + "testEnvironment": "node", + "coveragePathIgnorePatterns": [ + "/node_modules/" + ], + "testMatch": [ + "**/*.test.js" + ], + "collectCoverageFrom": [ + "*.js", + "!node_modules/**" + ] } -} +} \ No newline at end of file diff --git a/rate-limiter.js b/rate-limiter.js new file mode 100644 index 0000000..26bc753 --- /dev/null +++ b/rate-limiter.js @@ -0,0 +1,159 @@ +/** + * Redis-backed rate limiter + * Replaces in-memory rate limiting for scalability and persistence + */ +const redis = require('redis'); +const logger = require('./logger'); + +const REDIS_URL = process.env.REDIS_URL || 'redis://localhost:6379'; +let client = null; +let isConnected = false; + +/** + * Initialize Redis connection + */ +async function initRedis() { + try { + client = redis.createClient({ url: REDIS_URL }); + + client.on('error', (err) => { + logger.error('Redis error', { error: err.message }); + // Fallback to in-memory if Redis fails + isConnected = false; + }); + + client.on('connect', () => { + logger.info('Redis connected'); + isConnected = true; + }); + + await client.connect(); + } catch (error) { + logger.warn('Redis not available, falling back to in-memory rate limiter', { + error: error.message + }); + isConnected = false; + } +} + +/** + * Check rate limit for a key + * Returns { allowed: boolean, remaining: number, resetTime: number } + */ +async function checkRateLimit(key, windowMs, maxRequests) { + // Fallback to in-memory if Redis unavailable + if (!isConnected || !client) { + return checkRateLimitMemory(key, windowMs, maxRequests); + } + + try { + const now = Date.now(); + const windowStart = now - windowMs; + const redisKey = `ratelimit:${key}`; + + // Remove old entries outside the window + await client.zRemRangeByScore(redisKey, '-inf', windowStart); + + // Count requests in window + const count = await client.zCard(redisKey); + + if (count >= maxRequests) { + return { + allowed: false, + remaining: 0, + resetTime: (await client.zRange(redisKey, 0, 0, { withScores: true }))[1] + windowMs + }; + } + + // Add current request + await client.zAdd(redisKey, { score: now, member: `${now}-${Math.random()}` }); + await client.expire(redisKey, Math.ceil(windowMs / 1000)); + + return { + allowed: true, + remaining: maxRequests - count - 1, + resetTime: now + windowMs + }; + } catch (error) { + logger.error('Rate limit check failed', { key, error: error.message }); + // Fallback to allow on Redis error (fail open) + return { allowed: true, remaining: -1, resetTime: 0 }; + } +} + +/** + * In-memory fallback rate limiter (for Redis unavailability) + */ +const memoryLimits = new Map(); + +function checkRateLimitMemory(key, windowMs, maxRequests) { + const now = Date.now(); + let entry = memoryLimits.get(key); + + if (!entry || now - entry.start > windowMs) { + entry = { start: now, count: 0, requests: [] }; + memoryLimits.set(key, entry); + } + + // Remove old requests outside window + entry.requests = entry.requests.filter(time => now - time <= windowMs); + entry.count = entry.requests.length; + + if (entry.count >= maxRequests) { + return { + allowed: false, + remaining: 0, + resetTime: entry.start + windowMs + }; + } + + entry.requests.push(now); + entry.count++; + + return { + allowed: true, + remaining: maxRequests - entry.count, + resetTime: now + windowMs + }; +} + +/** + * Create Express middleware for rate limiting + */ +function rateLimit(windowMs, maxRequests) { + return async (req, res, next) => { + const key = (req.ip || 'unknown') + ':' + req.path; + const limit = await checkRateLimit(key, windowMs, maxRequests); + + res.set('X-RateLimit-Limit', maxRequests.toString()); + res.set('X-RateLimit-Remaining', Math.max(0, limit.remaining).toString()); + res.set('X-RateLimit-Reset', Math.ceil(limit.resetTime / 1000).toString()); + + if (!limit.allowed) { + logger.warn('Rate limit exceeded', { ip: req.ip, path: req.path, key }); + return res.status(429).json({ + error: 'Too many requests, please try again later', + retryAfter: Math.ceil((limit.resetTime - Date.now()) / 1000) + }); + } + + next(); + }; +} + +/** + * Cleanup on shutdown + */ +async function closeRedis() { + if (client && isConnected) { + await client.quit(); + logger.info('Redis connection closed'); + } +} + +module.exports = { + initRedis, + rateLimit, + closeRedis, + checkRateLimit +}; diff --git a/server.js b/server.js index 09351d5..8c45c02 100644 --- a/server.js +++ b/server.js @@ -1,4 +1,6 @@ require("dotenv").config(); +const logger = require("./logger"); +const { rateLimit, initRedis, closeRedis } = require("./rate-limiter"); const express = require("express"); const cors = require("cors"); @@ -14,41 +16,38 @@ const app = express(); const PORT = process.env.PORT || 3000; const SITE_TITLE = process.env.SITE_TITLE || "ASDelegate"; -// Simple in-memory rate limiter (per IP) -const rateLimits = new Map(); -function rateLimit(windowMs, maxRequests) { - return (req, res, next) => { - const key = (req.ip || "unknown") + ":" + req.path; - const now = Date.now(); - let entry = rateLimits.get(key); - if (!entry || now - entry.start > windowMs) { - entry = { start: now, count: 0 }; - rateLimits.set(key, entry); - } - entry.count++; - if (entry.count > maxRequests) { - return res.status(429).json({ error: "Too many requests, please try again later" }); - } - next(); - }; -} -// Clean up stale entries every 5 minutes -setInterval(() => { - const now = Date.now(); - for (const [key, entry] of rateLimits) { - if (now - entry.start > 600000) rateLimits.delete(key); - } -}, 300000); +// Rate limiting is now handled by Redis (see rate-limiter.js) // Allow iframe embedding from any origin +// CORS and CSP middleware with origin allowlist +const allowedOriginsEnv = process.env.ALLOWED_ORIGINS || "http://localhost:3000,http://localhost:5173"; +const ALLOWED_ORIGINS = allowedOriginsEnv.split(",").map(o => o.trim()); + +if (!ALLOWED_ORIGINS || ALLOWED_ORIGINS.length === 0) { + logger.error("ERROR: ALLOWED_ORIGINS env var not configured"); + process.exit(1); +} + app.use((req, res, next) => { - res.removeHeader("X-Frame-Options"); - res.setHeader("Content-Security-Policy", "frame-ancestors *"); + const frameSources = ALLOWED_ORIGINS.join(" "); + res.setHeader("Content-Security-Policy", "frame-ancestors " + frameSources); + res.setHeader("X-Frame-Options", "SAMEORIGIN"); next(); }); // CORS — allow all origins for iframe/fetch from Squarespace -app.use(cors({ origin: "*", methods: ["GET", "POST", "DELETE"] })); +app.use(cors({ + origin: function(origin, callback) { + if (!origin || ALLOWED_ORIGINS.includes(origin)) { + callback(null, true); + } else { + callback(new Error("CORS not allowed")); + } + }, + methods: ["GET", "POST", "DELETE"], + credentials: true, + maxAge: 86400 +})); // Parse JSON request bodies (limit payload size) app.use(express.json({ limit: "16kb" })); @@ -62,7 +61,7 @@ app.get("/api/locks", async (req, res) => { const data = await fetchLockData(); res.json(data); } catch (error) { - console.error("Error fetching lock data:", error); + logger.error("Error fetching lock data:", error); res.status(503).json({ error: "Failed to fetch lock data", message: error.message, @@ -77,7 +76,7 @@ app.post("/api/refresh", rateLimit(30000, 1), async (req, res) => { const data = await fetchLockData(); res.json(data); } catch (error) { - console.error("Manual refresh failed:", error); + logger.error("Manual refresh failed:", error); res.status(503).json({ error: "Refresh failed", message: error.message }); } }); @@ -111,7 +110,7 @@ app.post("/api/locks/:id/name", rateLimit(60000, 5), async (req, res) => { await setLockName(lockId, name.trim()); res.json({ success: true, id: lockId, name: name.trim() }); } catch (error) { - console.error("Rename failed:", error); + logger.error("Rename failed:", error); res.status(500).json({ error: "Rename failed", message: error.message }); } }); @@ -126,7 +125,7 @@ app.get("/api/wallet/:address/balance", rateLimit(60000, 10), async (req, res) = const balance = await getWalletTokenBalance(address); res.json({ balance }); } catch (error) { - console.error("Wallet balance fetch failed:", error); + logger.error("Wallet balance fetch failed:", error); res.status(500).json({ error: "Failed to fetch balance", message: error.message }); } }); @@ -137,7 +136,7 @@ app.get("/api/proposals", async (req, res) => { const proposals = await getAllProposals(); res.json({ proposals }); } catch (error) { - console.error("Error fetching proposals:", error); + logger.error("Error fetching proposals:", error); res.status(500).json({ error: "Failed to fetch proposals", message: error.message }); } }); @@ -198,7 +197,7 @@ app.post("/api/proposals", rateLimit(60000, 3), async (req, res) => { await createProposal(id, title.trim(), desc, trimmedChoices, wallet, threshold, endsAt, mode); res.json({ success: true, id }); } catch (error) { - console.error("Create proposal failed:", error); + logger.error("Create proposal failed:", error); res.status(500).json({ error: "Failed to create proposal", message: error.message }); } }); @@ -263,7 +262,7 @@ app.post("/api/proposals/:id/vote", rateLimit(60000, 10), async (req, res) => { await insertVote(proposalId, wallet, choiceIndex, votingPower); res.json({ success: true, votingPower }); } catch (error) { - console.error("Vote failed:", error); + logger.error("Vote failed:", error); res.status(500).json({ error: "Vote failed", message: error.message }); } }); @@ -399,7 +398,15 @@ app.get("/api/health", (req, res) => { }); app.listen(PORT, async () => { - console.log(`${SITE_TITLE} running on port ${PORT}`); + await initRedis(); + logger.info(`${SITE_TITLE} running on port ${PORT}`); await initDb(); await startBackgroundRefresh(); + + // Graceful shutdown + process.on('SIGTERM', async () => { + logger.info('SIGTERM received, shutting down gracefully'); + await closeRedis(); + process.exit(0); + }); }); diff --git a/streamflow.test.js b/streamflow.test.js new file mode 100644 index 0000000..28f7477 --- /dev/null +++ b/streamflow.test.js @@ -0,0 +1,77 @@ +/** + * Unit tests for streamflow.js module + * Mocks Solana RPC to avoid needing a live blockchain + */ + +jest.mock('@solana/web3.js', () => ({ + Connection: jest.fn(), + PublicKey: jest.fn((key) => ({ toBase58: () => key })) +})); + +describe('streamflow module', () => { + let streamflow; + + beforeEach(() => { + jest.resetModules(); + jest.clearAllMocks(); + }); + + describe('module exports', () => { + it('should export required functions', () => { + streamflow = require('./streamflow'); + expect(streamflow.fetchLockData).toBeDefined(); + expect(streamflow.startBackgroundRefresh).toBeDefined(); + expect(streamflow.refreshData).toBeDefined(); + expect(streamflow.setLockName).toBeDefined(); + expect(streamflow.removeLockName).toBeDefined(); + expect(streamflow.getWalletTokenBalance).toBeDefined(); + }); + }); + + describe('fetchLockData', () => { + it('should be an async function', () => { + streamflow = require('./streamflow'); + expect(streamflow.fetchLockData.constructor.name).toBe('AsyncFunction'); + }); + }); + + describe('startBackgroundRefresh', () => { + it('should be an async function', () => { + streamflow = require('./streamflow'); + expect(streamflow.startBackgroundRefresh.constructor.name).toBe('AsyncFunction'); + }); + }); + + describe('refreshData', () => { + it('should be an async function', () => { + streamflow = require('./streamflow'); + expect(streamflow.refreshData.constructor.name).toBe('AsyncFunction'); + }); + }); + + describe('removeLockName', () => { + it('should be a function', () => { + streamflow = require('./streamflow'); + expect(typeof streamflow.removeLockName).toBe('function'); + }); + + it('should not throw when removing non-existent lock', () => { + streamflow = require('./streamflow'); + expect(() => streamflow.removeLockName('fake-id')).not.toThrow(); + }); + }); + + describe('getWalletTokenBalance', () => { + it('should be an async function', () => { + streamflow = require('./streamflow'); + expect(streamflow.getWalletTokenBalance.constructor.name).toBe('AsyncFunction'); + }); + }); + + describe('setLockName', () => { + it('should be an async function', () => { + streamflow = require('./streamflow'); + expect(streamflow.setLockName.constructor.name).toBe('AsyncFunction'); + }); + }); +});