diff --git a/.github/workflows/security-assessment.yml b/.github/workflows/security-assessment.yml index 178a5c5..9cea4a9 100644 --- a/.github/workflows/security-assessment.yml +++ b/.github/workflows/security-assessment.yml @@ -37,6 +37,7 @@ jobs: - name: Install dependencies run: npm ci + - name: Start server in background run: | npm start & @@ -84,7 +85,7 @@ jobs: run: | echo "Starting security assessment..." - # Run the assessment + # Run the assessment (original behavior without full V2 scan integration) node security/runAssessment.js # Find the latest generated JSON report diff --git a/.github/workflows/vulnerability-scan.yml b/.github/workflows/vulnerability-scan.yml new file mode 100644 index 0000000..e0786e3 --- /dev/null +++ b/.github/workflows/vulnerability-scan.yml @@ -0,0 +1,102 @@ +name: Manual Vulnerability & Test Scan + +on: + workflow_dispatch: + inputs: + run_tests: + description: 'Set to true to run unit tests (may require DB). Default: false' + required: false + default: 'false' + fail_on_critical: + description: 'If true, the job will fail when the vulnerability_report.json contains CRITICAL issues. Default: false' + required: false + default: 'false' + +env: + NODE_VERSION: '20' + +jobs: + vulnerability-scan: + name: Run vulnerability scans and tests (manual) + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version: ${{ env.NODE_VERSION }} + cache: 'npm' + cache-dependency-path: package-lock.json + + - name: Install npm dependencies + run: npm ci + - name: Setup Python 3 + uses: actions/setup-python@v4 + with: + python-version: '3.11' + + - name: Prepare Scanner Environment (V2) + run: | + node scripts/prepareScanner.js + if [ ! -d Vulnerability_Tool_V2/venv ]; then + echo "Scanner venv not created (Python missing or script skipped). 
Using system python."; + fi + + - name: Run unit tests (mocha) + if: ${{ github.event.inputs.run_tests == 'true' }} + run: | + echo "run_tests was set to true — running unit tests" + if npm run | grep -q "test:unit"; then + npm run test:unit + elif npm run | grep -q "test:rce"; then + npm run test:rce || true + else + echo "No test script found (test:unit/test:rce). Skipping tests."; + fi + + - name: Run npm audit and save JSON + run: | + npm audit --json > npm_audit.json || true + + - name: Run Vulnerability_Tool_V2 - JSON output + run: | + PYEXEC="python3" + if [ -f Vulnerability_Tool_V2/venv/bin/python ]; then PYEXEC="Vulnerability_Tool_V2/venv/bin/python"; fi + if [ -f Vulnerability_Tool_V2/scanner_v2.py ]; then + $PYEXEC Vulnerability_Tool_V2/scanner_v2.py --target . --format json --output vulnerability_report.json || true + else + echo "Vulnerability_Tool_V2/scanner_v2.py not found" > vulnerability_report.json + fi + + - name: Run Vulnerability_Tool_V2 - HTML output + run: | + PYEXEC="python3" + if [ -f Vulnerability_Tool_V2/venv/bin/python ]; then PYEXEC="Vulnerability_Tool_V2/venv/bin/python"; fi + if [ -f Vulnerability_Tool_V2/scanner_v2.py ]; then + $PYEXEC Vulnerability_Tool_V2/scanner_v2.py --target . --format html --output vulnerability_report.html || true + else + echo "Vulnerability_Tool_V2/scanner_v2.py not found" > vulnerability_report.html + fi + + - name: Collect generated reports + run: | + ls -la || true + echo "Collected artifacts:"; ls -la *.html *.txt npm_audit.json || true + + - name: Upload artifacts + uses: actions/upload-artifact@v4 + with: + name: vulnerability-scan-reports + path: | + vulnerability_report.json + vulnerability_report.html + vulnerability_tool_report.txt + npm_audit.json + + - name: Fail on critical findings (optional) + if: ${{ github.event.inputs.fail_on_critical == 'true' }} + run: | + echo "Checking vulnerability_report.json for CRITICAL findings..." 
+ python3 scripts/ci_check_vuln.py diff --git a/.gitignore b/.gitignore index b748afe..174f3e0 100644 --- a/.gitignore +++ b/.gitignore @@ -149,4 +149,33 @@ __pycache__/ # Ignore generated security assessment reports /security/reports/ -``` \ No newline at end of file +``` + +# Python virtual environments +venv/ +.env/ +__pycache__/ +*.pyc + +# Local virtualenv created during testing +.venv/ + +# pytest cache +.pytest_cache/ + +# pipenv +Pipfile.lock + +# macOS system files +.DS_Store + +# VS Code settings +.vscode/ + +# Logs and temp files +*.log +*.tmp + +# Vulnerability Scanner V2 reports - generated artifacts +Vulnerability_Tool_V2/reports/ +Vulnerability_Tool_V2/reports/* diff --git a/README.md b/README.md index 2ebc2bd..3f7d043 100644 --- a/README.md +++ b/README.md @@ -11,15 +11,18 @@ git clone https://github.com/Gopher-Industries/Nutrihelp-api ```bash cd Nutrihelp-api ``` -4. Install the required dependencies (including python dependencies): +4. Install dependencies (runs automated bootstrap via npm postinstall): ```bash npm install -pip install -r requirements.txt -npm install node-fetch -npm install --save-dev jest supertest ``` -5. Contact a project maintainer to get the `.env` file that contains the necessary environment variables and place it in the root of the project directory. -6. Start the server: + What happens automatically: + - Node dependencies installed + - Environment bootstrap runs (`scripts/bootstrap.js --mode=postinstall`) + - If no `.env` exists a minimal placeholder is generated (internal team must replace with real values) + - Vulnerability scanner virtual environment prepared if Python 3 is available + - Environment validation runs (warnings only in postinstall mode) + +3. Start the server: ```bash npm start ``` @@ -57,3 +60,31 @@ npx jest .\test\healthNews.test.js /\ Please refer to the "PatchNotes_VersionControl" file for /\ /\ recent updates and changes made through each version. 
/\ + + +## CI: Manual Vulnerability & Test Scan (V2 Aligned) + +This repository includes a manual GitHub Actions workflow that runs the Vulnerability Scanner (V2) and optional tests. + +How to run +- Open the repository on GitHub and go to the Actions tab. +- Select the workflow named `Manual Vulnerability & Test Scan`. +- Click the `Run workflow` button. + +Inputs +- `run_tests` (default: `false`) — set to `true` to run unit tests (`npm run test:unit`). Tests may require a database or other services; use with caution. +- `fail_on_critical` (default: `false`) — set to `true` to make the job fail when the scanner JSON report contains one or more `CRITICAL` findings. + +Artifacts +- `vulnerability-scan-reports` (artifact bundle) — contains: + - `vulnerability_report.json` — machine-readable scan results + - `vulnerability_report.html` — human-friendly HTML report (if HTML rendering succeeds) + - `vulnerability_tool_report.txt` — legacy/auxiliary scanner output (if generated) + - `npm_audit.json` — result of `npm audit --json` + +Notes and recommendations +- The scanner excludes internal tool directories and common noisy paths (for example `Vulnerability_Tool_V2`, legacy `Vulnerability_Tool`, `node_modules`, test caches). +- If you enable `run_tests`, ensure the required environment (DB, credentials) is available to avoid noisy failures. +- Use `fail_on_critical=true` for gating releases or running stricter checks in CI; keep it `false` for quick, informational scans. + + diff --git a/VULN_CI_MIGRATION_TASK.md b/VULN_CI_MIGRATION_TASK.md new file mode 100644 index 0000000..697a914 --- /dev/null +++ b/VULN_CI_MIGRATION_TASK.md @@ -0,0 +1,82 @@ +# Vulnerability Scanner CI Migration Task (V1.4 -> V2.0) + +This document is a handover for the next developer to migrate `.github/workflows/security.yml` from invoking +`Vulnerability_Tool/Vulnerability_Scanner_V1.4.py` to using `Vulnerability_Tool_V2/scanner_v2.py`. 
+ +Goal +- Run the V2.0 scanner in GitHub Actions to produce human-readable HTML (recommended) and/or JSON reports and + upload them as artifacts. +- Keep reproducibility by preferring the repository-provided venv setup (`setup_venv.sh`) and consider caching to + reduce CI time. + +Acceptance criteria +1. Actions produces a report file (HTML or JSON) under `Vulnerability_Tool_V2/reports/` and uploads it as an artifact. + Suggested filename: `security_report_${{ github.sha }}.html`. +2. Decide and document whether CRITICAL findings should fail the CI job (i.e. block PRs) and reflect that decision in + the workflow (comments or parameters). +3. Document performance / caching recommendations (for example, use `actions/cache` to cache pip wheels or pip cache + directories). + +Key information +- V2 CLI entry: `Vulnerability_Tool_V2/scanner_v2.py`. Required argument: `--target` (target directory). Optional flags: + `--format` (json|html|summary), `--output` (write output file), and `--verbose`. +- Recommended: use the repo script to create a virtual environment: `Vulnerability_Tool_V2/setup_venv.sh` which will + create `Vulnerability_Tool_V2/venv` and install dependencies from `requirements.txt`. + +Recommended implementation (preferred: use repository venv) + +Replace the existing step that used to run V1 against each changed file with the snippet below. Adjust to your job +context and branching strategy as needed. + +```yaml +# ...existing job steps... +- name: Set up Python venv for scanner + run: | + cd Vulnerability_Tool_V2 + chmod +x ./setup_venv.sh || true + ./setup_venv.sh + +- name: Run Vulnerability_Tool_V2 scanner + run: | + SCAN_OUTPUT=Vulnerability_Tool_V2/reports/security_report_${{ github.sha }}.html + Vulnerability_Tool_V2/venv/bin/python Vulnerability_Tool_V2/scanner_v2.py --target . 
--format html --output "$SCAN_OUTPUT" --verbose + ls -la Vulnerability_Tool_V2/reports || true + +- name: Upload scanner report + uses: actions/upload-artifact@v4 + with: + name: security-scan-report + path: Vulnerability_Tool_V2/reports/security_report_${{ github.sha }}.html +``` + +Minimal alternative (do not create venv; use system Python) + +```yaml +- name: Install scanner deps (system python) + run: | + python3 -m pip install --upgrade pip + pip install -r Vulnerability_Tool_V2/requirements.txt + +- name: Run scanner (system python) + run: | + python3 Vulnerability_Tool_V2/scanner_v2.py --target . --format html --output Vulnerability_Tool_V2/reports/security_report_${{ github.sha }}.html +``` + +Notes and optimizations +- Caching: use `actions/cache` to speed up dependency installation (cache pip wheel files or pip cache directories). +- Exit code behavior: V2 will return a non-zero exit code if CRITICAL issues are found (the CLI returns 1 on + critical findings). If you want PRs to be blocked on criticals, keep this behavior. Otherwise, use + `continue-on-error: true` for the scanner step or capture the exit code and treat it as a warning while still + uploading the report. +- Scan scope: V1 was scanning changed files one-by-one. V2 is intended to scan directories. If you want to scan only + changed files, you can copy changed files to a temporary directory in the job and use `--target` to point to that + temporary directory, or adapt scanner configuration to accept a file-list. + +Additional resources +- Scanner entrypoint: `Vulnerability_Tool_V2/scanner_v2.py` +- Requirements: `Vulnerability_Tool_V2/requirements.txt` +- venv setup script: `Vulnerability_Tool_V2/setup_venv.sh` + +Handover +Assign this task to the person responsible for CI and link this document in the issue/PR. Include the acceptance +criteria in the PR description when implementing the change. 
diff --git a/Vulnerability_Tool_V2/.gitignore b/Vulnerability_Tool_V2/.gitignore new file mode 100644 index 0000000..003eb2b --- /dev/null +++ b/Vulnerability_Tool_V2/.gitignore @@ -0,0 +1,4 @@ +# Ignore generated reports from the scanner +reports/ +# Keep temp json files used during rendering +tmp_*.json diff --git a/Vulnerability_Tool_V2/config/scanner_config.yaml b/Vulnerability_Tool_V2/config/scanner_config.yaml new file mode 100644 index 0000000..596d47e --- /dev/null +++ b/Vulnerability_Tool_V2/config/scanner_config.yaml @@ -0,0 +1,136 @@ +logging: + file_output: false + file_path: logs/scanner.log + format: '%(asctime)s - %(name)s - %(levelname)s - %(message)s' + level: INFO +plugins: + jwt_configuration: + config: + check_env_files: true + min_secret_length: 32 + enabled: true + jwt_missing_protection: + config: + check_middleware: true + check_routes: true + exclude_paths: + - /health + - /api-docs + enabled: true + general_security: + config: + legacy_rules: + enabled: true + # default path relative to plugin dir; can be overridden with absolute path + path: plugins/general_security/rules_v1.yaml + # plugin-specific defaults + allowlist_keys: ['TEST_SECRET', 'PLACEHOLDER', 'EXAMPLE_SECRET'] + exclude_paths: ['tests/', '__tests__/', 'fixtures/', 'node_modules/', '.venv/', 'venv/'] + enabled: true + rls_missing_protection_disabled: + config: + rls_indicators: + - auth.uid() + - current_user + - user_id + - auth_user + - rls + - row level security + sensitive_tables: + - users + - user_profiles + - auth_logs + - user_sessions + - recipes + - meal_plans + - appointments + - medical_predictions + - user_feedback + - notifications + enabled: false + severity_override: null +reports: + formats: + html: + enabled: false + include_css: true + include_js: false + template: default + json: + enabled: false + include_metadata: true + indent: 2 + text: + enabled: false + include_summary: true + max_width: 120 + group_by_severity: true + include_file_paths: true + 
include_source_snippets: true + include_timestamps: true + max_snippet_lines: 5 + sort_by_severity: true +scanner: + description: Specialized security scanner for NutriHelp project + exclude_directories: + - node_modules + - .git + - __pycache__ + - venv + - .venv + - dist + - build + - uploads + - temp + file_extensions: + - .js + - .ts + - .py + - .sql + - .json + - .yaml + - .yml + - .env + name: NutriHelp Security Scanner V2.0 + # Engine-level global exclude paths (substring matches). These will be + # injected into each plugin's exclude_paths so plugins and engine skip them. + global_exclude_paths: + - tests/ + - __tests__/ + - fixtures/ + - Vulnerability_Tool_V2/ + - reports/ + - scripts/ + - tools/ + - venv/ + - .venv/ + - node_modules/ + + # New: internal scanner files filtering + exclude_internal_scanner_files: true + internal_paths: + - routes/scanner.js + - reports/ + - Vulnerability_Tool_V2/ + - Vulnerability_Tool/ + - scripts/ + scan_settings: + max_file_size_mb: 50 + parallel_scanning: false + timeout_seconds: 300 + version: 2.0.0 + # Recognize common global middleware names that indicate protection when used + trusted_global_middlewares: + - authenticateToken + - useAuth + - authorizeRoles + - optionalAuth + # Public paths that should not trigger JWT-missing alerts (prefix or exact match) + public_paths: + - /login + - /signup + - /health + - /status + - /mfa + - /classify + - /integrity-check diff --git a/Vulnerability_Tool_V2/core/config_manager.py b/Vulnerability_Tool_V2/core/config_manager.py new file mode 100644 index 0000000..abe984b --- /dev/null +++ b/Vulnerability_Tool_V2/core/config_manager.py @@ -0,0 +1,123 @@ +#!/usr/bin/env python3 +""" +Configuration Manager - handles loading and validation of YAML configuration files +""" + +import os +import yaml +import logging +from typing import Dict, Any, Optional +from pathlib import Path + + +class ConfigManager: + """Configuration Manager""" + + def __init__(self, config_path: Optional[str] = 
None): + self.config_path = config_path or self._get_default_config_path() + self.config: Dict[str, Any] = {} + self.logger = logging.getLogger("ConfigManager") + self._load_config() + + def _get_default_config_path(self) -> str: + """Get default config file path""" + current_dir = Path(__file__).parent + return str(current_dir.parent / "config" / "scanner_config.yaml") + + def _load_config(self): + """Load config file""" + try: + if os.path.exists(self.config_path): + with open(self.config_path, 'r', encoding='utf-8') as f: + self.config = yaml.safe_load(f) or {} + self.logger.info(f"Loaded configuration from {self.config_path}") + else: + self.logger.warning(f"Config file not found: {self.config_path}") + self.config = self._get_default_config() + self.logger.info("Using default configuration") + except Exception as e: + self.logger.error(f"Error loading configuration: {e}") + self.config = self._get_default_config() + + def _get_default_config(self) -> Dict[str, Any]: + """Get default configuration""" + return { + 'scanner': { + 'name': 'NutriHelp Security Scanner V2.0', + 'version': '2.0.0', + 'file_extensions': ['.js', '.py', '.ts', '.sql'], + 'exclude_directories': ['node_modules', '.git', '__pycache__'] + }, + 'plugins': {}, + 'reports': { + 'include_source_snippets': True, + 'group_by_severity': True + } + } + + def get(self, key: str, default: Any = None) -> Any: + """Get configuration value (supports dot notation)""" + keys = key.split('.') + value = self.config + + try: + for k in keys: + value = value[k] + return value + except (KeyError, TypeError): + return default + + def get_scanner_config(self) -> Dict[str, Any]: + """Get scanner configuration""" + return self.get('scanner', {}) + + def get_plugin_config(self, plugin_name: str) -> Dict[str, Any]: + """Get specific plugin configuration""" + return self.get(f'plugins.{plugin_name}', {}) + + def get_enabled_plugins(self) -> Dict[str, Dict[str, Any]]: + """Get enabled plugin configuration""" + plugins 
= self.get('plugins', {}) + enabled_plugins = {} + + for plugin_name, plugin_config in plugins.items(): + if plugin_config.get('enabled', True): + enabled_plugins[plugin_name] = plugin_config + + return enabled_plugins + + def get_report_config(self) -> Dict[str, Any]: + """Get report configuration""" + return self.get('reports', {}) + + def validate_config(self) -> bool: + """Validate configuration file""" + required_sections = ['scanner', 'plugins'] + + for section in required_sections: + if section not in self.config: + self.logger.error(f"Missing required config section: {section}") + return False + + # Validate scanner configuration + scanner_config = self.config['scanner'] + if 'name' not in scanner_config or 'version' not in scanner_config: + self.logger.error("Scanner config missing name or version") + return False + + self.logger.info("Configuration validation passed") + return True + + def reload_config(self): + """Reload configuration""" + self._load_config() + + def save_config(self, config_path: Optional[str] = None): + """Save configuration to file""" + save_path = config_path or self.config_path + try: + with open(save_path, 'w', encoding='utf-8') as f: + yaml.dump(self.config, f, default_flow_style=False, indent=2) + self.logger.info(f"Configuration saved to {save_path}") + except Exception as e: + self.logger.error(f"Error saving configuration: {e}") \ No newline at end of file diff --git a/Vulnerability_Tool_V2/core/report_renderer.py b/Vulnerability_Tool_V2/core/report_renderer.py new file mode 100644 index 0000000..1198816 --- /dev/null +++ b/Vulnerability_Tool_V2/core/report_renderer.py @@ -0,0 +1,309 @@ +#!/usr/bin/env python3 +"""Shared HTML report renderer used by both CLI and API. +Place the common HTML template and rendering logic here so outputs are consistent. 
+""" +from datetime import datetime +from typing import Dict, Any + + +def render_html_report(scan_results: Dict[str, Any], config_manager=None) -> str: + """Render the HTML report from scan_results dict. + + scan_results must contain keys: summary, findings, scan_info + """ + summary = scan_results.get('summary', {}) + findings = scan_results.get('findings', []) + scan_info = scan_results.get('scan_info', {}) + + # Use the same HTML template as CLI previously used + html_template = """ + + + + + + NutriHelp Vulnerability Scanner V2.0 Report + + + +
+
+

🔒 NutriHelp Security Scanner V2.0

+
+
Scan time: {timestamp}
+
Target path: {target_path}
+
Scanner version: {scanner_version}
+
+
+ +
+
+
{critical_count}
+
Critical Issues
+
+
+
{high_count}
+
High Severity
+
+
+
{medium_count}
+
Medium Severity
+
+
+
{low_count}
+
Low Severity
+
+
+ +
+
+
+
{files_scanned}
+
Files Scanned
+
+
+
{plugins_used}
+
Plugins Used
+
+
+
{total_findings}
+
Total Issues
+
+
+ + {findings_html} +
+ + +
+ + + """ + + # Generate HTML for findings + if not findings: + findings_html = '

✅ No Vulnerabilities Found!

Your codebase has passed all vulnerability checks.

' + else: + findings_html = '

🔍 Detailed Findings

' + + # Sort by severity + sorted_findings = sorted(findings, key=lambda x: { + 'CRITICAL': 0, 'HIGH': 1, 'MEDIUM': 2, 'LOW': 3, 'INFO': 4 + }.get(x.get('severity', 'MEDIUM'), 2)) + + for finding in sorted_findings: + severity = finding.get('severity', 'MEDIUM').lower() + # Format recommendation: support structured object or plain text + recommendation = finding.get('recommendation', None) + formatted_recommendation = '' + + if isinstance(recommendation, dict): + parts = [] + rec_summary = recommendation.get('summary') + steps = recommendation.get('steps', []) + code = recommendation.get('code', '') + + if rec_summary: + parts.append(f"

{rec_summary}

") + if steps: + parts.append('
    ') + for s in steps: + parts.append(f"
  1. {s}
  2. ") + parts.append('
') + if code: + parts.append(f"
{code}
") + + formatted_recommendation = '\n'.join(parts) + elif isinstance(recommendation, str) and recommendation: + formatted_recommendation = recommendation.replace("\n\n", "

").replace("\n", "
") + else: + formatted_recommendation = '

Please review this security issue and take appropriate remediation steps.

' + + finding_html = f""" +
+
+
{finding.get('title','')}
+ {finding.get('severity','MEDIUM')} +
+ +
+ 📄 + {finding.get('file_path','')} + {f" (Line {finding.get('line_number')})" if finding.get('line_number') else ''} +
+ +
{finding.get('description','')}
+ + + {f"
Rule: {finding.get('rule_id')} - {finding.get('rule_name')}
" if finding.get('rule_id') or finding.get('rule_name') else ''} + +
+ 💡 Recommendation: + {formatted_recommendation} +
+ +
+ Plugin: {finding.get('plugin_name', finding.get('plugin', 'Unknown'))} + {f" | Rule: {finding.get('rule_id') or ''}{(' - ' + finding.get('rule_name')) if finding.get('rule_name') else ''}" if finding.get('rule_id') or finding.get('rule_name') else ''} +
+
+ """ + findings_html += finding_html + + # Format timestamp safely + timestamp = ( + scan_results.get('timestamp') or + scan_info.get('timestamp') or + datetime.now().isoformat() + ) + try: + if 'Z' in str(timestamp): + timestamp_obj = datetime.fromisoformat(str(timestamp).replace('Z', '+00:00')) + else: + timestamp_obj = datetime.fromisoformat(str(timestamp)) + formatted_timestamp = timestamp_obj.strftime('%Y-%m-%d %H:%M:%S') + except Exception: + formatted_timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S') + + return html_template.format( + timestamp=formatted_timestamp, + target_path=scan_info.get('target_path', '../'), + scanner_version=scan_info.get('scanner_version', '2.0.0'), + critical_count=summary.get('by_severity', {}).get('CRITICAL', 0), + high_count=summary.get('by_severity', {}).get('HIGH', 0), + medium_count=summary.get('by_severity', {}).get('MEDIUM', 0), + low_count=summary.get('by_severity', {}).get('LOW', 0), + files_scanned=scan_info.get('stats', {}).get('files_scanned', 0), + plugins_used=scan_info.get('stats', {}).get('plugins_loaded', 0), + total_findings=summary.get('total', 0), + findings_html=findings_html + ) diff --git a/Vulnerability_Tool_V2/core/scanner_engine.py b/Vulnerability_Tool_V2/core/scanner_engine.py new file mode 100644 index 0000000..870997f --- /dev/null +++ b/Vulnerability_Tool_V2/core/scanner_engine.py @@ -0,0 +1,540 @@ +#!/usr/bin/env python3 +""" +NutriHelp Security Scanner V2.0 - Core Engine +""" + +import os +import sys +import importlib +import logging +import re +from typing import List, Dict, Any, Optional +from pathlib import Path +import uuid +from datetime import datetime + +# Add the plugin directory to the Python path +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..')) + +from plugins.base_plugin import BaseSecurityPlugin, PluginManager, SecurityFinding + + +class VulnerabilityScannerEngine: + """Vulnerability Scanner Engine Core Class""" + + def __init__(self, config: Optional[Dict[str, 
Any]] = None): + self.config = config or {} + self.plugin_manager = PluginManager() + self.logger = logging.getLogger("VulnerabilityScannerEngine") + self._setup_logging() + + # Statistics + self.stats = { + 'files_scanned': 0, + 'total_findings': 0, + 'plugins_loaded': 0 + } + + def _setup_logging(self): + """Set up logging configuration""" + logging.basicConfig( + level=logging.INFO, + format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', + handlers=[ + logging.StreamHandler(), + # can add file processors + ] + ) + + def load_plugins(self, plugin_configs: Optional[Dict[str, Any]] = None): + """Dynamically load plugins""" + plugin_configs = plugin_configs or {} + plugins_loaded = 0 + + # Define plugin mappings + plugin_mappings = { + 'jwt_missing_protection': 'plugins.jwt_security.jwt_missing', + 'jwt_configuration': 'plugins.jwt_security.jwt_config', + # General security plugin + 'general_security': 'plugins.general_security', + # RLS plugin removed to fix dependency issues + } + + for plugin_name, module_path in plugin_mappings.items(): + plugin_config_raw = plugin_configs.get(plugin_name, {}) + + # Support two shapes: either {plugin: {'config': {...}, 'enabled': True}} or {plugin: {...}} + # If caller provided a wrapper with 'config', prefer that; otherwise treat the value as the config itself. 
+ plugin_config = plugin_config_raw.get('config', plugin_config_raw) if isinstance(plugin_config_raw, dict) else {} + + # Check if the plugin is enabled (allow enabled flag at the top-level of provided mapping) + enabled_flag = plugin_config_raw.get('enabled', True) if isinstance(plugin_config_raw, dict) else True + if not enabled_flag: + self.logger.info(f"Plugin {plugin_name} is disabled") + continue + + try: + # Dynamically import plugin module + module = importlib.import_module(module_path) + + # Find plugin class (convention: ends with Plugin) + plugin_class = None + for attr_name in dir(module): + attr = getattr(module, attr_name) + if (isinstance(attr, type) and + issubclass(attr, BaseSecurityPlugin) and + attr != BaseSecurityPlugin): + plugin_class = attr + break + + if plugin_class: + plugin_instance = plugin_class(plugin_config) + self.plugin_manager.register_plugin(plugin_instance) + plugins_loaded += 1 + else: + self.logger.warning(f"No plugin class found in {module_path}") + + except ImportError as e: + self.logger.warning(f"Could not load plugin {plugin_name}: {e}") + except Exception as e: + self.logger.error(f"Error loading plugin {plugin_name}: {e}") + + self.stats['plugins_loaded'] = plugins_loaded + self.logger.info(f"Loaded {plugins_loaded} plugins") + + def _count_by_severity(self, findings: List[Any]) -> Dict[str, int]: + """Count findings by severity level.""" + severity_counts = {} + for finding in findings: + # Handle both object and dict findings + if hasattr(finding, 'severity'): + severity = finding.severity + else: + severity = finding.get('severity', 'UNKNOWN') + + severity = str(severity).upper() + severity_counts[severity] = severity_counts.get(severity, 0) + 1 + return severity_counts + + def _count_by_plugin(self, findings: List[Any]) -> Dict[str, int]: + """Count findings by plugin name.""" + plugin_counts = {} + for finding in findings: + # Handle both object and dict findings + if hasattr(finding, 'plugin'): + plugin = 
finding.plugin + elif isinstance(finding, dict): + # plugins may serialize under 'plugin_name' + plugin = finding.get('plugin_name') or finding.get('plugin') or 'Unknown' + else: + plugin = 'Unknown' + + plugin_counts[plugin] = plugin_counts.get(plugin, 0) + 1 + return plugin_counts + + def scan_target(self, target_path: str) -> Dict: + """ + Run all security plugins on the target. + """ + self.logger.info(f"Starting security scan on: {target_path}") + progress_enabled = os.environ.get('SCANNER_PROGRESS') == '1' + def emit_progress(pct: int, message: str): + if progress_enabled: + try: + # Clamp percent and print sentinel line (stdout flush forced) + pct_val = max(0, min(100, int(pct))) + print(f"PROGRESS|{pct_val}|{message}", flush=True) + except Exception: + pass + + # Discover global app.use protections (best-effort) + self.global_protected_prefixes = self._discover_global_app_use_protections(target_path) + self.logger.debug(f"Discovered protected prefixes: {self.global_protected_prefixes}") + + all_findings = [] + + # Make sure to count the number of files first + files_scanned = self._count_scannable_files(target_path) + self.stats['files_scanned'] = files_scanned + + total_plugins = len(self.plugin_manager.plugins) if self.plugin_manager.plugins else 1 + plugin_index = 0 + for plugin in self.plugin_manager.plugins: + plugin_index += 1 + emit_progress(int((plugin_index - 1) / total_plugins * 80), f"Running plugin: {plugin.__class__.__name__}") + try: + # Merge engine-level global excludes into plugin config so plugin-level + # checks (is_file_scannable / should_skip_directory / read_file_safe) honor them. 
+ engine_excludes = self.config.get('global_exclude_paths', []) or [] + plugin_excludes = plugin.config.get('exclude_paths', []) or [] + # preserve order but avoid duplicates + merged = [] + for p in engine_excludes + plugin_excludes: + if p and p not in merged: + merged.append(p) + plugin.config['exclude_paths'] = merged + + findings = plugin.scan(target_path) + if findings: + # Process each finding + for finding in findings: + # Prefer plugin-provided recommendation; generate only if missing/empty + existing_rec = None + if hasattr(finding, 'recommendation'): + existing_rec = getattr(finding, 'recommendation') + elif isinstance(finding, dict): + existing_rec = finding.get('recommendation') + + if not existing_rec: + # Generate and add recommendation only when plugin didn't provide one + recommendation = self._generate_recommendation( + finding.title if hasattr(finding, 'title') else finding.get('title', ''), + finding.file_path if hasattr(finding, 'file_path') else finding.get('file_path', '') + ) + + # If finding is an object + if hasattr(finding, 'recommendation'): + finding.recommendation = recommendation + # If finding is a dictionary + elif isinstance(finding, dict): + finding['recommendation'] = recommendation + + # Ensure other attributes exist + if hasattr(finding, 'plugin') and not getattr(finding, 'plugin', None): + finding.plugin = plugin.__class__.__name__ + if hasattr(finding, 'file_path') and not getattr(finding, 'file_path', None): + finding.file_path = target_path + + all_findings.extend(findings) + except Exception as e: + self.logger.error(f"Plugin {plugin.__class__.__name__} failed: {e}") + + # Convert findings to ensure recommendations are included + emit_progress(85, "Aggregating findings") + findings_dict = [] + for f in all_findings: + if hasattr(f, 'to_dict'): + finding_dict = f.to_dict() + # Ensure recommendation is included in the dictionary + if hasattr(f, 'recommendation'): + finding_dict['recommendation'] = f.recommendation + 
findings_dict.append(finding_dict) + else: + findings_dict.append(f) + + # Sanitize findings: remove malformed entries and findings matching excluded paths + emit_progress(90, "Sanitizing findings") + findings_dict = self._sanitize_findings(findings_dict, target_path) + # Use sanitized findings for summary/stats to reflect what is reported + total_clean = len(findings_dict) + by_sev = self._count_by_severity(findings_dict) + by_plug = self._count_by_plugin(findings_dict) + + # update engine stats + try: + self.stats['total_findings'] = total_clean + except Exception: + self.stats['total_findings'] = total_clean + + # consistent timestamp for both top-level and scan_info + ts = datetime.now().isoformat() + + emit_progress(95, "Compiling summary") + result = { + 'scan_id': str(uuid.uuid4()), + 'target': target_path, + 'timestamp': ts, + 'findings': findings_dict, # Use the processed findings + 'summary': { + 'total': total_clean, + 'files_scanned': files_scanned, + 'by_severity': by_sev, + 'by_plugin': by_plug + }, + 'scan_info': { + 'target_path': target_path, + 'timestamp': ts, + 'scanner_version': "2.0.0", + 'stats': { + 'files_scanned': files_scanned, + 'plugins_loaded': len(self.plugin_manager.plugins), + 'total_findings': total_clean + } + } + } + emit_progress(100, "Scan complete") + return result + + # Backwards compatibility: keep old name as an alias for imports in other modules/tests + # (moved to module-level after class definition) + + def _count_scannable_files(self, target_path: str) -> int: + """Count scannable files""" + count = 0 + for root, dirs, files in os.walk(target_path): + # Skip directories that should not be scanned + dirs[:] = [d for d in dirs if not self._should_skip_dir(os.path.join(root, d))] + + for file in files: + file_path = os.path.join(root, file) + # Respect engine-level global_exclude_paths during counting + if self._is_excluded_path(file_path): + continue + if self._is_scannable_file(file_path): + count += 1 + return count + + 
def _discover_global_app_use_protections(self, target_path: str) -> List[str]: + """Best-effort discovery of app.use mounts that include trusted middleware. + + Looks for patterns like: app.use('/api', authenticateToken, router) + or app.use('/api', authenticateToken, otherRouter) + Returns list of path prefixes (e.g. ['/api', '/admin']). + """ + prefixes = [] + scanner_cfg = self.config.get('scanner', {}) or {} + trusted = scanner_cfg.get('trusted_global_middlewares', []) or [] + + # simple regex to capture app.use('', , ) + pattern = re.compile(r"app\.use\s*\(\s*['\"](?P/[^'\"]*)['\"]\s*,\s*(?P[^,\)]+)", re.IGNORECASE) + + for root, dirs, files in os.walk(target_path): + # do not descend into virtual envs or excluded dirs + dirs[:] = [d for d in dirs if not self._should_skip_dir(os.path.join(root, d))] + for fname in files: + if not fname.endswith('.js') and not fname.endswith('.ts'): + continue + fpath = os.path.join(root, fname) + try: + with open(fpath, 'r', encoding='utf-8', errors='ignore') as fh: + content = fh.read() + except Exception: + continue + + for m in pattern.finditer(content): + prefix = m.group('prefix') + mw_expr = m.group('mw').strip() + # mw_expr may be like 'authenticateToken' or 'authenticateToken,' or 'authenticateToken, router' + # normalize name + mw_name = re.split(r'[,\s\(\)]', mw_expr)[0] + if mw_name in trusted: + if prefix not in prefixes: + prefixes.append(prefix) + + return prefixes + + def _is_excluded_path(self, file_path: str) -> bool: + """Return True if the given path should be excluded by engine-level global rules.""" + try: + excludes = self._get_global_excludes() + # substring-based excludes (backwards compatible) + for p in excludes: + if p and p in file_path: + return True + + # path-segment based excludes: prevent matching arbitrary 'test_' substrings + test_dir_names = {'test', 'tests', '__tests__', 'spec', 'fixtures'} + try: + parts = Path(file_path).parts + if any(pname in test_dir_names for pname in parts): + return 
True + except Exception: + pass + + # filename-based heuristics: ignore files that are test files by naming convention + try: + base = os.path.basename(file_path).lower() + # common test filename patterns: testSomething.js, *.test.js, *.spec.js + if base.startswith('test') or base.endswith('.test.js') or base.endswith('.spec.js') or base.endswith('_test.js'): + return True + except Exception: + pass + except Exception: + pass + return False + + + def _get_global_excludes(self) -> List[str]: + """Return the configured global_exclude_paths from config. + + Supports either top-level 'global_exclude_paths' or nested under 'scanner'. + """ + try: + # prefer explicit top-level setting + top = self.config.get('global_exclude_paths') + excludes = top if top else [] + + # also support nested scanner config + scanner_cfg = self.config.get('scanner', {}) or {} + if isinstance(scanner_cfg, dict): + nested = scanner_cfg.get('global_exclude_paths', []) or [] + for p in nested: + if p and p not in excludes: + excludes.append(p) + + # support a config toggle for internal scanner file filtering + try: + cfg = self.config.get('scanner', {}) if self.config.get('scanner') else self.config + if cfg.get('exclude_internal_scanner_files', False): + internal = cfg.get('internal_paths', []) or self.config.get('internal_paths', []) or [] + for p in internal: + if p and p not in excludes: + excludes.append(p) + except Exception: + # ignore missing nested settings + pass + + return excludes + except Exception: + return [] + + def _sanitize_findings(self, findings: List[Dict[str, Any]], target_path: str) -> List[Dict[str, Any]]: + """Remove malformed findings and those that reference excluded paths. 
+ + Rules: + - Must be a dict with at least one of: title, description, file_path + - If file_path exists and matches engine global_exclude_paths, drop it + - Drop entries that are not dicts or have no meaningful keys + """ + cleaned = [] + excludes = self._get_global_excludes() + + # load public paths from scanner config (prefix or exact match) + scanner_cfg = self.config.get('scanner', {}) or {} + public_paths = scanner_cfg.get('public_paths', []) or [] + + for f in findings: + if not isinstance(f, dict): + continue + + # basic schema presence + has_meaning = any(k in f and f.get(k) not in (None, '') for k in ('title', 'description', 'file_path')) + if not has_meaning: + continue + + fp = f.get('file_path') or '' + # normalize file path: if relative, resolve against the scan target + try: + if fp and not os.path.isabs(fp): + fp_norm = str(Path(target_path, fp).resolve()) + else: + fp_norm = str(Path(fp).resolve()) if fp else '' + except Exception: + fp_norm = fp + + if fp_norm and self._is_excluded_path(str(fp_norm)): + continue + + # If finding references an API route path (plugin should put route info in 'route' key), + # check if it's publicly whitelisted or protected by a global app.use prefix. 
+ route_path = f.get('route') or '' + # if plugin didn't set route, try to extract from title like 'Missing JWT Protection: POST /path' + if not route_path: + title = f.get('title','') or '' + # try to find a space + /path pattern + m = re.search(r"(GET|POST|PUT|DELETE|PATCH)\s+(/[^\s,]*)", title, re.IGNORECASE) + if m: + route_path = m.group(2) + skip_due_to_route = False + if route_path: + # public path exact or prefix match + for pub in public_paths: + if not pub: + continue + # protect against None + rp = route_path or '' + if rp == pub or (rp and rp.startswith(pub)): + skip_due_to_route = True + break + + # check global protected prefixes discovered earlier + if not skip_due_to_route: + prefixes = getattr(self, 'global_protected_prefixes', []) or [] + for pref in prefixes: + if not pref: + continue + p = pref.rstrip('/') + if route_path == p or route_path.startswith(p + '/') or route_path.startswith(pref): + skip_due_to_route = True + break + + if skip_due_to_route: + continue + + cleaned.append(f) + + return cleaned + + def _should_skip_dir(self, dir_path: str) -> bool: + """Check if a directory should be skipped""" + skip_dirs = self.config.get('exclude_directories', [ + 'node_modules', '.git', '__pycache__', 'venv', '.venv', + 'dist', 'build', 'uploads' + ]) + dir_name = os.path.basename(dir_path) + return dir_name in skip_dirs + + def _is_scannable_file(self, file_path: str) -> bool: + """Check if a file is scannable""" + supported_extensions = self.config.get('file_extensions', [ + '.js', '.ts', '.py', '.sql', '.json', '.yaml', '.yml' + ]) + file_ext = os.path.splitext(file_path)[1].lower() + return file_ext in supported_extensions + + def _generate_summary(self, findings: List[SecurityFinding]) -> Dict[str, Any]: + """Generate scan summary""" + summary = { + 'total': len(findings), + 'by_severity': {'CRITICAL': 0, 'HIGH': 0, 'MEDIUM': 0, 'LOW': 0}, + 'by_plugin': {} + } + + for finding in findings: + # Count by severity + severity = 
finding.severity.upper() + if severity in summary['by_severity']: + summary['by_severity'][severity] += 1 + + # Count by plugin + plugin_name = finding.plugin or 'Unknown' + if plugin_name not in summary['by_plugin']: + summary['by_plugin'][plugin_name] = 0 + summary['by_plugin'][plugin_name] += 1 + + return summary + + def _get_timestamp(self) -> str: + """Get timestamp""" + from datetime import datetime + return datetime.now().isoformat() + + def get_scan_stats(self) -> Dict[str, Any]: + """Get scan statistics""" + return self.stats.copy() + + def _generate_recommendation(self, finding_type: str, file_path: str) -> str: + """Generate specific recommendations based on finding type.""" + # Return a structured recommendation dict + if "JWT" in finding_type: + return { + 'summary': 'Add JWT authentication middleware to the route.', + 'steps': [ + "Import the middleware if missing: const { authenticateToken } = require('../middleware/authenticateToken');", + "Add middleware to the route: e.g. router.post('/', authenticateToken, (req, res) => { ... });", + "Consider optional authentication helper if needed: const { optionalAuth } = require('../middleware/authenticateToken');", + "Verify token lifetimes and error handling policies." + ], + 'code': "const { authenticateToken } = require('../middleware/authenticateToken');\nrouter.post('/', authenticateToken, (req, res) => { ... 
});" + } + + # Add more recommendation types as needed + return { + 'summary': 'Review this finding and apply best-practice remediation steps.', + 'steps': ["Investigate the issue details.", "Apply an appropriate fix and test."], + 'code': '' + } + + +# Backwards compatibility: keep old name as an alias for imports in other modules/tests +SecurityScannerEngine = VulnerabilityScannerEngine \ No newline at end of file diff --git a/Vulnerability_Tool_V2/plugins/base_plugin.py b/Vulnerability_Tool_V2/plugins/base_plugin.py new file mode 100644 index 0000000..072a7db --- /dev/null +++ b/Vulnerability_Tool_V2/plugins/base_plugin.py @@ -0,0 +1,218 @@ +#!/usr/bin/env python3 +""" +Base plugin class for NutriHelp Security Scanner V2.0 +""" + +from abc import ABC, abstractmethod +from typing import List, Dict, Any, Optional, Union +import logging +import os +from datetime import datetime + + +class SecurityFinding: + """Standardized security discovery objects""" + + def __init__(self, title: str, severity: str, file_path: str, + description: str, line_number: Optional[int] = None, + plugin: Optional[str] = None, recommendation: Optional[Union[str, Dict[str, Any]]] = None, + rule_id: Optional[str] = None, rule_name: Optional[str] = None, + rule_mode: Optional[str] = None, confidence: Optional[str] = None, + route: Optional[str] = None, http_method: Optional[str] = None): + + self.title = title + self.severity = severity + self.file_path = file_path + self.description = description + self.line_number = line_number + self.plugin = plugin + self.recommendation = recommendation + self.rule_id = rule_id + self.rule_name = rule_name + self.rule_mode = rule_mode + self.confidence = confidence + # optional route information for API-related findings + self.route = route + self.http_method = http_method + + def to_dict(self) -> Dict[str, Any]: + return { + 'title': self.title, + 'severity': self.severity, + 'file_path': self.file_path, + 'line_number': self.line_number, + 
'description': self.description, + 'plugin_name': self.plugin, + 'recommendation': self.recommendation, + 'rule_id': getattr(self, 'rule_id', None), + 'rule_name': getattr(self, 'rule_name', None), + 'rule_mode': getattr(self, 'rule_mode', None), + 'confidence': getattr(self, 'confidence', None), + 'route': getattr(self, 'route', None), + 'http_method': getattr(self, 'http_method', None) + } + + + +class BaseSecurityPlugin(ABC): + """Base class for all security plugins""" + + def __init__(self, config: Optional[Dict[str, Any]] = None): + self.config = config or {} + self.name = self.__class__.__name__ + self.findings: List[SecurityFinding] = [] + self.logger = logging.getLogger(f"SecurityPlugin.{self.name}") + self._setup_logging() + + def _setup_logging(self): + """Set up logging configuration""" + if not self.logger.handlers: + handler = logging.StreamHandler() + formatter = logging.Formatter( + '%(asctime)s - %(name)s - %(levelname)s - %(message)s' + ) + handler.setFormatter(formatter) + self.logger.addHandler(handler) + self.logger.setLevel(logging.INFO) + + @abstractmethod + def get_plugin_info(self) -> Dict[str, str]: + """Return plugin metadata information""" + pass + + @abstractmethod + def scan(self, target_path: str) -> List[SecurityFinding]: + """Perform security scan and return discovered issues""" + pass + + @abstractmethod + def get_severity_level(self) -> str: + """Return default severity level for issues detected by the plugin""" + pass + + def add_finding(self, title: str, description: str, file_path: str, + line_number: Optional[int] = None, severity: Optional[str] = None, + recommendation: Optional[str] = None, rule_id: Optional[str] = None, + rule_name: Optional[str] = None, rule_mode: Optional[str] = None, + confidence: Optional[str] = None) -> SecurityFinding: + """Add security finding""" + if severity is None: + severity = self.get_severity_level() + + finding = SecurityFinding( + title=title, + severity=severity, + file_path=file_path, + 
description=description, + line_number=line_number, + recommendation=recommendation, + rule_id=rule_id, + rule_name=rule_name, + rule_mode=rule_mode, + confidence=confidence + ) + finding.plugin = self.name + self.findings.append(finding) + + self.logger.info(f"Added {severity} finding: {title}") + return finding + + def clear_findings(self): + """Clear all findings""" + self.findings.clear() + + def is_file_scannable(self, file_path: str) -> bool: + """Check if a file is scannable""" + # Respect plugin-level exclude_paths first (substring match) + exclude_paths = self.config.get('exclude_paths', []) + try: + if any(p and p in file_path for p in exclude_paths): + return False + except Exception: + # fallback to normal behavior on unexpected errors + pass + + # Get supported file extensions + supported_extensions = self.config.get('file_extensions', ['.js', '.py', '.ts']) + file_ext = os.path.splitext(file_path)[1].lower() + return file_ext in supported_extensions + + def should_skip_directory(self, dir_path: str) -> bool: + """Check if a directory should be skipped""" + # engine/plugin-level directory skip list + skip_dirs = self.config.get('skip_directories', [ + 'node_modules', '.git', '__pycache__', 'venv', '.venv' + ]) + dir_name = os.path.basename(dir_path) + if dir_name in skip_dirs: + return True + + # Also honor plugin-level exclude_paths which may be substrings of paths + exclude_paths = self.config.get('exclude_paths', []) + try: + if any(p and p in dir_path for p in exclude_paths): + return True + except Exception: + pass + + return False + + def read_file_safe(self, file_path: str) -> Optional[str]: + """Safely read file content""" + # Respect exclude_paths before attempting to read + exclude_paths = self.config.get('exclude_paths', []) + try: + if any(p and p in file_path for p in exclude_paths): + self.logger.debug(f"Skipping read for excluded path: {file_path}") + return None + except Exception: + # proceed to read if exclude_paths check fails + pass 
+ + try: + with open(file_path, 'r', encoding='utf-8') as f: + return f.read() + except (UnicodeDecodeError, PermissionError) as e: + self.logger.warning(f"Cannot read file {file_path}: {e}") + return None + + def get_relative_path(self, file_path: str, base_path: str) -> str: + """Get relative path""" + try: + return os.path.relpath(file_path, base_path) + except ValueError: + return file_path + + +class PluginManager: + """Plugin manager""" + + def __init__(self): + self.plugins: List[BaseSecurityPlugin] = [] + self.logger = logging.getLogger("PluginManager") + + def register_plugin(self, plugin: BaseSecurityPlugin): + """Register plugin""" + self.plugins.append(plugin) + info = plugin.get_plugin_info() + self.logger.info(f"Registered plugin: {info['name']} v{info['version']}") + + def get_plugins(self) -> List[BaseSecurityPlugin]: + """Get all registered plugins""" + return self.plugins + + def run_all_scans(self, target_path: str) -> Dict[str, List[SecurityFinding]]: + """Run all plugin scans""" + results = {} + + for plugin in self.plugins: + plugin.clear_findings() # Clear previous results + try: + findings = plugin.scan(target_path) + results[plugin.name] = findings + self.logger.info(f"Plugin {plugin.name} found {len(findings)} issues") + except Exception as e: + self.logger.error(f"Plugin {plugin.name} failed: {e}") + results[plugin.name] = [] + + return results \ No newline at end of file diff --git a/Vulnerability_Tool_V2/plugins/general_security/README.md b/Vulnerability_Tool_V2/plugins/general_security/README.md new file mode 100644 index 0000000..7d17c38 --- /dev/null +++ b/Vulnerability_Tool_V2/plugins/general_security/README.md @@ -0,0 +1,56 @@ +# General Security Plugin + +This plugin (`general_security`) detects common, generic security issues across a codebase. + +## What it detects + +- Hardcoded secrets + - Looks for variable-like keys (e.g. `password`, `secret`, `api_key`, `token`, `jwt_secret`) assigned string values. 
+ - Requires value length >= 8 to reduce incidental matches. +- Hardcoded database connection strings + - Matches `postgres://` and `mysql://` style URLs in code/config files. +- Permissive CORS configurations + - Detects `Access-Control-Allow-Origin: *` or `origin: '*'` patterns. + +## Configuration + +The plugin reads configuration from the scanner's plugin config and supports the following keys: + +- `enabled` (bool): Enable or disable this plugin. +- `allowlist_keys` (list[str]): Keys to ignore when scanning for secrets (case-insensitive). +- `exclude_paths` (list[str]): Path substrings; if any matches a file path, that file will be skipped by this plugin. +- `secret_keys_allowlist` (list[str]): Additional secret key names to match. + +Example config snippet (in scanner config): + +```yaml +plugins: + general_security: + enabled: true + allowlist_keys: + - 'TEST_SECRET' + exclude_paths: + - 'migrations/' + - 'tests/' + secret_keys_allowlist: + - 'password' + - 'jwt' +``` + +## False positives and mitigation + +- The plugin uses heuristics and simple regexes; add known safe keys to `allowlist_keys` and common test/dev paths to `exclude_paths`. +- If you see many false positives from a specific pattern, prefer to add an allowlist entry or refine your scanner config. + +## Extending + +This plugin is intentionally lightweight. To add more checks: +- Implement additional regex or AST-based checks in `scan()`. +- Use `self.add_finding(...)` to add structured findings (recommendation can be a dict for richer rendering). + +## Output + +Findings are returned as `SecurityFinding` objects (converted to dict by the engine) with fields: +- `title`, `severity`, `file_path`, `line_number`, `description`, `plugin_name`, `recommendation` + +These are rendered in the HTML/JSON reports by the shared renderer. 
diff --git a/Vulnerability_Tool_V2/plugins/general_security/__init__.py b/Vulnerability_Tool_V2/plugins/general_security/__init__.py new file mode 100644 index 0000000..90936ff --- /dev/null +++ b/Vulnerability_Tool_V2/plugins/general_security/__init__.py @@ -0,0 +1,192 @@ +#!/usr/bin/env python3 +"""General security plugin: detects common hardcoded secrets, DB connection strings, +and permissive CORS configurations. +""" +import os +import re +from typing import List, Dict, Any, Optional + +from plugins.base_plugin import BaseSecurityPlugin, SecurityFinding +import yaml + + +class GeneralSecurityPlugin(BaseSecurityPlugin): + def __init__(self, config: Optional[Dict[str, Any]] = None): + super().__init__(config) + self.name = 'general_security' + # Merge sensible defaults to reduce noisy results + defaults = { + 'allowlist_keys': ['TEST_SECRET', 'PLACEHOLDER', 'EXAMPLE_SECRET'], + 'exclude_paths': ['tests/', '__tests__/', 'fixtures/', 'node_modules/', '.venv/', 'venv/'], + 'secret_keys_allowlist': ['password', 'passwd', 'secret', 'api_key', 'apiKey', 'token', 'jwt_secret', 'jwt'] + } + # Only set defaults for keys not provided by user config + for k, v in defaults.items(): + if k not in self.config: + self.config[k] = v + + # Legacy rules support (load rules_v1.yaml if enabled) + self.legacy_rules: List[Dict[str, Any]] = [] + legacy_cfg = self.config.get('legacy_rules', {}) + include_debug = legacy_cfg.get('include_debug_rules', False) + if legacy_cfg.get('enabled', True): + rules_path = legacy_cfg.get('path', os.path.join(os.path.dirname(__file__), 'rules_v1.yaml')) + try: + if os.path.exists(rules_path): + with open(rules_path, 'r', encoding='utf-8') as rf: + loaded = yaml.safe_load(rf) or [] + # compile regex for faster matching later + for r in loaded: + # skip any malformed entries (e.g., null lines) that aren't mappings + if not isinstance(r, dict): + self.logger.debug("Skipping non-dict legacy rule entry") + continue + try: + # skip debug-only rules unless 
explicitly enabled in config + if r.get('mode') == 'debug' and not include_debug: + continue + r['compiled'] = re.compile(r.get('regex', ''), re.MULTILINE | re.DOTALL) + self.legacy_rules.append(r) + except re.error: + self.logger.warning(f"Invalid regex in legacy rule {r.get('id')}") + self.logger.info(f"Loaded {len(self.legacy_rules)} legacy rules from {rules_path}") + else: + self.logger.info(f"Legacy rules file not found at {rules_path}") + except Exception as e: + self.logger.warning(f"Failed to load legacy rules: {e}") + + def get_plugin_info(self) -> Dict[str, str]: + return { + 'name': 'general_security', + 'version': '0.1.0', + 'description': 'Detect generic security issues such as hardcoded secrets, DB URLs and permissive CORS.' + } + + def get_severity_level(self) -> str: + return 'MEDIUM' + + def scan(self, target_path: str) -> List[SecurityFinding]: + # Walk files under target_path and perform lightweight pattern checks + for root, dirs, files in os.walk(target_path): + # Respect skip directories from base class config + dirs[:] = [d for d in dirs if not self.should_skip_directory(os.path.join(root, d))] + + for fname in files: + fpath = os.path.join(root, fname) + # Respect plugin-configured exclude paths early to avoid scanning tests/fixtures etc. 
+ exclude_paths = self.config.get('exclude_paths', []) + if any(p and p in fpath for p in exclude_paths): + # skip this file entirely + continue + # allow additional common config file extensions even if base class + # doesn't include them (e.g., .conf, .env, .ini, .yaml, .yml, .txt) + extra_exts = {'.conf', '.env', '.ini', '.yaml', '.yml', '.json', '.txt'} + file_ext = os.path.splitext(fpath)[1].lower() + if not (self.is_file_scannable(fpath) or file_ext in extra_exts): + continue + + content = self.read_file_safe(fpath) + if not content: + continue + + # Apply legacy rules if present + if self.legacy_rules: + for rule in self.legacy_rules: + try: + matches = list(rule['compiled'].finditer(content)) + except Exception: + matches = [] + for m in matches: + title = rule.get('name') or rule.get('id') + severity = rule.get('severity', self.get_severity_level()) + desc = rule.get('description', '') + rec = rule.get('recommendation') + line_no = self._estimate_line_number(content, m.start()) + # Prevent duplicating findings for short allowlisted patterns + self.add_finding( + title=title, + description=f"{desc} -- match: {m.group(0)[:200]}", + file_path=fpath, + line_number=line_no, + severity=severity, + recommendation=rec, + rule_id=rule.get('id'), + rule_name=rule.get('name'), + rule_mode=rule.get('mode'), + confidence=rule.get('confidence', 'MEDIUM') + ) + + # 1) hardcoded secrets (improved heuristic) + # Require variable-like keys and a reasonably long secret value (to avoid short incidental matches) + secret_keys = self.config.get('secret_keys_allowlist', ['password', 'passwd', 'secret', 'api_key', 'apiKey', 'token', 'jwt_secret', 'jwt']) + secret_keys_re = r"(?:" + r"|".join([re.escape(k) for k in secret_keys]) + r")" + # match patterns like: KEY = 'value' or "KEY": "value"; value must be at least 8 chars and not contain whitespace/newlines + secret_pattern = 
re.compile(rf"(?i)({secret_keys_re})\s*[:=]\s*[\'\"]([A-Za-z0-9@#\$%\^&\-_=+\./\\~`{{}}\|]{{8,512}})[\'\"]") + for m in secret_pattern.finditer(content): + key = m.group(1) + value = m.group(2) + # allowlist check: if key or file path is explicitly allowed, skip + allowlist_keys = [k.lower() for k in self.config.get('allowlist_keys', [])] + if key.lower() in allowlist_keys: + continue + exclude_paths = self.config.get('exclude_paths', []) + if any(p and p in fpath for p in exclude_paths): + continue + + self.add_finding( + title=f'Hardcoded secret: {key}', + description=f'Found likely hardcoded secret key "{key}" in file. Value length: {len(value)}', + file_path=fpath, + line_number=self._estimate_line_number(content, m.start()), + severity='CRITICAL', + recommendation={ + 'summary': 'Remove hardcoded secrets and use environment variables or a secrets manager.', + 'steps': [ + 'Move the secret into an environment variable or encrypted store.', + 'Rotate the exposed secret immediately if used in production.', + 'Ensure secrets are not committed to VCS.' + ] + }, + confidence='HIGH' + ) + + # 2) DB connection strings + # match postgres://... or mysql://... 
regardless of surrounding quotes + db_pattern = re.compile(r"(?i)(?:postgres(?:ql)?|mysql)://[^\s'\"`<>]+") + for m in db_pattern.finditer(content): + self.add_finding( + title='Hardcoded DB connection string', + description='Found a database connection string in code or config which may contain credentials.', + file_path=fpath, + line_number=self._estimate_line_number(content, m.start()), + severity='HIGH', + recommendation='Move DB credentials to environment variables and avoid committing connection strings.', + confidence='HIGH' + ) + + # 3) permissive CORS or wildcard origins (simple checks) + # look for Access-Control-Allow-Origin: * or origin: '*' in JS/TS configs + if re.search(r"Access-Control-Allow-Origin\s*:\s*\*", content) or re.search(r"origin\s*[:=]\s*[\'\"]\*\b", content): + self.add_finding( + title='Permissive CORS configuration', + description='Detected wildcard CORS origin which allows any origin to access resources.', + file_path=fpath, + line_number=None, + severity='MEDIUM', + recommendation={ + 'summary': 'Restrict CORS origins to a specific allowlist.', + 'steps': [ + 'Replace wildcard origin with an explicit list of allowed origins.', + 'If dynamic, validate and sanitize the Origin header before echoing it back.' 
+ ] + }, + confidence='LOW' + ) + + return self.findings + + def _estimate_line_number(self, content: str, pos: int) -> Optional[int]: + try: + return content[:pos].count('\n') + 1 + except Exception: + return None diff --git a/Vulnerability_Tool_V2/plugins/general_security/rules_v1.yaml b/Vulnerability_Tool_V2/plugins/general_security/rules_v1.yaml new file mode 100644 index 0000000..1eb7885 --- /dev/null +++ b/Vulnerability_Tool_V2/plugins/general_security/rules_v1.yaml @@ -0,0 +1,500 @@ +- # Legacy rule set extracted from Vulnerability_Scanner_V1.4.py +- # Each rule: id, language, name, severity, regex, description, recommendation +- # Note: regexes are written as raw strings; when loading, compile with re.MULTILINE|re.DOTALL as needed + +- id: js_sql_injection_concat + language: javascript + name: Sql_Injection_String_Concat + severity: HIGH + confidence: HIGH + regex: | + \.query\s*\(.*\+.*\) + description: Detects string concatenation used in DB query calls which may indicate SQL injection risk. + recommendation: "Use parameterized queries / prepared statements instead of string concatenation." + +- id: js_xss_res_send_concat + language: javascript + name: XSS_Send_Concatenation + severity: HIGH + confidence: HIGH + regex: | + res\.send\s*\(.*\+.*\) + description: Detects concatenation in res.send which may output unescaped user input. + recommendation: "Sanitize output and avoid concatenating untrusted input into responses." + +- id: js_eval + language: javascript + name: Eval_Function + severity: CRITICAL + confidence: HIGH + regex: | + eval\s*\(.*\) + description: Use of eval() can execute arbitrary code in JS. + recommendation: "Avoid eval(); use safer alternatives or strict input validation." + +- id: js_command_exec + language: javascript + name: Command_Injection_Exec + severity: CRITICAL + confidence: HIGH + regex: | + (exec|execSync|spawn)\s*\(.*\) + description: Shell execution with concatenated input may cause command injection. 
+ recommendation: "Use argument arrays and avoid passing untrusted input to shell invocations." + +- id: js_api_key_hardcoded + language: javascript + name: API_Key_Hardcoded + severity: HIGH + confidence: MEDIUM + regex: | + api_key\s*=\s*['\"]\S+['\"] + description: Hardcoded API key detected in code. + recommendation: "Move keys to environment variables or a secrets manager and remove from source control." + +- id: js_eval_function_general + language: javascript + name: Eval_General + severity: CRITICAL + confidence: HIGH + regex: | + (eval\s*\(.*\)|new\s+Function\(.*\)) + description: Dynamic code evaluation or Function constructor usage. + recommendation: "Avoid dynamic code generation; prefer static functions and strict input handling." +- id: py_eval + language: python + name: Python_Eval_Usage + severity: CRITICAL + confidence: HIGH + regex: | + eval\s*\(.*\) + description: Use of eval() in Python can execute arbitrary code. + recommendation: "Avoid eval(); use literal_eval from ast for safe parsing or validate inputs." + +- id: py_exec + language: python + name: Python_Exec_Usage + severity: CRITICAL + confidence: HIGH + regex: | + exec\s*\(.*\) + description: Use of exec() may execute untrusted code. + recommendation: "Avoid exec(); use safer alternatives and strict validation." + +- id: py_os_system + language: python + name: OS_System_Popen + severity: CRITICAL + confidence: HIGH + regex: | + os\.(system|popen)\s*\(.*\) + description: Use of os.system or popen could allow command injection. + recommendation: "Use subprocess.run with list arguments and avoid shell=True; validate inputs." + +- id: py_pickle_load + language: python + name: Pickle_Load + severity: HIGH + confidence: HIGH + regex: | + pickle\.load\s*\(.*\) + description: Untrusted pickle deserialization can lead to code execution. + recommendation: "Avoid pickle.load on untrusted data; use safer serialization formats like JSON." 
+ +- id: txt_hardcoded_credentials + language: text + name: Hardcoded_Credentials_IN_Text + severity: HIGH + confidence: HIGH + regex: | + (username|password|token|secret|access[_-]?key)\s*[:=]\s*['"]?\S+['"]? + description: Possible credentials found in text files. + recommendation: "Remove credentials from text files and rotate any exposed secrets." + +- id: yaml_hardcoded_credentials + language: yaml + name: Yaml_Hardcoded_Credentials + severity: HIGH + confidence: HIGH + regex: | + (username|password|token|secret|access[_-]?key)\s*:\s*['"]?\S+['"]? + description: Possible credentials stored in YAML files. + recommendation: "Use env vars or secret stores; ensure YAML files do not contain plain secrets." + +- id: txt_jwt_token_like + language: text + name: JWT_Token_Like + severity: MEDIUM + confidence: MEDIUM + regex: | + eyJ[A-Za-z0-9_-]{10,}\.[A-Za-z0-9._-]{10,}\.[A-Za-z0-9._-]{10,} + description: JWT-like token detected in text; could be an exposed token. + recommendation: "Verify token exposure and rotate if necessary." + +- id: general_sensitive_logging + language: any + name: Sensitive_Data_Logging + severity: MEDIUM + confidence: LOW + regex: | + console\.(log|debug|error|warn)\s*\(.*(password|secret|key|token).* + description: Logging statements containing sensitive keywords. + recommendation: "Avoid logging sensitive values; mask or remove them from logs." + +- id: js_insecure_token_generation + confidence: LOW + language: javascript + name: Insecure_Token_Generation + severity: MEDIUM + regex: | + Math\.random\s*\(\) + description: Math.random used for token generation (predictable). + recommendation: "Use crypto-secure random functions (crypto.randomBytes or similar)." + +- id: yml_unsafe_object + language: yaml + name: Unsafe_YAML_Object + severity: HIGH + confidence: HIGH + regex: | + !!python/(object|module|function) + description: Unsafe YAML tags that can lead to arbitrary code when using unsafe loaders. 
+ recommendation: "Avoid unsafe YAML tags and use safe_load. Sanitize YAML inputs." + +# --- Additional rules extracted from V1.4 --- +- id: js_insecure_file_handling + language: javascript + name: Insecure_File_Handling_Fs_Unlink + severity: MEDIUM + confidence: LOW + regex: "fs\\.unlink\\s*\\(.*\\)" + description: Use of fs.unlink without validation may delete unintended files. + recommendation: "Validate file paths and avoid deleting files based on untrusted input." + +- id: js_insecure_file_upload + language: javascript + name: Insecure_File_Upload_Multer + severity: MEDIUM + confidence: LOW + regex: "multer\\s*\\(\\s*\\{.*dest.*}\\s*\\)" + description: Multer configured with `dest` may store uploaded files without sanitation. + recommendation: "Validate and sanitize uploaded files; prefer managed storage or enforce strict checks." + +- id: js_directory_traversal_readfile + language: javascript + name: Directory_Movement_ReadFile + severity: HIGH + confidence: HIGH + regex: "fs\\.readFile\\s*\\(.*\\.\\./.*\\)" + description: Reading files using ../ may indicate directory traversal vulnerabilities. + recommendation: "Normalize and validate paths; restrict file reads to allowed directories." + +- id: js_dangerous_permission_chmod + language: javascript + name: Dangerous_Permission_Level_Fs_Chmod + severity: MEDIUM + confidence: LOW + regex: "fs\\.chmod\\s*\\(.*\\)" + description: Changing file permissions without care may expose sensitive files. + recommendation: "Ensure correct permission bits and limit chmod operations to safe contexts." + +- id: js_redirects_with_query + language: javascript + name: Redirects_With_Query + severity: HIGH + confidence: MEDIUM + regex: "res\\.redirect\\s*\\(.*req\\.query\\..*\\)" + description: Redirects built from query parameters can lead to open redirect or injection. + recommendation: "Validate redirect targets and avoid echoing user-controlled URLs." 
+ +- id: js_weak_hashing + language: javascript + name: Weak_Hashing_Algorithm_JS + severity: MEDIUM + confidence: LOW + regex: "(md5|sha1|des)\\s*\\(" + description: Use of weak hashing algorithms that are cryptographically broken. + recommendation: "Use modern hashing like bcrypt, scrypt or Argon2 for sensitive data." + +- id: js_plaintext_credentials + language: javascript + name: Plaintext_Credentials_Assignment + severity: HIGH + confidence: HIGH + regex: "(username|password)\\s*=\\s*[\\'\"]\\S+[\\'\"]" + description: Assignment of username/password in code suggests plaintext credentials. + recommendation: "Move credentials to environment variables or secret stores and avoid committing them." + +- id: js_insecure_ssl_config + language: javascript + name: Insecure_SSL_Config + severity: MEDIUM + confidence: LOW + regex: "server\\.listen\\s*\\(.*http.*\\)" + description: Server listening over plain HTTP may expose traffic. + recommendation: "Use TLS/HTTPS in production and avoid serving sensitive endpoints over HTTP." + +- id: js_http_called + language: javascript + name: HTTP_Call_Used + severity: LOW + confidence: LOW + regex: "http\\.get\\s*\\(.*\\)" + description: Use of http.get (non-TLS) to call external resources. + recommendation: "Prefer https requests or ensure transport security when calling endpoints." + +- id: js_json_parse_unvalidated + language: javascript + name: JSON_Parsing_No_Validation + severity: HIGH + confidence: MEDIUM + regex: | + JSON\.parse\s*\(.*req\.(body|query|params).*\) + description: Parsing JSON directly from request parameters without validation. + recommendation: "Validate request input before parsing; use schema validation." + +- id: js_env_vars_in_plaintext + language: javascript + name: Environment_Variables_In_Plaintext + severity: HIGH + confidence: HIGH + regex: | + process\.env\.[a-zA-Z_][a-zA-Z0-9_]*\s*=\s*['"]\S+['"] + description: Environment variables assigned plaintext values in code. 
+ recommendation: "Do not set secrets directly in code; use deployment environment variables or secret managers."
+
+- id: js_debug_left_exposed
+ language: javascript
+ name: Debug_Left_Exposed
+ severity: LOW
+ confidence: LOW
+ regex: |
+ app\.get\s*\(['"].*/debug.*['"]
+ description: Debug endpoints exposed via app.get patterns.
+ recommendation: "Remove or protect debug endpoints in production."
+
+- id: js_insecure_file_paths
+ language: javascript
+ name: Insecure_File_Paths_From_Request
+ severity: HIGH
+ confidence: HIGH
+ regex: |
+ (fs\.(readFile|writeFile))\s*\(.*req\.(body|query|params)\.path.*\)
+ description: Reading/writing files from request-provided paths can be exploited.
+ recommendation: "Validate and sanitize file paths and disallow direct use of user-supplied paths."
+
+- id: js_unsecured_spawn
+ language: javascript
+ name: Unsecured_Spawn
+ severity: CRITICAL
+ regex: |
+ spawn\s*\(.*\)
+ description: Spawn is used; if arguments are user-controlled this may lead to injection.
+ recommendation: "Use argument arrays safely and avoid passing untrusted input to spawn."
+
+- id: py_exec_function
+ language: python
+ name: Python_Exec_Function
+ severity: CRITICAL
+ confidence: HIGH
+ regex: |
+ exec\s*\(.*\)
+ description: exec() usage in Python may execute arbitrary code.
+ recommendation: "Avoid exec(); use safer constructs and strict validation."
+
+- id: py_subprocess_injection
+ language: python
+ name: Subprocess_Injection
+ severity: CRITICAL
+ confidence: HIGH
+ regex: |
+ subprocess\.(Popen|call|run)\s*\(.*\)
+ description: Use of subprocess family without argument lists or with shell=True can be dangerous.
+ recommendation: "Prefer subprocess.run with list args and shell=False; validate inputs."
+
+- id: py_hardcoded_credentials
+ language: python
+ name: Python_Hardcoded_Credentials
+ severity: HIGH
+ confidence: HIGH
+ regex: |
+ (username|password)\s*=\s*['"]\S+['"]
+ description: Hardcoded credentials assignment in Python code.
+ recommendation: "Move credentials to environment variables or secrets management." + +- id: py_weak_hashing + language: python + name: Weak_Hashing_Algorithm_PY + severity: MEDIUM + confidence: LOW + regex: | + (md5|sha1|des)\s*\( + description: Weak hashing algorithms used in Python. + recommendation: "Use modern algorithms suitable for the use case (bcrypt/argon2)." + +- id: py_insecure_random + language: python + name: Insecure_Random_PY + severity: MEDIUM + confidence: LOW + regex: | + random\.randint\s*\(.*\) + description: Use of random.randint for security-sensitive values. + recommendation: "Use secrets module for cryptographically secure randomness." + +- id: py_unverified_ssl + language: python + name: Unverified_SSL_Request + severity: HIGH + confidence: HIGH + regex: | + requests\.get\s*\(.*verify\s*=\s*False.*\) + description: Ignoring SSL verification in requests can expose to MITM attacks. + recommendation: "Avoid verify=False; ensure proper certificate validation." + +- id: py_dangerous_file_access + language: python + name: Dangerous_File_Access_Open + severity: MEDIUM + confidence: LOW + regex: | + open\s*\(.*\) + description: Use of open() on untrusted paths may lead to information disclosure or file overwrite. + recommendation: "Validate paths and avoid opening files based on untrusted input." + +- id: py_environ_access + language: python + name: Environment_Variables_Exposure_PY + severity: LOW + confidence: LOW + regex: | + os\.environ\[\s*['"]\S+['"]\s*\] + description: Direct access to os.environ keys; could be misused if writing to env in code. + recommendation: "Avoid writing secrets to code; read env safely and do not log secrets." + +- id: py_debug_logging + language: python + name: Debug_Logging_PY + severity: MEDIUM + confidence: LOW + regex: | + print\s*\(.*(password|secret|key|token).*\) + description: Printing sensitive values to stdout in Python. + recommendation: "Avoid printing secrets; mask or remove sensitive data from logs." 
+
+- id: py_deserialization_risk
+ language: python
+ name: Deserialization_Risk_PY
+ severity: HIGH
+ confidence: MEDIUM
+ regex: |
+ json\.loads\s*\(.*\)
+ description: Unvalidated JSON deserialization may allow unexpected data handling.
+ recommendation: "Validate input schemas and avoid executing deserialized contents."
+
+- id: py_unsecured_spawn
+ language: python
+ name: Unsecured_Spawn_PY
+ severity: CRITICAL
+ confidence: HIGH
+ regex: |
+ os\.spawn\w*\s*\(.*\)
+ description: Use of the os.spawn* family can be insecure when inputs are untrusted.
+ recommendation: "Prefer subprocess interfaces with validation; avoid spawning with untrusted input."
+
+- id: word_sensitive_keywords
+ language: docx
+ name: Word_Sensitive_Keywords
+ severity: LOW
+ confidence: LOW
+ regex: |
+ (?i)(confidential|private|classified|top secret)
+ description: Sensitive document keywords found in Word document content.
+ recommendation: "Review documents for sensitive information and secure storage."
+
+- id: word_email_addresses
+ language: docx
+ name: Word_Email_Addresses
+ severity: LOW
+ confidence: LOW
+ regex: |
+ [a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+
+ description: Email addresses discovered in Word documents.
+ recommendation: "Treat discovered emails as potential PII and handle accordingly."
+
+- id: word_phone_numbers
+ language: docx
+ name: Word_Phone_Numbers
+ severity: LOW
+ confidence: LOW
+ regex: |
+ \b(?:\+\d{1,3})?[-.\s]?(\d{2,4})?[-.\s]?\d{3}[-.\s]?\d{4}\b
+ description: Phone numbers discovered in Word documents.
+ recommendation: "Treat discovered phone numbers as PII and handle accordingly."
+
+- id: txt_sensitive_keywords
+ language: text
+ name: Text_Sensitive_Keywords
+ severity: LOW
+ confidence: LOW
+ regex: |
+ (?i)\b(confidential|private|classified|secret|token|proprietary)\b
+ description: Sensitive words found inside text files.
+ recommendation: "Review file and handle any sensitive content according to policy."
+
+- id: txt_email_addresses
+ language: text
+ name: Text_Email_Addresses
+ severity: LOW
+ confidence: LOW
+ regex: |
+ [a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+
+ description: Email addresses detected in text files.
+ recommendation: "Consider masking or removing PII from artifacts."
+
+- id: txt_urls
+ language: text
+ name: Text_URLs
+ severity: LOW
+ confidence: LOW
+ regex: |
+ https?://[^\s]+
+ description: URLs detected in text files.
+ recommendation: "Inspect external links for potential sensitive endpoints or secrets."
+
+- id: txt_ip_addresses
+ language: text
+ name: Text_IP_Addresses
+ severity: LOW
+ confidence: LOW
+ regex: |
+ \b(?:\d{1,3}\.){3}\d{1,3}\b
+ description: IP addresses found in text files.
+ recommendation: "Verify whether IPs are internal and whether disclosure is sensitive."
+
+- id: txt_aws_credentials
+ language: text
+ name: Text_AWS_Credentials
+ severity: HIGH
+ regex: |
+ AKIA[0-9A-Z]{16}
+ description: AWS Access Key ID detected in text files.
+ recommendation: "Treat as sensitive; rotate keys and remove from source if found."
+
+- id: txt_api_keys_long
+ language: text
+ name: Text_API_Keys_Long
+ severity: HIGH
+ regex: |
+ (?i)(api[_-]?key|access[_-]?token)\s*[:=]\s*['"]?[A-Za-z0-9\-_]{20,}['"]?
+ description: Long API keys detected in text files.
+ recommendation: "Remove keys from files and rotate if necessary."
+
+- id: txt_jwt_tokens
+ language: text
+ name: Text_JWT_Tokens
+ severity: MEDIUM
+ regex: |
+ eyJ[A-Za-z0-9_-]{10,}\.[A-Za-z0-9._-]{10,}\.[A-Za-z0-9._-]{10,}
+ description: JWT-like tokens present in text files.
+ recommendation: "Investigate and rotate tokens if they are valid and exposed."
diff --git a/Vulnerability_Tool_V2/plugins/jwt_security/README.md b/Vulnerability_Tool_V2/plugins/jwt_security/README.md new file mode 100644 index 0000000..b4ed4e7 --- /dev/null +++ b/Vulnerability_Tool_V2/plugins/jwt_security/README.md @@ -0,0 +1,91 @@ +# JWT Security Plugin + +This plugin (`jwt_security`) checks code and configuration for common JWT-related misconfigurations and usage issues. + +## What it detects + +- Missing authentication protection on endpoints + - Detects HTTP route handlers that do not use the project's authentication middleware (e.g. `authenticateToken`) and flags endpoints that should be protected. +- Low-entropy or weak JWT secrets + - Scans configuration files (e.g. `.env`) and code for JWT secret values that are short, predictable, or clearly not cryptographically strong. +- Direct, ad-hoc JWT verification usage + - Flags locations where `jwt.verify()` (or equivalent) is used directly instead of a centralized AuthService or helper, encouraging a single location for verification and error handling. +- Incomplete JWT error handling + - Detects code paths which call verification without handling common JWT exceptions (expired token, malformed token, not-before, etc.). + +## Rationale +JWTs are powerful but can be misused in ways that reduce their security. This plugin helps find common pattern mistakes early so they can be centralized, hardened, and consistently handled. + +## Configuration +Add plugin configuration under `plugins.jwt_security` in the scanner config YAML. + +Supported keys: + +- `enabled` (bool): Enable or disable the plugin. +- `auth_middleware_names` (list[str]): Additional function/variable names that should be recognised as authentication middleware (default: `['authenticateToken']`). +- `min_secret_length` (int): Minimum allowed length for JWT secrets before flagging (default: 32). +- `exclude_paths` (list[str]): Path substrings to skip during scanning (e.g. `['tests/', 'fixtures/']`). 
+- `allowlist_secrets` (list[str]): Secret values or keys that should be ignored by the plugin. + +Example config: + +```yaml +plugins: + jwt_security: + enabled: true + auth_middleware_names: + - 'authenticateToken' + min_secret_length: 32 + exclude_paths: + - 'tests/' + - 'fixtures/' + allowlist_secrets: + - 'LOCAL_DEV_SECRET' +``` + +## False positives and mitigation +- Routes defined in third-party libraries or vendored code may be flagged — add their paths to `exclude_paths`. +- Test fixtures often include dummy tokens; add test directories to `exclude_paths` or add known dummy token names to `allowlist_secrets`. +- If your project uses a different middleware name, add it to `auth_middleware_names` so route checks recognise it. + +## Remediation suggestions +- Protect endpoints with a single authentication middleware (e.g. `authenticateToken`) and avoid sprinkling `jwt.verify()` calls across handlers. +- Use strong, randomly-generated secrets stored in environment variables or a secrets manager and rotate them regularly. +- Centralize JWT handling in an AuthService class that encapsulates verify/issue logic and error handling. +- Handle common JWT exceptions explicitly and return appropriate status codes (401 for invalid/expired, 403 for forbidden, etc.). 
+ +## Output +Findings are emitted as `SecurityFinding` objects with these fields: +- `title` +- `severity` (LOW/MEDIUM/HIGH/CRITICAL) +- `file_path` +- `line_number` +- `description` +- `plugin_name` ("jwt_security") +- `recommendation` (string or structured dict) + +Example finding JSON snippet: + +```json +{ + "title": "Missing JWT Protection: POST /api/orders", + "severity": "MEDIUM", + "file_path": "routes/orders.js", + "line_number": 42, + "description": "Endpoint POST /api/orders is not protected by authentication middleware.", + "plugin_name": "jwt_security", + "recommendation": { + "summary": "Add authentication middleware to protect this endpoint.", + "steps": [ + "Import authenticateToken middleware", + "Add middleware to the route: router.post('/api/orders', authenticateToken, handler)" + ] + } +} +``` + +## Extending +To add more JWT checks, implement logic in the plugin's `scan()` method and call `self.add_finding(...)` with structured data. Prefer AST-based checks for accuracy where practical. + +## Notes +This plugin uses heuristics and simple static analysis; it may not catch every JWT issue nor be 100% accurate for all code patterns. Use configuration to tune coverage and reduce false positives. 
diff --git a/Vulnerability_Tool_V2/plugins/jwt_security/__init__.py b/Vulnerability_Tool_V2/plugins/jwt_security/__init__.py new file mode 100644 index 0000000..c41e9e2 --- /dev/null +++ b/Vulnerability_Tool_V2/plugins/jwt_security/__init__.py @@ -0,0 +1 @@ +# JWT Security Plugin Package \ No newline at end of file diff --git a/Vulnerability_Tool_V2/plugins/jwt_security/jwt_config.py b/Vulnerability_Tool_V2/plugins/jwt_security/jwt_config.py new file mode 100644 index 0000000..50fd32c --- /dev/null +++ b/Vulnerability_Tool_V2/plugins/jwt_security/jwt_config.py @@ -0,0 +1,444 @@ +#!/usr/bin/env python3 +""" +JWT Configuration Validation Plugin - Updated for NutriHelp's resolved architecture +""" + +import os +import re +from typing import List, Dict, Any +from ..base_plugin import BaseSecurityPlugin, SecurityFinding + + +class JWTConfigurationPlugin(BaseSecurityPlugin): + """JWT configuration verification plug-in - detection of existing basic configuration""" + + def get_plugin_info(self) -> Dict[str, str]: + return { + 'name': 'JWT Configuration Validator', + 'version': '2.0.1', + 'description': 'Validates JWT security configurations (NutriHelp architecture optimized)', + 'author': 'NutriHelp Security Team' + } + + def get_severity_level(self) -> str: + return "HIGH" + + def scan(self, target_path: str) -> List[SecurityFinding]: + findings = [] + + # Check JWT secret strength + env_file = os.path.join(target_path, '.env') + if os.path.exists(env_file): + with open(env_file, 'r') as f: + for i, line in enumerate(f, 1): + if 'JWT_SECRET' in line: + secret = line.split('=')[1].strip() + if self._is_low_entropy_secret(secret): + findings.append(SecurityFinding( + title="Low Entropy JWT Secret", + severity="MEDIUM", + file_path=".env", + line_number=i, + description="JWT secret appears to have low entropy (predictable patterns).", + plugin=self.__class__.__name__, + recommendation="""Improve JWT secret security: +1. 
Generate a strong secret using crypto: + const crypto = require('crypto'); + const secret = crypto.randomBytes(64).toString('hex'); + +2. Use environment-specific secrets +3. Implement secret rotation +4. Consider using asymmetric keys for larger systems""" + )) + + # Check direct JWT usage + middleware_file = os.path.join(target_path, 'middleware.js') + if os.path.exists(middleware_file): + findings.append(SecurityFinding( + title="Direct JWT Usage Instead of AuthService", + severity="MEDIUM", + file_path="middleware.js", + description="Direct jwt.verify() usage detected instead of centralized authService.", + plugin=self.__class__.__name__, + recommendation="""Centralize JWT verification: +1. Create AuthService class +2. Move all JWT operations to AuthService +3. Use AuthService.verifyToken() in middleware +4. Add comprehensive error handling""" + )) + + # Check error handling + findings.append(SecurityFinding( + title="Incomplete JWT Error Handling", + severity="LOW", + file_path="middleware.js", + description="JWT verification lacks comprehensive error handling.", + plugin=self.__class__.__name__, + recommendation="""Implement proper JWT error handling: +1. Handle TokenExpiredError +2. Handle JsonWebTokenError +3. Handle NotBeforeError +4. Add logging for security events +5. 
Return appropriate status codes""" + )) + + return findings + + def _check_env_files(self, target_path: str): + """Check environment variable configuration""" + env_files = ['.env', '.env.example', '.env.local'] + + for env_file in env_files: + env_path = os.path.join(target_path, env_file) + if os.path.exists(env_path): + self._analyze_env_file(env_path, target_path) + + def _analyze_env_file(self, env_path: str, base_path: str): + """Analyze environment variable files - Based on existing configuration checks""" + try: + content = self.read_file_safe(env_path) + if not content: + return + + relative_path = self.get_relative_path(env_path, base_path) + lines = content.split('\n') + + jwt_secret = None + jwt_secret_line = None + + # Find JWT_SECRET configuration + for i, line in enumerate(lines, 1): + line_clean = line.strip() + + if re.match(r'JWT_SECRET\s*=', line_clean): + jwt_secret_match = re.search(r'JWT_SECRET\s*=\s*(.+)', line_clean) + if jwt_secret_match: + jwt_secret = jwt_secret_match.group(1).strip('\'"') + jwt_secret_line = i + + # Validate JWT secret strength + if jwt_secret: + self._validate_jwt_secret_strength(jwt_secret, jwt_secret_line, relative_path) + + # Check additional security configurations + self._check_additional_security_config(content, relative_path) + + except Exception as e: + self.logger.error(f"Error analyzing env file {env_path}: {e}") + + def _validate_jwt_secret_strength(self, secret: str, line_number: int, file_path: str): + """Validate JWT secret strength""" + min_length = self.config.get('min_secret_length', 32) + + secret_clean = secret.strip('\'"').strip() + + # Check length + if len(secret_clean) < min_length: + self.add_finding( + title=f"JWT Secret Too Short ({len(secret_clean)} chars)", + description=f"JWT secret is {len(secret_clean)} characters. 
" + f"Recommend at least {min_length} characters for production security.", + file_path=file_path, + line_number=line_number, + severity="MEDIUM", # Medium priority + recommendation="Generate a stronger JWT secret using crypto.randomBytes(64).toString('hex')" + ) + + # Check entropy - Is it too simple? + if self._is_low_entropy_secret(secret_clean): + self.add_finding( + title="Low Entropy JWT Secret", + description="JWT secret appears to have low entropy (predictable patterns). " + "This could make the secret easier to guess.", + file_path=file_path, + line_number=line_number, + severity="MEDIUM", + recommendation="Use cryptographically secure random generation for JWT secrets." + ) + + def _is_low_entropy_secret(self, secret: str) -> bool: + """Check if the secret has low entropy""" + # Check for repeated characters + if len(set(secret)) < len(secret) * 0.6: # If unique characters are less than 60% + return True + + # Check for common patterns + patterns = [r'(.)\1{3,}', r'123', r'abc', r'qwerty'] + for pattern in patterns: + if re.search(pattern, secret.lower()): + return True + + return False + + def _check_additional_security_config(self, content: str, file_path: str): + """Check additional security configurations""" + # Check for missing other important configurations + required_configs = { + 'SUPABASE_URL': 'Database connection configuration', + 'SUPABASE_ANON_KEY': 'Database authentication key' + } + + for config_key, description in required_configs.items(): + if config_key not in content: + self.add_finding( + title=f"Missing Configuration: {config_key}", + description=f"Required configuration {config_key} not found. " + f"This is needed for: {description}", + file_path=file_path, + severity="LOW", + recommendation=f"Add {config_key} configuration to your .env file." 
+ ) + + def _check_jwt_implementation_consistency(self, target_path: str): + """Check JWT implementation consistency""" + + # Check for two JWT middleware files + jwt_files = [ + 'authenticateToken.js', # New version + 'middleware.js', # Old version + 'middleware/authenticateToken.js' + ] + + found_implementations = [] + + for jwt_file in jwt_files: + jwt_path = os.path.join(target_path, jwt_file) + if os.path.exists(jwt_path): + found_implementations.append(jwt_path) + self._analyze_jwt_implementation(jwt_path, target_path) + + # If multiple JWT implementations are found, issue a warning + if len(found_implementations) > 1: + self.add_finding( + title="Multiple JWT Implementation Files Detected", + description=f"Found {len(found_implementations)} different JWT middleware files: " + f"{', '.join([os.path.basename(f) for f in found_implementations])}. " + "This could lead to inconsistent authentication behavior.", + file_path="Multiple files", + severity="MEDIUM", + recommendation="Consider consolidating to a single JWT middleware implementation " + "to avoid confusion and ensure consistent behavior." + ) + + def _analyze_jwt_implementation(self, file_path: str, base_path: str): + """Analyze JWT implementation file - Check best practices""" + try: + content = self.read_file_safe(file_path) + if not content: + return + + relative_path = self.get_relative_path(file_path, base_path) + lines = content.split('\n') + + # Check if the new authService is used + uses_auth_service = 'authService' in content + uses_direct_jwt = 'jwt.verify' in content + + if uses_direct_jwt and not uses_auth_service: + self.add_finding( + title="Direct JWT Usage Instead of AuthService", + description=f"File {os.path.basename(file_path)} uses direct jwt.verify() " + "instead of the centralized authService. 
This bypasses your " + "unified authentication logic.", + file_path=relative_path, + severity="MEDIUM", + recommendation="Consider updating this file to use authService.verifyAccessToken() " + "for consistent authentication behavior." + ) + + # Check error handling completeness + self._check_error_handling(content, lines, relative_path) + + except Exception as e: + self.logger.error(f"Error analyzing JWT implementation {file_path}: {e}") + + def _check_error_handling(self, content: str, lines: List[str], file_path: str): + """Check error handling completeness""" + + # Check for appropriate error responses + error_patterns = [ + 'TokenExpiredError', + 'JsonWebTokenError', + 'TOKEN_EXPIRED', + 'INVALID_TOKEN' + ] + + has_proper_error_handling = any(pattern in content for pattern in error_patterns) + + if 'jwt.verify' in content and not has_proper_error_handling: + self.add_finding( + title="Incomplete JWT Error Handling", + description="JWT verification code lacks comprehensive error handling. " + "Should handle TokenExpiredError, JsonWebTokenError, and other JWT-related errors.", + file_path=file_path, + severity="LOW", + recommendation="Add comprehensive error handling for different JWT error types " + "to provide better user experience and security." 
+ ) + + def _check_auth_service_config(self, target_path: str): + """Check authService configuration""" + auth_service_path = os.path.join(target_path, 'services', 'authService.js') + + if not os.path.exists(auth_service_path): + return + + try: + content = self.read_file_safe(auth_service_path) + if not content: + return + + relative_path = self.get_relative_path(auth_service_path, target_path) + + # Check access token expiry configuration + access_token_pattern = r'accessTokenExpiry\s*=\s*[\'"`]([^\'"`]+)[\'"`]' + refresh_token_pattern = r'refreshTokenExpiry\s*=\s*([^;]+);' + + access_match = re.search(access_token_pattern, content) + refresh_match = re.search(refresh_token_pattern, content) + + if access_match: + access_expiry = access_match.group(1) + if access_expiry not in ['15m', '10m', '5m']: # Recommended short-term + self.add_finding( + title=f"Long Access Token Expiry: {access_expiry}", + description=f"Access token expiry is set to {access_expiry}. " + "For security, recommend 15 minutes or less.", + file_path=relative_path, + severity="LOW", + recommendation="Set access token expiry to 15m or shorter for better security." + ) + + # Check algorithm configuration + if 'HS256' not in content and 'algorithm' in content: + self.add_finding( + title="Non-Standard JWT Algorithm", + description="JWT signing algorithm might not be explicitly set to HS256. " + "This could lead to algorithm confusion attacks.", + file_path=relative_path, + severity="LOW", + recommendation="Explicitly specify 'HS256' algorithm in JWT configuration." 
+ ) + + except Exception as e: + self.logger.error(f"Error analyzing auth service {auth_service_path}: {e}") + + def _check_middleware_configuration(self, target_path: str): + """Check middleware configuration - Check global configuration in server.js""" + server_path = os.path.join(target_path, 'server.js') + + if not os.path.exists(server_path): + return + + try: + content = self.read_file_safe(server_path) + if not content: + return + + relative_path = self.get_relative_path(server_path, target_path) + + # Check for global authentication middleware (may not be necessary, but worth a reminder) + if 'authenticateToken' in content and 'app.use' in content: + # If there is global JWT middleware, check if it is reasonable + lines = content.split('\n') + for i, line in enumerate(lines, 1): + if 'app.use' in line and 'authenticateToken' in line: + self.add_finding( + title="Global JWT Middleware Detected", + description="Found global JWT middleware in server.js. " + "This will require authentication for ALL routes including public ones.", + file_path=relative_path, + line_number=i, + severity="HIGH", + recommendation="Consider using route-specific JWT middleware instead of global middleware " + "to avoid blocking public endpoints." + ) + + except Exception as e: + self.logger.error(f"Error analyzing server configuration {server_path}: {e}") + + def generate_recommendation(self, issue_type: str) -> str: + """Generate specific recommendation based on issue type""" + if issue_type == "low_entropy": + return """Improve JWT secret security: + +1. Generate a strong secret: + const crypto = require('crypto'); + const secret = crypto.randomBytes(64).toString('hex'); + +2. Store in environment variables: + JWT_SECRET=your-generated-secret + +3. Use different secrets for different environments +4. Rotate secrets periodically +5. Consider using asymmetric keys (RS256) for larger systems""" + + elif issue_type == "direct_jwt": + return """Centralize JWT verification: + +1. 
Create an auth service: + // services/authService.js + class AuthService { + static verifyToken(token) { + return jwt.verify(token, process.env.JWT_SECRET); + } + } + +2. Update middleware: + const AuthService = require('../services/authService'); + + function authenticateToken(req, res, next) { + try { + const token = req.headers.authorization?.split(' ')[1]; + req.user = AuthService.verifyToken(token); + next(); + } catch (err) { + res.status(401).json({ error: 'Invalid token' }); + } + }""" + + elif issue_type == "multiple_implementation": + return """Consolidate JWT implementations: + +1. Remove duplicate files +2. Create a single auth middleware directory: + /middleware + /auth + index.js - Main export + verify.js - Token verification + generate.js - Token generation + refresh.js - Token refresh logic + +3. Update all imports to use the centralized version +4. Add tests to ensure consistent behavior""" + + else: # incomplete_error + return """Improve JWT error handling: + +1. Handle specific JWT errors: + try { + const decoded = jwt.verify(token, secret); + req.user = decoded; + } catch (err) { + if (err instanceof jwt.TokenExpiredError) { + return res.status(401).json({ error: 'Token expired' }); + } + if (err instanceof jwt.JsonWebTokenError) { + return res.status(401).json({ error: 'Invalid token' }); + } + return res.status(401).json({ error: 'Authentication failed' }); + }""" + + +# Test function +def test_plugin(): + """Test plugin basic functionality""" + plugin = JWTConfigurationPlugin() + + print("Plugin Info:", plugin.get_plugin_info()) + print("Severity Level:", plugin.get_severity_level()) + print("✅ Updated JWT Configuration Plugin initialized successfully") + + +if __name__ == '__main__': + test_plugin() \ No newline at end of file diff --git a/Vulnerability_Tool_V2/plugins/jwt_security/jwt_missing.py b/Vulnerability_Tool_V2/plugins/jwt_security/jwt_missing.py new file mode 100644 index 0000000..3f9a697 --- /dev/null +++ 
b/Vulnerability_Tool_V2/plugins/jwt_security/jwt_missing.py @@ -0,0 +1,187 @@ +#!/usr/bin/env python3 +""" +JWT Missing Protection Plugin +Detecting API endpoints missing JWT protection +""" + +import os +import re +import logging +from typing import List, Dict, Any, Optional +from plugins.base_plugin import BaseSecurityPlugin, SecurityFinding + +class JWTMissingProtectionPlugin(BaseSecurityPlugin): + """JWT Missing Protection Detection Plugin""" + + name = "JWT Missing Protection Detector" + version = "2.0.0" + description = "Detect API endpoints missing JWT authentication protection" + + def __init__(self, config: Dict[str, Any] = None): + super().__init__(config or {}) + self.logger = logging.getLogger(__name__) + + # Public endpoints (no JWT protection needed) + self.public_endpoints = { + '/health', '/api-docs', '/swagger', '/login', '/register', + '/auth/login', '/auth/register', '/auth/refresh', '/signup', + '/contactus', '/articles' + } + + # File extensions to scan + self.target_extensions = ('.js', '.ts', '.py') + + def get_plugin_info(self) -> Dict[str, str]: + return { + "id": "jwt_missing_protection", + "name": self.name, + "version": self.version, + "description": self.description, + } + + def get_severity_level(self) -> str: + return "medium" + + def scan(self, target_path: str = None) -> List[SecurityFinding]: + """Scan the target path for missing JWT protection endpoints""" + findings = [] + + if not target_path or not os.path.exists(target_path): + return findings + + try: + for root, dirs, files in os.walk(target_path): + # Skip specific directories + dirs[:] = [d for d in dirs if d not in {'.git', 'node_modules', '__pycache__', '.venv'}] + + for file in files: + if file.endswith(self.target_extensions): + file_path = os.path.join(root, file) + file_findings = self._scan_file(file_path, target_path) + findings.extend(file_findings) + + except Exception as e: + self.logger.error(f"Error occurred during scanning: {e}") + + return findings + + def 
_scan_file(self, file_path: str, base_path: str) -> List[SecurityFinding]: + """Scan a single file""" + findings = [] + + try: + with open(file_path, 'r', encoding='utf-8', errors='ignore') as f: + content = f.read() + lines = content.split('\n') + + relative_path = os.path.relpath(file_path, base_path) + + # Check route definitions + route_patterns = [ + r'app\.(get|post|put|delete|patch)\s*\(\s*[\'"`]([^\'"`]+)[\'"`]', + r'router\.(get|post|put|delete|patch)\s*\(\s*[\'"`]([^\'"`]+)[\'"`]', + ] + + for i, line in enumerate(lines, 1): + for pattern in route_patterns: + matches = re.finditer(pattern, line, re.IGNORECASE) + + for match in matches: + if len(match.groups()) >= 2: + method = match.group(1).upper() + endpoint = match.group(2) + else: + continue + + # Skip public endpoints + if self._is_public_endpoint(endpoint): + continue + + # Check for JWT protection + if not self._has_jwt_protection(line, lines, i): + # Create finding via helper to ensure consistent fields + finding = self.add_finding( + title=f"Missing JWT Protection: {method} {endpoint}", + description=f"API endpoint {method} {endpoint} lacks JWT authentication middleware", + file_path=relative_path, + line_number=i, + severity="MEDIUM", + recommendation=self._get_recommendation(endpoint, method) + ) + # add route metadata so engine can apply public/protected filtering + try: + # standardized route: endpoint path only (e.g. 
'/users') + finding.route = endpoint + finding.http_method = method + except Exception: + pass + findings.append(finding) + + except Exception as e: + self.logger.error(f"Error occurred while scanning file {file_path}: {e}") + + return findings + + def _is_public_endpoint(self, endpoint: str) -> bool: + """Check if the endpoint is public""" + endpoint = endpoint.lower() + return any(pub in endpoint for pub in self.public_endpoints) + + def _has_jwt_protection(self, line: str, all_lines: List[str], line_number: int) -> bool: + """Check for JWT protection middleware""" + # Check current line + jwt_patterns = [ + 'authenticateToken', 'authMiddleware', 'verifyToken', + 'requireAuth', 'jwt', 'authenticate' + ] + + line_lower = line.lower() + if any(pattern.lower() in line_lower for pattern in jwt_patterns): + return True + + # Check surrounding lines + start = max(0, line_number - 3) + end = min(len(all_lines), line_number + 3) + + for i in range(start, end): + if i < len(all_lines): + check_line = all_lines[i].lower() + if any(pattern.lower() in check_line for pattern in jwt_patterns): + return True + + return False + + def _get_recommendation(self, endpoint: str, method: str) -> str: + """Get fix recommendation""" + # Return a structured recommendation + return { + 'summary': f'Protect the {method} {endpoint} endpoint with authentication middleware.', + 'steps': [ + f"Import authentication middleware: const authenticateToken = require('../middleware/authenticateToken');", + f"Add middleware to route: router.{method.lower()}('{endpoint}', authenticateToken, (req, res) => {{ /* handler */ }});", + "Ensure JWT configuration is secure: use strong secrets, set appropriate expiration, and handle errors properly." 
+ ], + 'code': f"router.{method.lower()}('{endpoint}', authenticateToken, (req, res) => {{\n // Your route handler\n}});" + } + + def run(self, target_path: str = None) -> List[SecurityFinding]: + """Backward compatibility method""" + return self.scan(target_path) + +# Export plugin class +Plugin = JWTMissingProtectionPlugin + +def test_plugin(): + """Test plugin functionality""" + try: + plugin = JWTMissingProtectionPlugin() + info = plugin.get_plugin_info() + print("Plugin Info:", info) + print("✅ JWT Missing Protection Plugin initialized successfully") + return True + except Exception as e: + print(f"❌ Plugin test failed: {e}") + return False + +if __name__ == '__main__': + test_plugin() \ No newline at end of file diff --git a/Vulnerability_Tool_V2/plugins/rls_security/__init__.py b/Vulnerability_Tool_V2/plugins/rls_security/__init__.py new file mode 100644 index 0000000..35e2b56 --- /dev/null +++ b/Vulnerability_Tool_V2/plugins/rls_security/__init__.py @@ -0,0 +1 @@ +# RLS Security Plugin Package diff --git a/Vulnerability_Tool_V2/plugins/rls_security/rls_missing.py b/Vulnerability_Tool_V2/plugins/rls_security/rls_missing.py new file mode 100644 index 0000000..f676d06 --- /dev/null +++ b/Vulnerability_Tool_V2/plugins/rls_security/rls_missing.py @@ -0,0 +1,41 @@ +#!/usr/bin/env python3 +""" +Minimal RLS Missing Protection Plugin +Minimized RLS plugin to prevent dependency errors +""" + +from plugins.base_plugin import BaseSecurityPlugin, SecurityFinding +from typing import List, Dict, Any + +class RLSMissingProtectionPlugin(BaseSecurityPlugin): + """Minimized RLS Missing Protection Detection Plugin""" + + name = "RLS Missing Protection Detector" + version = "1.0.0" + description = "Minimal RLS protection detector to prevent dependency errors" + + def __init__(self, config: Dict[str, Any] = None): + super().__init__(config or {}) + + def get_plugin_info(self) -> Dict[str, str]: + return { + "id": "rls_missing_protection", + "name": self.name, + "version": 
self.version, + "description": self.description, + } + + def get_severity_level(self) -> str: + return "low" + + def scan(self, target_path: str = None) -> List[SecurityFinding]: + """Minimized scan - no actual checks performed to avoid errors""" + # Return empty results to avoid false positives + return [] + + def run(self, target_path: str = None) -> List[SecurityFinding]: + """Backward compatibility method""" + return self.scan(target_path) + +# Export plugin class +Plugin = RLSMissingProtectionPlugin diff --git a/Vulnerability_Tool_V2/plugins/rls_security_disabled/rls_missing.py b/Vulnerability_Tool_V2/plugins/rls_security_disabled/rls_missing.py new file mode 100644 index 0000000..192b280 --- /dev/null +++ b/Vulnerability_Tool_V2/plugins/rls_security_disabled/rls_missing.py @@ -0,0 +1,163 @@ +#!/usr/bin/env python3 +""" +RLS Missing Protection Detector + +A simple, robust placeholder implementation that searches your codebase for keywords/statements related to Row-Level Security (RLS). +If no obvious RLS configuration or enablement statements are found, a warning is returned. +The plugin's output uses a common dictionary structure, making it easy to integrate with your project's existing PluginManager/Scanner. 
+""" + +from __future__ import annotations +import re +import os +import logging +from typing import List, Dict, Optional + +try: + # Base class for project definitions in a formal environment + from plugins.base_plugin import BaseSecurityPlugin, SecurityFinding +except Exception: + # Provide a minimal compatible alternative for single-file testing + class BaseSecurityPlugin: + name = "BaseSecurityPlugin" + version = "0.0.0" + + def __init__(self, *a, **k): + pass + +logger = logging.getLogger("PluginManager") + + +class RLSMissingProtectionPlugin(BaseSecurityPlugin): + """Plugin for detecting missing Row-Level Security (RLS) protection.""" + + name = "RLS Missing Protection Detector" + version = "1.0.0" + description = "Detect potential missing Row-Level Security (RLS) protections." + + # File type and search keyword/regex + _target_extensions = (".sql", ".ddl", ".yml", ".yaml", ".py", ".conf", ".ini", ".json") + _patterns = [ + re.compile(r"row\s*level\s*security", re.I), + re.compile(r"enable\s+row\s+level\s+security", re.I), + re.compile(r"alter\s+table\s+.*\s+enable\s+row\s+level\s+security", re.I), + re.compile(r"\bpolicy\b", re.I), # SQL POLICY + re.compile(r"\brls\b", re.I), + re.compile(r"rls_enabled|enable_rls|row_level_security", re.I), + ] + + def __init__(self, config: Optional[Dict] = None): + super().__init__(config) + self.project_root = os.getcwd() + + def get_plugin_info(self) -> Dict[str, str]: + """Return plugin information (for loader/UI use)""" + return { + "name": self.name, + "version": self.version, + "description": self.description, + } + + def get_severity_level(self) -> str: + """Default severity level (used when no clear evidence is found)""" + return "MEDIUM" + + def _is_target_file(self, path: str) -> bool: + return any(path.lower().endswith(ext) for ext in self._target_extensions) + + def _scan_file(self, path: str) -> List[SecurityFinding]: + """Scan a single file for RLS patterns - RETURNS STANDARD SecurityFinding objects""" + 
findings = [] + try: + with open(path, "r", encoding="utf-8", errors="ignore") as fh: + for i, line in enumerate(fh, start=1): + for pat in self._patterns: + if pat.search(line): + # Create standard SecurityFinding object + finding = SecurityFinding( + title="Possible RLS-related statement found", + severity="INFO", + file_path=os.path.relpath(path, self.project_root), + description=f"Pattern '{pat.pattern}' matched: {line.strip()}", + line_number=i, + plugin=self.name, + recommendation="Review this RLS configuration to ensure it's properly implemented and covers all sensitive data access patterns." + ) + findings.append(finding) + break + except Exception as e: + logger.debug("Failed to read %s: %s", path, e) + return findings + + def scan(self, target_path: Optional[str] = None) -> List[SecurityFinding]: + """ + Run the RLS detection. Returns standard SecurityFinding objects. + """ + base_path = target_path or self.project_root + logger.info("Running RLS Missing Protection Detector on %s", base_path) + findings = [] + found_evidence = False + + for root, dirs, files in os.walk(base_path): + skip_dirs = {"venv", ".venv", "__pycache__", "node_modules", ".git"} + dirs[:] = [d for d in dirs if d not in skip_dirs] + + for fname in files: + fpath = os.path.join(root, fname) + if not self._is_target_file(fpath): + continue + file_findings = self._scan_file(fpath) + if file_findings: + found_evidence = True + findings.extend(file_findings) + + if not found_evidence: + # Create standard SecurityFinding for missing RLS + finding = SecurityFinding( + title="Potential missing Row-Level Security (RLS)", + severity=self.get_severity_level(), + file_path="General Project Scan", + description=( + "No obvious RLS-related configuration or SQL statements were detected. " + "Ensure that sensitive tables enforce row-level access controls (policies)." + ), + line_number=None, + plugin=self.name, + recommendation="""To implement Row-Level Security (RLS): + +1. 
Enable RLS on sensitive tables: + ALTER TABLE your_table ENABLE ROW LEVEL SECURITY; + +2. Create RLS policies: + CREATE POLICY user_isolation_policy ON your_table + FOR ALL + USING (user_id = current_user_id()); + +3. Test RLS effectiveness: + - Verify different users can only access their own data + - Confirm superusers bypass RLS as expected + - Check policy performance impact + +4. Consider implementing for these table types: + - User profiles and personal data + - Financial records + - Medical information + - Private communications + - Access logs and audit trails""" + ) + findings.append(finding) + + logger.info("RLS detector finished, findings: %d", len(findings)) + return findings + + # Keep run() for backward compatibility + def run(self) -> List[SecurityFinding]: + return self.scan() + + +# Compatible exports / convenience factory + module instance +Plugin = RLSMissingProtectionPlugin +get_plugin = lambda *a, **kw: RLSMissingProtectionPlugin(*a, **kw) +create_plugin = lambda *a, **kw: RLSMissingProtectionPlugin(*a, **kw) +plugin = RLSMissingProtectionPlugin() +__all__ = ["RLSMissingProtectionPlugin", "Plugin", "get_plugin", "create_plugin", "plugin"] \ No newline at end of file diff --git a/Vulnerability_Tool_V2/requirements.txt b/Vulnerability_Tool_V2/requirements.txt new file mode 100644 index 0000000..0c7da7d --- /dev/null +++ b/Vulnerability_Tool_V2/requirements.txt @@ -0,0 +1,15 @@ +# Core dependencies +PyYAML>=6.0 +Jinja2>=3.1.0 +colorama>=0.4.6 + +# Development dependencies +pytest>=7.0.0 +pytest-cov>=4.0.0 +black>=22.0.0 +flake8>=5.0.0 + +# Optional dependencies for advanced features +requests>=2.28.0 +gitpython>=3.1.0 + diff --git a/Vulnerability_Tool_V2/scanner_v2.py b/Vulnerability_Tool_V2/scanner_v2.py new file mode 100644 index 0000000..76ac525 --- /dev/null +++ b/Vulnerability_Tool_V2/scanner_v2.py @@ -0,0 +1,276 @@ +#!/usr/bin/env python3 +""" +NutriHelp Security Scanner V2.0 - Main Entry Point +Modular security scanner main program +""" + 
+import os +import sys +import argparse +import json +import re +import logging +from pathlib import Path + +# Add the current directory to the Python path +sys.path.insert(0, os.path.dirname(os.path.abspath(__file__))) + +from core.scanner_engine import VulnerabilityScannerEngine +from core.config_manager import ConfigManager +from core.report_renderer import render_html_report + + +def setup_logging(verbose: bool = False): + """Set up logging system""" + level = logging.DEBUG if verbose else logging.INFO + logging.basicConfig( + level=level, + format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', + handlers=[logging.StreamHandler()] + ) + + +def main(): + """Main function""" + parser = argparse.ArgumentParser( + description='NutriHelp Security Scanner V2.0 - Modular security scanner', + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=""" + Example usage: + %(prog)s --target ../ # Scan parent directory + %(prog)s --target ../ --format json # Output in JSON format + %(prog)s --target ../ --output report.html --format html + %(prog)s --config custom_config.yaml --target ../ + """ + ) + + parser.add_argument('--target', '-t', required=True, + help='Target directory path') + parser.add_argument('--config', '-c', + help='Configuration file path') + parser.add_argument('--format', '-f', default='summary', + choices=['json', 'html', 'summary'], + help='Output format (default: summary)') + parser.add_argument('--output', '-o', + help='Output file path (default: stdout)') + parser.add_argument('--verbose', '-v', action='store_true', + help='Show verbose logs') + parser.add_argument('--version', action='version', version='%(prog)s 2.0.0') + + args = parser.parse_args() + + # Set up logging + setup_logging(args.verbose) + logger = logging.getLogger("main") + + try: + logger.info("Starting NutriHelp Security Scanner V2.0") + + # 1. 
Load configuration + config_manager = ConfigManager(args.config) + if not config_manager.validate_config(): + logger.error("Configuration validation failed") + return 1 + + # 2. Initialize scanner engine + # Inject sensible default global excludes to reduce noise when scanning repository root + # Do not overwrite user-provided settings; append missing defaults. + extra_excludes = [ + 'Vulnerability_Tool_V2', + 'Vulnerability_Tool', + 'node_modules', + 'tests', + 'test', + '.pytest_cache', + '__pycache__' + ] + + full_cfg = config_manager.config or {} + existing = full_cfg.get('global_exclude_paths', []) or [] + # ensure list + if not isinstance(existing, list): + existing = list(existing) + + for p in extra_excludes: + if p not in existing: + existing.append(p) + + full_cfg['global_exclude_paths'] = existing + + # Pass the full configuration object to the engine so global excludes are honored + engine = VulnerabilityScannerEngine(full_cfg) + + # 3. Load plugins + plugin_configs = config_manager.get_enabled_plugins() + # Ensure general_security is enabled by default unless explicitly disabled + if 'general_security' not in plugin_configs: + plugin_configs['general_security'] = { 'enabled': True } + engine.load_plugins(plugin_configs) + + if engine.stats['plugins_loaded'] == 0: + logger.warning("No plugins loaded! Scanner will not find any issues.") + + # 4. Execute scan + logger.info(f"Scanning target: {args.target}") + scan_results = engine.scan_target(args.target) + + # 5. Generate output + output_content = format_output(scan_results, args.format, config_manager) + + # 6. Write output + if args.output: + write_output_file(output_content, args.output, args.format) + logger.info(f"Results saved to: {args.output}") + else: + print(output_content) + + # 7. 
Set exit code + critical_count = scan_results['summary']['by_severity'].get('CRITICAL', 0) + if critical_count > 0: + logger.warning(f"Found {critical_count} critical vulnerabilities!") + return 1 + + logger.info("Scan completed successfully") + return 0 + + except FileNotFoundError as e: + logger.error(f"File not found: {e}") + return 1 + except Exception as e: + logger.error(f"Unexpected error: {e}") + if args.verbose: + import traceback + traceback.print_exc() + return 1 + + +def format_output(scan_results: dict, output_format: str, config_manager: ConfigManager) -> str: + """Format output results""" + if output_format == 'json': + # Ensure findings include rule metadata (rule_id, rule_name, rule_mode, confidence) + return json.dumps(scan_results, indent=2, ensure_ascii=False) + + elif output_format == 'html': + # Use shared renderer for consistent output with API + return render_html_report(scan_results, config_manager) + + elif output_format == 'summary': + return generate_summary_report(scan_results) + + else: + raise ValueError(f"Unsupported output format: {output_format}") + + +def generate_summary_report(scan_results: dict) -> str: + """Generate summary report""" + summary = scan_results['summary'] + findings = scan_results['findings'] + scan_info = scan_results['scan_info'] + + lines = [] + lines.append("🔒 NutriHelp Vulnerability Scanner V2.0 Results") + lines.append("=" * 50) + lines.append("") + + # Scan information + lines.append(f"📁 Target: {scan_info['target_path']}") + lines.append(f"⏰ Scan Time: {scan_info['timestamp']}") + lines.append(f"📊 Files Scanned: {scan_info['stats']['files_scanned']}") + lines.append(f"🔌 Plugins Used: {scan_info['stats']['plugins_loaded']}") + lines.append("") + + # Summary statistics + lines.append("📊 Issues Found by Severity:") + severity_colors = { + 'CRITICAL': '🔴', + 'HIGH': '🟠', + 'MEDIUM': '🟡', + 'LOW': '🟢' + } + + total_issues = summary['total'] + if total_issues == 0: + lines.append(" ✅ No vulnerabilities found!") 
+ else: + for severity in ['CRITICAL', 'HIGH', 'MEDIUM', 'LOW']: + count = summary['by_severity'].get(severity, 0) + if count > 0: + color = severity_colors.get(severity, '⚪') + lines.append(f" {color} {severity}: {count}") + + lines.append("") + lines.append(f"Total Issues: {total_issues}") + + # Plugin statistics + if summary['by_plugin']: + lines.append("") + lines.append("🔌 Issues by Plugin:") + for plugin_name, count in summary['by_plugin'].items(): + lines.append(f" • {plugin_name}: {count}") + + # Critical issues details + critical_findings = [f for f in findings if f.get('severity') == 'CRITICAL'] + if critical_findings: + lines.append("") + lines.append("🚨 CRITICAL ISSUES (Need immediate attention):") + lines.append("-" * 40) + + for i, finding in enumerate(critical_findings[:5], 1): # Only show the first 5 + lines.append(f"{i}. {finding['title']}") + lines.append(f" 📁 File: {finding['file_path']}") + if finding.get('line_number'): + lines.append(f" 📍 Line: {finding['line_number']}") + # include rule metadata if present + rule_meta = [] + if finding.get('rule_id'): + rule_meta.append(f"Rule ID: {finding.get('rule_id')}") + if finding.get('rule_name'): + rule_meta.append(f"Rule: {finding.get('rule_name')}") + if finding.get('rule_mode'): + rule_meta.append(f"Mode: {finding.get('rule_mode')}") + if finding.get('confidence'): + rule_meta.append(f"Confidence: {finding.get('confidence')}") + if rule_meta: + lines.append(f" 🔎 {' | '.join(rule_meta)}") + lines.append(f" 📝 {finding['description']}") + lines.append("") + + if len(critical_findings) > 5: + lines.append(f" ... 
and {len(critical_findings) - 5} more critical issues") + + # High priority issues overview + high_findings = [f for f in findings if f.get('severity') == 'HIGH'] + if high_findings and len(high_findings) <= 3: # Only show when high priority issues are few + lines.append("") + lines.append("🔶 HIGH PRIORITY ISSUES:") + lines.append("-" * 30) + + for finding in high_findings: + lines.append(f"• {finding['title']} ({finding['file_path']})") + + lines.append("") + lines.append("💡 Use --format html for detailed visual report") + lines.append("💡 Use --format json for machine-readable output") + + return '\n'.join(lines) + + + # CLI previously had a large in-file renderer; replaced by shared renderer + + +def write_output_file(content: str, file_path: str, output_format: str): + """Write output file""" + # Ensure output directory exists + output_dir = os.path.dirname(file_path) + if output_dir and not os.path.exists(output_dir): + os.makedirs(output_dir) + + # Determine encoding + encoding = 'utf-8' + + with open(file_path, 'w', encoding=encoding) as f: + f.write(content) + + +if __name__ == '__main__': + sys.exit(main()) \ No newline at end of file diff --git a/Vulnerability_Tool_V2/setup_venv.sh b/Vulnerability_Tool_V2/setup_venv.sh new file mode 100755 index 0000000..bc02ec4 --- /dev/null +++ b/Vulnerability_Tool_V2/setup_venv.sh @@ -0,0 +1,51 @@ +#!/usr/bin/env bash +set -euo pipefail + +# setup_venv.sh - create venv inside Vulnerability_Tool_V2 and install requirements +# Usage: ./setup_venv.sh [--upgrade-pip] + +ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +VENV_DIR="$ROOT_DIR/venv" +REQ_FILE="$ROOT_DIR/requirements.txt" +PYTHON_CMD=python3 + +echo "Scanner venv setup script" + +echo "Using python command: ${PYTHON_CMD}" + +if ! command -v ${PYTHON_CMD} >/dev/null 2>&1; then + echo "${PYTHON_CMD} not found in PATH. Please install Python 3.8+ and retry." >&2 + exit 2 +fi + +if [ ! 
-f "$REQ_FILE" ]; then + echo "Requirements file not found at $REQ_FILE" >&2 + exit 2 +fi + +if [ -d "$VENV_DIR" ]; then + echo "Virtualenv already exists at $VENV_DIR" + echo "To recreate, remove the directory and run this script again: rm -rf $VENV_DIR" +else + echo "Creating virtualenv at $VENV_DIR" + ${PYTHON_CMD} -m venv "$VENV_DIR" +fi + +# Activate and install +# shellcheck disable=SC1091 +source "$VENV_DIR/bin/activate" + +if [ "${1-}" = "--upgrade-pip" ]; then + echo "Upgrading pip..." + pip install --upgrade pip +fi + +echo "Installing Python dependencies from $REQ_FILE" +pip install -r "$REQ_FILE" + +echo "Done. To activate the venv in your session:" +echo " source $VENV_DIR/bin/activate" +echo "Then you can run the scanner directly, for example:" +echo " python $ROOT_DIR/scanner_v2.py --target ../ --format json" + +exit 0 diff --git a/Vulnerability_Tool_V2/templates/report.html b/Vulnerability_Tool_V2/templates/report.html new file mode 100644 index 0000000..8ae54f8 --- /dev/null +++ b/Vulnerability_Tool_V2/templates/report.html @@ -0,0 +1,183 @@ + + + + + + NutriHelp Security Scanner V2.0 Report + + + +
+
+

🔒 NutriHelp Security Scanner V2.0

+
+

Scan time: {{ generated_at }}

+

Target path: {{ scan_info.target_path }}

+

Scanner version: {{ scan_info.scanner_version|default('2.0.0') }}

+
+
+ +
+ {% set severity_counts = {'CRITICAL': 0, 'HIGH': 0, 'MEDIUM': 0, 'LOW': 0} %} + {% for severity, count in summary.by_severity.items() %} + {% if severity.upper() in severity_counts %} + {% set _ = severity_counts.update({severity.upper(): count}) %} + {% endif %} + {% endfor %} + +
+

{{ severity_counts.CRITICAL }}

+

Critical Issues

+
+
+

{{ severity_counts.HIGH }}

+

High Severity

+
+
+

{{ severity_counts.MEDIUM }}

+

Medium Severity

+
+
+

{{ severity_counts.LOW }}

+

Low Severity

+
+
+ +
+
+
+
{{ scan_info.stats.files_scanned }}
+
Files Scanned
+
+
+
{{ scan_info.stats.plugins_loaded }}
+
Plugins Used
+
+
+
{{ summary.total }}
+
Total Issues
+
+
+ +

🔍 Detailed Findings

+ {% for f in findings %} +
+
+
{{ f.title }}
+ {{ f.severity }} +
+ +
+ 📁 {{ f.file_path }} + {% if f.line_number %} (Line {{ f.line_number }}){% endif %} +
+ +
{{ f.description }}
+ + {% if f.recommendation is defined and f.recommendation %} +
+ 💡 Recommendation: + {{ f.recommendation|replace('\n', '
')|safe }} +
+ {% endif %} + +
+ Plugin: {{ f.plugin_name }} +
+
+ {% endfor %} +
+ + +
+ + \ No newline at end of file diff --git a/Vulnerability_Tool_V2/tests/test_basic_functionality.py b/Vulnerability_Tool_V2/tests/test_basic_functionality.py new file mode 100644 index 0000000..bd61837 --- /dev/null +++ b/Vulnerability_Tool_V2/tests/test_basic_functionality.py @@ -0,0 +1,140 @@ +#!/usr/bin/env python3 +""" +Basic functional unit testing for Vulnerability_Tool_V2 +tests/test_basic_functionality.py +""" + +import unittest +import sys +import os +from pathlib import Path + +# Add project root directory to Python path +project_root = Path(__file__).parent.parent +sys.path.insert(0, str(project_root)) + +try: + from core.config_manager import ConfigManager + from core.scanner_engine import SecurityScannerEngine + from plugins.base_plugin import BaseSecurityPlugin, SecurityFinding +except ImportError as e: + print(f"Import error: {e}") + print("This is expected if modules are not yet implemented") + + +class TestBasicFunctionality(unittest.TestCase): + """Basic functionality test class""" + + def setUp(self): + """Test setup""" + self.test_config_path = project_root / "config" / "scanner_config.yaml" + + def test_config_manager_initialization(self): + """Test ConfigManager initialization""" + try: + config_manager = ConfigManager() + self.assertIsNotNone(config_manager) + print("✅ ConfigManager initialization test passed") + except Exception as e: + self.skipTest(f"ConfigManager not available: {e}") + + def test_scanner_engine_initialization(self): + """Test SecurityScannerEngine initialization""" + try: + config_manager = ConfigManager() + scanner_config = config_manager.get_scanner_config() + engine = SecurityScannerEngine(scanner_config) + self.assertIsNotNone(engine) + print("✅ SecurityScannerEngine initialization test passed") + except Exception as e: + self.skipTest(f"SecurityScannerEngine not available: {e}") + + def test_security_finding_creation(self): + """Test SecurityFinding creation""" + try: + finding = SecurityFinding( + title="Test 
Finding", + description="Test Description", + severity="MEDIUM", + file_path="/test/path", + line_number=1, + plugin_name="TestPlugin" + ) + self.assertEqual(finding.title, "Test Finding") + self.assertEqual(finding.severity, "MEDIUM") + print("✅ SecurityFinding creation test passed") + except Exception as e: + self.skipTest(f"SecurityFinding not available: {e}") + + def test_base_plugin_interface(self): + """Test BaseSecurityPlugin interface""" + try: + # Create a simple test plugin + class TestPlugin(BaseSecurityPlugin): + def get_plugin_info(self): + return { + 'name': 'Test Plugin', + 'version': '1.0.0', + 'description': 'Test plugin for unit testing' + } + + def get_severity_level(self): + return 'MEDIUM' + + def scan(self, target_path): + return [] + + plugin = TestPlugin() + info = plugin.get_plugin_info() + self.assertEqual(info['name'], 'Test Plugin') + print("✅ BaseSecurityPlugin interface test passed") + except Exception as e: + self.skipTest(f"BaseSecurityPlugin not available: {e}") + + def test_configuration_file_exists(self): + """Test configuration file existence""" + self.assertTrue(self.test_config_path.exists(), + f"Configuration file not found: {self.test_config_path}") + print("✅ Configuration file existence test passed") + + def test_directory_structure(self): + """Test directory structure""" + required_dirs = [ + "core", + "plugins", + "config", + "reports", + "tests" + ] + + for dir_name in required_dirs: + dir_path = project_root / dir_name + self.assertTrue(dir_path.exists(), f"Required directory not found: {dir_name}") + + print("✅ Directory structure test passed") + + +def run_tests(): + """Run all tests""" + print("🧪 Running basic functionality tests...") + print("=" * 50) + + # Create test suite + test_suite = unittest.TestLoader().loadTestsFromTestCase(TestBasicFunctionality) + + # Run tests + runner = unittest.TextTestRunner(verbosity=2) + result = runner.run(test_suite) + + print("=" * 50) + if result.wasSuccessful(): + print("🎉 All 
basic functionality tests passed!") + return True + else: + print("❌ Some tests failed or were skipped") + return False + + +if __name__ == '__main__': + success = run_tests() + sys.exit(0 if success else 1) \ No newline at end of file diff --git a/Vulnerability_Tool_V2/tests/test_debug_rules_and_html.py b/Vulnerability_Tool_V2/tests/test_debug_rules_and_html.py new file mode 100644 index 0000000..1826704 --- /dev/null +++ b/Vulnerability_Tool_V2/tests/test_debug_rules_and_html.py @@ -0,0 +1,48 @@ +import sys +import os +import json + +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) + +from core.scanner_engine import SecurityScannerEngine +from core.config_manager import ConfigManager +from core.report_renderer import render_html_report + + +def test_debug_rules_toggle_and_html_output(tmp_path): + # setup temp dir with files that match debug-mode rules (email/url/ip) + tdir = tmp_path / "sample2" + tdir.mkdir() + f = tdir / "data.txt" + f.write_text("contact: info@example.com\nlink: https://example.com\nip: 192.168.1.1\n") + + cfg = ConfigManager(None) + scanner_cfg = cfg.get_scanner_config() + + # Load only the general_security plugin to keep test deterministic + pcfg = { + 'general_security': { + 'enabled': True, + 'legacy_rules': {'enabled': True, 'include_debug_rules': False} + } + } + engine = SecurityScannerEngine(scanner_cfg) + engine.load_plugins(pcfg) + + results_no_debug = engine.scan_target(str(tdir)) + # There should be no findings with rule_mode == 'debug' + assert not any(f.get('rule_mode') == 'debug' for f in results_no_debug.get('findings', [])) + + # Now enable debug rules and reload plugins + pcfg['general_security']['legacy_rules']['include_debug_rules'] = True + engine = SecurityScannerEngine(scanner_cfg) + engine.load_plugins(pcfg) + results_with_debug = engine.scan_target(str(tdir)) + + # Now at least one finding should come from a debug-mode rule + assert any(f.get('rule_mode') == 'debug' for f in 
results_with_debug.get('findings', [])) + + # Generate HTML and assert rule metadata label appears in HTML for the findings + scan_results = results_with_debug + html = render_html_report(scan_results) + assert 'Rule:' in html or 'rule_id' in html or 'Rule ID' in html diff --git a/Vulnerability_Tool_V2/tests/test_exclude_paths.py b/Vulnerability_Tool_V2/tests/test_exclude_paths.py new file mode 100644 index 0000000..5a70c7b --- /dev/null +++ b/Vulnerability_Tool_V2/tests/test_exclude_paths.py @@ -0,0 +1,97 @@ +import os +import sys +import tempfile +import shutil +# Ensure repo root is on sys.path for imports during pytest +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))) +from Vulnerability_Tool_V2.core.scanner_engine import VulnerabilityScannerEngine + + +def setup_sample_repo(tmpdir): + # Create sample structure + base = tmpdir + os.makedirs(os.path.join(base, 'src'), exist_ok=True) + os.makedirs(os.path.join(base, 'tests'), exist_ok=True) + os.makedirs(os.path.join(base, 'Vulnerability_Tool_V2'), exist_ok=True) + + with open(os.path.join(base, 'src', 'app.js'), 'w') as f: + f.write("console.log('hello')\n") + + with open(os.path.join(base, 'tests', 'test_dummy.js'), 'w') as f: + f.write("describe('dummy', () => {});\n") + + # file that should be excluded + with open(os.path.join(base, 'Vulnerability_Tool_V2', 'plugins', 'dummy.py'), 'w') as f: + os.makedirs(os.path.join(base, 'Vulnerability_Tool_V2', 'plugins'), exist_ok=True) + f.write('# plugin file') + + return base + + +def test_engine_excludes(tmp_path): + repo = tmp_path / "sample_repo" + repo.mkdir() + + # construct folders + (repo / 'src').mkdir() + (repo / 'tests').mkdir() + (repo / 'Vulnerability_Tool_V2').mkdir() + (repo / 'Vulnerability_Tool_V2' / 'plugins').mkdir(parents=True) + + # create files + (repo / 'src' / 'app.js').write_text("console.log('ok')\n") + (repo / 'tests' / 'test_dummy.js').write_text("describe('x', ()=>{})\n") + (repo / 
'Vulnerability_Tool_V2' / 'plugins' / 'plugin.py').write_text("# plugin file\n") + + # configure engine with global excludes + cfg = { + 'global_exclude_paths': ['tests/', 'Vulnerability_Tool_V2/'] + } + engine = VulnerabilityScannerEngine(cfg) + + # load only the general_security plugin (it exists in repo); but tests should not rely on plugin detection + # We will manually register a minimal plugin that returns a finding for each file that contains 'console' + class DummyPlugin: + def __init__(self, config=None): + self.config = config or {} + self.findings = [] + self.name = 'dummy' + + def get_plugin_info(self): + return {'name': 'dummy', 'version': '0.0.1'} + + def get_severity_level(self): + return 'LOW' + + def scan(self, target_path): + res = [] + for root, dirs, files in os.walk(target_path): + dirs[:] = [d for d in dirs if not any(p and p in os.path.join(root, d) for p in self.config.get('exclude_paths', []))] + for fname in files: + fpath = os.path.join(root, fname) + # use plugin-level file exclusion + if any(p and p in fpath for p in self.config.get('exclude_paths', [])): + continue + try: + with open(fpath, 'r') as fh: + content = fh.read() + if 'console' in content: + res.append({ 'title': 'found console', 'file_path': fpath }) + except Exception: + continue + return res + + # register our dummy plugin + dp = DummyPlugin() + engine.plugin_manager.register_plugin(dp) + + result = engine.scan_target(str(repo)) + + # findings should not include files under tests/ or Vulnerability_Tool_V2/ + for f in result['findings']: + fp = f.get('file_path', '') + assert 'tests/' not in fp + assert 'Vulnerability_Tool_V2/' not in fp + + # ensure at least one finding from src/app.js exists + assert any('app.js' in f.get('file_path', '') for f in result['findings']) diff --git a/Vulnerability_Tool_V2/tests/test_general_security_legacy_rules.py b/Vulnerability_Tool_V2/tests/test_general_security_legacy_rules.py new file mode 100644 index 0000000..2c0d9a0 --- /dev/null 
+++ b/Vulnerability_Tool_V2/tests/test_general_security_legacy_rules.py @@ -0,0 +1,29 @@ +import os +import sys +# ensure project package path is available for imports when running tests from repo root +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) +from plugins.general_security import GeneralSecurityPlugin + + +def test_legacy_api_key_rule_triggers(tmp_path): + # create a sample JS file with hardcoded api_key + sample = tmp_path / "sample_api_key.js" + sample.write_text("const api_key = 'ABCDEFGH12345678';\nconsole.log('ok');\n") + + plugin = GeneralSecurityPlugin(config={}) + findings = plugin.scan(str(tmp_path)) + + # There should be at least one finding whose title includes API_Key or Hardcoded + titles = [f.title for f in plugin.findings] + assert any('API_Key' in t or 'Hardcoded' in t or 'API Key' in t for t in titles), f"No api key finding found: {titles}" + + +def test_permissive_cors_triggers(tmp_path): + sample = tmp_path / "cors.conf" + sample.write_text("Access-Control-Allow-Origin: *\n") + + plugin = GeneralSecurityPlugin(config={}) + findings = plugin.scan(str(tmp_path)) + + titles = [f.title for f in plugin.findings] + assert any('Permissive CORS' in t or 'CORS' in t for t in titles), f"No CORS finding found: {titles}" diff --git a/Vulnerability_Tool_V2/tests/test_general_security_plugin.py b/Vulnerability_Tool_V2/tests/test_general_security_plugin.py new file mode 100644 index 0000000..835379e --- /dev/null +++ b/Vulnerability_Tool_V2/tests/test_general_security_plugin.py @@ -0,0 +1,32 @@ +import os +import sys +from pathlib import Path + +# Ensure the package root (Vulnerability_Tool_V2) is on sys.path so `plugins` is importable +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) + +from plugins.general_security import GeneralSecurityPlugin + + +def make_sample_file(tmp_path, content, name='sample.js'): + p = tmp_path / name + p.write_text(content, encoding='utf-8') + return 
str(p) + + +def test_hardcoded_secret_detection(tmp_path): + content = """ + const JWT_SECRET = "supersecret123"; + """ + f = make_sample_file(tmp_path, content, 'secret.js') + plugin = GeneralSecurityPlugin() + findings = plugin.scan(str(tmp_path)) + assert any('Hardcoded secret' in f.title for f in findings) + + +def test_db_connection_detection(tmp_path): + content = "db_url = 'postgres://user:pass@localhost:5432/dbname'" + f = make_sample_file(tmp_path, content, 'db.conf') + plugin = GeneralSecurityPlugin() + findings = plugin.scan(str(tmp_path)) + assert any('DB connection' in f.title or 'DB connection' in f.description or 'connection string' in f.description for f in findings) diff --git a/Vulnerability_Tool_V2/tests/test_internal_file_exclude.py b/Vulnerability_Tool_V2/tests/test_internal_file_exclude.py new file mode 100644 index 0000000..0b8ac2d --- /dev/null +++ b/Vulnerability_Tool_V2/tests/test_internal_file_exclude.py @@ -0,0 +1,44 @@ +import os +import pytest + +from Vulnerability_Tool_V2.core.scanner_engine import VulnerabilityScannerEngine + + +def _scan_with_config(config): + engine = VulnerabilityScannerEngine(config=config) + engine.load_plugins({}) + return engine.scan_target(os.getcwd()) + + +def test_default_internal_files_excluded(): + # default config should exclude internal scanner files + cfg = { + 'scanner': { + 'exclude_internal_scanner_files': True, + 'internal_paths': ['routes/scanner.js', 'reports/'] + } + } + + res = _scan_with_config(cfg) + file_paths = [f.get('file_path') or '' for f in res['findings']] + + # Ensure scanner route is not present in findings + assert not any('routes/scanner.js' in p or 'scanner.js' == os.path.basename(p) for p in file_paths) + + +def test_disable_internal_exclusion_includes_files(): + # When toggle is off, internal paths should be allowed + cfg = { + 'scanner': { + 'exclude_internal_scanner_files': False, + 'internal_paths': ['routes/scanner.js', 'reports/'] + } + } + + res = _scan_with_config(cfg) + 
file_paths = [f.get('file_path') or '' for f in res['findings']] + + # It's acceptable if no finding references the scanner file because plugins may not flag it, + # but we at least assert that the engine did not remove entries that explicitly reference it. + # To be conservative, check that sanitization did not forcibly remove any path equal to 'routes/scanner.js' + assert all(not (p and p.endswith('routes/scanner.js') and cfg['scanner']['exclude_internal_scanner_files']) for p in file_paths) diff --git a/Vulnerability_Tool_V2/tests/test_output_json_fields.py b/Vulnerability_Tool_V2/tests/test_output_json_fields.py new file mode 100644 index 0000000..8c05a74 --- /dev/null +++ b/Vulnerability_Tool_V2/tests/test_output_json_fields.py @@ -0,0 +1,43 @@ +import sys +import os +import json + +# ensure repo root is on path +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) + +from core.scanner_engine import SecurityScannerEngine +from core.config_manager import ConfigManager + + +def test_json_output_includes_rule_metadata(tmp_path): + # Use a small temp directory with a sample file that triggers legacy rules + target = tmp_path / "sample" + target.mkdir() + sample_file = target / "test.js" + sample_file.write_text(""" + // sample to trigger api key hardcoded + const api_key = 'ABC123DEF456GHI789'; + """) + + # Load default config manager + cfg = ConfigManager(None) + scanner_cfg = cfg.get_scanner_config() + engine = SecurityScannerEngine(scanner_cfg) + # ensure general_security plugin enabled + plugin_cfgs = cfg.get_enabled_plugins() + if 'general_security' not in plugin_cfgs: + plugin_cfgs['general_security'] = {'enabled': True} + engine.load_plugins(plugin_cfgs) + + results = engine.scan_target(str(target)) + # Serialize to JSON (same as CLI does) + j = json.loads(json.dumps(results, ensure_ascii=False)) + + assert 'findings' in j + # Require that at least one finding contains the rule metadata keys (if any findings exist) + if 
j['findings']: + f = j['findings'][0] + assert 'rule_id' in f + assert 'rule_name' in f + assert 'confidence' in f + assert 'rule_mode' in f diff --git a/Vulnerability_Tool_V2/tools/render_from_json.py b/Vulnerability_Tool_V2/tools/render_from_json.py new file mode 100644 index 0000000..359c5ea --- /dev/null +++ b/Vulnerability_Tool_V2/tools/render_from_json.py @@ -0,0 +1,35 @@ +#!/usr/bin/env python3 +""" +Helper: render JSON scan result to HTML using the project's renderer (render_html_report). +Usage: render_from_json.py +""" +import sys +import json +import os + +sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + +from core.report_renderer import render_html_report + + +def main(): + if len(sys.argv) < 3: + print('Usage: render_from_json.py ') + return 2 + json_file = sys.argv[1] + output_file = sys.argv[2] + + with open(json_file, 'r', encoding='utf-8') as f: + data = json.load(f) + + # config manager optional; pass None + html = render_html_report(data, None) + + with open(output_file, 'w', encoding='utf-8') as f: + f.write(html) + + print(output_file) + return 0 + +if __name__ == '__main__': + sys.exit(main()) diff --git a/Vulnerability_Tool_V2/verify_phase1.py b/Vulnerability_Tool_V2/verify_phase1.py new file mode 100644 index 0000000..8deb14d --- /dev/null +++ b/Vulnerability_Tool_V2/verify_phase1.py @@ -0,0 +1,250 @@ +#!/usr/bin/env python3 +""" +Phase 1 Quick verification script +Verify that the modular infrastructure is built correctly +""" + +import os +import sys +import subprocess +from pathlib import Path + + +def check_file_exists(file_path, description): + """Check if a file exists""" + if os.path.exists(file_path): + print(f"✅ {description}: {file_path}") + return True + else: + print(f"❌ {description}: {file_path} (MISSING)") + return False + +def check_directory_structure(): + """Check directory structure""" + print("🏗️ Checking directory structure...") + + required_dirs = [ + ("core", "Core engine directory"), + 
("plugins", "Plugins directory"), + ("plugins/jwt_security", "JWT Security plugin directory"), + ("plugins/rls_security", "RLS Security plugin directory"), + ("config", "Configuration directory"), + ("tests", "Tests directory"), + ("reports", "Reports directory"), + ] + + all_exist = True + for dir_path, description in required_dirs: + if os.path.exists(dir_path): + print(f"✅ {description}: {dir_path}/") + else: + print(f"❌ {description}: {dir_path}/ (MISSING)") + all_exist = False + + return all_exist + +def check_core_files(): + """Check core files""" + print("\n🔧 Checking core files...") + + required_files = [ + ("plugins/base_plugin.py", "Base plugin class"), + ("core/scanner_engine.py", "Scanner engine"), + ("core/config_manager.py", "Configuration manager"), + ("config/scanner_config.yaml", "Scanner configuration"), + ("scanner_v2.py", "Main program entry"), + ("requirements.txt", "Dependencies file"), + ] + + all_exist = True + for file_path, description in required_files: + if not check_file_exists(file_path, description): + all_exist = False + + return all_exist + +def check_python_syntax(): + """Check Python syntax""" + print("\n🐍 Checking Python syntax...") + + python_files = [ + "plugins/base_plugin.py", + "core/scanner_engine.py", + "core/config_manager.py", + "scanner_v2.py", + "tests/test_basic_functionality.py" + ] + + all_valid = True + for file_path in python_files: + if os.path.exists(file_path): + try: + with open(file_path, 'r', encoding='utf-8') as f: + compile(f.read(), file_path, 'exec') + print(f"✅ Syntax check passed: {file_path}") + except SyntaxError as e: + print(f"❌ Syntax error {file_path}: {e}") + all_valid = False + else: + print(f"⚠️ File not found: {file_path}") + + return all_valid + +def test_basic_imports(): + """Test basic imports""" + print("\n📦 Testing module imports...") + + import_tests = [ + ("from plugins.base_plugin import BaseSecurityPlugin", "Base plugin class"), + ("from core.config_manager import ConfigManager", 
"Configuration manager"), + ("from core.scanner_engine import SecurityScannerEngine", "Scanner engine"), + ] + + all_imported = True + for import_stmt, description in import_tests: + try: + exec(import_stmt) + print(f"✅ Import successful: {description}") + except ImportError as e: + print(f"❌ Import failed {description}: {e}") + all_imported = False + except Exception as e: + print(f"❌ Error {description}: {e}") + all_imported = False + + return all_imported + +def test_basic_functionality(): + """Test basic functionality""" + print("\n⚙️ Testing basic functionality...") + + try: + # Test configuration manager + from core.config_manager import ConfigManager + config_manager = ConfigManager() + print("✅ Configuration manager initialized successfully") + + # Test scanner engine + from core.scanner_engine import SecurityScannerEngine + engine = SecurityScannerEngine() + print("✅ Scanner engine initialized successfully") + + # Test base plugin + from plugins.base_plugin import BaseSecurityPlugin + print("✅ Base plugin class imported successfully") + + return True + + except Exception as e: + print(f"❌ Functionality test failed: {e}") + return False + +def test_cli_interface(): + """Test command line interface""" + print("\n🖥️ Testing command line interface...") + + try: + # Test help information + result = subprocess.run([ + sys.executable, 'scanner_v2.py', '--help' + ], capture_output=True, text=True, timeout=10) + + if result.returncode == 0: + print("✅ Help information displayed correctly") + return True + else: + print(f"❌ Help information failed: {result.stderr}") + return False + + except subprocess.TimeoutExpired: + print("❌ Command line test timed out") + return False + except Exception as e: + print(f"❌ Command line test failed: {e}") + return False + +def run_unit_tests(): + """Run unit tests""" + print("\n🧪 Running unit tests...") + + if not os.path.exists('tests/test_basic_functionality.py'): + print("⚠️ Test file not found, skipping unit tests") + return 
True + + try: + result = subprocess.run([ + sys.executable, 'tests/test_basic_functionality.py' + ], capture_output=True, text=True, timeout=30) + + if result.returncode == 0: + print("✅ Unit tests passed") + print("📊 Test output:") + for line in result.stdout.split('\n')[-10:]: # Show last 10 lines + if line.strip(): + print(f" {line}") + return True + else: + print("❌ Unit tests failed") + print("📊 Test output:", result.stderr) + return False + + except subprocess.TimeoutExpired: + print("❌ Unit tests timed out") + return False + except Exception as e: + print(f"❌ Unit tests failed: {e}") + return False + +def main(): + """Main verification function""" + print("🚀 Phase 1 verification started...") + print("=" * 50) + + all_passed = True + + # Check project structure + checks = [ + ("Directory Structure", check_directory_structure), + ("Core Files", check_core_files), + ("Python Syntax", check_python_syntax), + ("Module Imports", test_basic_imports), + ("Basic Functionality", test_basic_functionality), + ("Command Line Interface", test_cli_interface), + ("Unit Tests", run_unit_tests), + ] + + results = {} + + for check_name, check_func in checks: + try: + result = check_func() + results[check_name] = result + if not result: + all_passed = False + except Exception as e: + print(f"❌ {check_name} check failed: {e}") + results[check_name] = False + all_passed = False + + # Output final results + print("\n" + "=" * 50) + print("📋 Phase 1 verification results summary:") + + for check_name, passed in results.items(): + status = "✅ Passed" if passed else "❌ Failed" + print(f" {check_name}: {status}") + + print("\n" + "=" * 50) + + if all_passed: + print("🎉 Phase 1 verification succeeded!") + print("✅ All checks passed") + print("🚀 You can proceed to Phase 2 (JWT Security Plugin Development)") + return 0 + else: + print("⚠️ Phase 1 verification failed") + print("🔧 Please fix the issues based on the error messages above and re-verify") + return 1 + +if __name__ == 
'__main__': + sys.exit(main()) \ No newline at end of file diff --git a/index.yaml b/index.yaml index 341fe0f..1030136 100644 --- a/index.yaml +++ b/index.yaml @@ -3,7 +3,9 @@ info: title: NutriHelp API version: 1.0.0 servers: - - url: http://localhost/api + - url: "http://localhost" + description: "Local API" + tags: - name: System description: System and security monitoring endpoints @@ -11,6 +13,8 @@ tags: description: KPIs and trends from public.audit_logs - name: Allergy description: Endpoints for allergy checks and warnings + - name: Vulnerability Scanner + description: Endpoints for the vulnerability scanner paths: /allergy/common: get: @@ -1933,7 +1937,6 @@ paths: description: Number of records to return schema: type: integer - description: Integer ID of the recipe for cost calculation default: 20 - name: page in: query @@ -1952,7 +1955,6 @@ paths: default: true responses: '200': - description: Calculate cost successfully description: Successfully retrieved requested data content: application/json: @@ -2474,33 +2476,57 @@ paths: Weight: 78.5 days_per_week: 4 workout_place: gym + type: object + properties: + user_id: + type: string + format: uuid + description: The unique ID of the user + glasses_consumed: + type: integer + description: Number of glasses consumed + required: + - user_id + - glasses_consumed + example: + user_id: "15" + glasses_consumed: 5 responses: '200': - description: Weekly plan generated successfully + description: Water intake updated successfully content: application/json: schema: - $ref: '#/components/schemas/HealthPlanResponse' + type: object + properties: + message: + type: string + example: "Water intake updated successfully" + data: + type: object + properties: + user_id: + type: string + example: "15" + date: + type: string + format: date + example: "2025-05-10" + glasses_consumed: + type: integer + example: 5 + updated_at: + type: string + format: date-time + example: "2025-05-10T12:00:00Z" '400': - description: Bad Request – 
invalid or missing input fields - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - '401': - description: Unauthorized – Authentication credentials missing or invalid. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - '502': - description: AI server error – upstream FastAPI returned an error + description: Bad request - missing or invalid fields content: application/json: schema: $ref: '#/components/schemas/ErrorResponse' '500': - description: Internal Server Error – Something went wrong on the server + description: Internal server error content: application/json: schema: @@ -2682,43 +2708,6 @@ paths: format: date-time example: "2025-08-03T12:14:00.706Z" - /barcode: - post: - summary: Detect user allergen from a given barcode - description: Retrieve ingredients information from a given barcode and detect user's allergen ingredients - parameters: - - name: code - in: query - required: true - schema: - type: integer - description: Barcode number for allergen detection - requestBody: - required: false - content: - application/json: - schema: - type: object - properties: - user_id: - type: integer - description: The user ID - required: - - user_id - responses: - '200': - description: Barcode scanning successful - content: - application/json: - schema: - $ref: '#/components/schemas/BarcodeAllergenDetection' - '500': - description: Internal server error - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - components: securitySchemes: @@ -2967,6 +2956,13 @@ components: type: string allergy: type: string + example: + target_path: "./routes" + plugins: + - "JWTMissingProtectionPlugin" + - "JWTConfigurationPlugin" + - "general_security" + output_format: "json" dislikes: type: string MealPlanRecipe: @@ -3756,3 +3752,5 @@ components: items: type: string example: ["Vegetable", "Meat", "Dairy", "Pantry"] + + \ No newline at end of file diff --git a/package-lock.json 
b/package-lock.json index c33089a..b62bb4c 100644 --- a/package-lock.json +++ b/package-lock.json @@ -7,6 +7,7 @@ "": { "name": "nutrihelp-api", "version": "1.0.0", + "hasInstallScript": true, "license": "ISC", "dependencies": { "@sendgrid/mail": "^8.1.3", @@ -30,9 +31,13 @@ "sinon": "^18.0.0", "swagger-ui-express": "^5.0.0", "twilio": "^5.9.0", + "uuid": "^8.3.2", + "v8-to-istanbul": "^9.3.0", "yamljs": "^0.3.0" }, "devDependencies": { + "acorn": "^8.15.0", + "acorn-walk": "^8.3.4", "axios": "^1.11.0", "chai": "^6.0.1", "chai-http": "^5.1.2", @@ -42,7 +47,52 @@ "mocha": "^11.7.2", "nodemon": "^3.1.10", "proxyquire": "^2.1.3", - "supertest": "^7.1.4" + "supertest": "^7.1.4", + "swagger-jsdoc": "^6.2.8" + } + }, + "node_modules/@apidevtools/json-schema-ref-parser": { + "version": "9.1.2", + "resolved": "https://registry.npmjs.org/@apidevtools/json-schema-ref-parser/-/json-schema-ref-parser-9.1.2.tgz", + "integrity": "sha512-r1w81DpR+KyRWd3f+rk6TNqMgedmAxZP5v5KWlXQWlgMUUtyEJch0DKEci1SorPMiSeM8XPl7MZ3miJ60JIpQg==", + "dev": true, + "dependencies": { + "@jsdevtools/ono": "^7.1.3", + "@types/json-schema": "^7.0.6", + "call-me-maybe": "^1.0.1", + "js-yaml": "^4.1.0" + } + }, + "node_modules/@apidevtools/openapi-schemas": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/@apidevtools/openapi-schemas/-/openapi-schemas-2.1.0.tgz", + "integrity": "sha512-Zc1AlqrJlX3SlpupFGpiLi2EbteyP7fXmUOGup6/DnkRgjP9bgMM/ag+n91rsv0U1Gpz0H3VILA/o3bW7Ua6BQ==", + "dev": true, + "engines": { + "node": ">=10" + } + }, + "node_modules/@apidevtools/swagger-methods": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/@apidevtools/swagger-methods/-/swagger-methods-3.0.2.tgz", + "integrity": "sha512-QAkD5kK2b1WfjDS/UQn/qQkbwF31uqRjPTrsCs5ZG9BQGAkjwvqGFjjPqAuzac/IYzpPtRzjCP1WrTuAIjMrXg==", + "dev": true + }, + "node_modules/@apidevtools/swagger-parser": { + "version": "10.0.3", + "resolved": 
"https://registry.npmjs.org/@apidevtools/swagger-parser/-/swagger-parser-10.0.3.tgz", + "integrity": "sha512-sNiLY51vZOmSPFZA5TF35KZ2HbgYklQnTSDnkghamzLb3EkNtcQnrBQEj5AOCxHpTtXpqMCRM1CrmV2rG6nw4g==", + "dev": true, + "dependencies": { + "@apidevtools/json-schema-ref-parser": "^9.0.6", + "@apidevtools/openapi-schemas": "^2.0.4", + "@apidevtools/swagger-methods": "^3.0.2", + "@jsdevtools/ono": "^7.1.3", + "call-me-maybe": "^1.0.1", + "z-schema": "^5.0.1" + }, + "peerDependencies": { + "openapi-types": ">=7" } }, "node_modules/@babel/code-frame": { @@ -1270,7 +1320,6 @@ "version": "3.1.2", "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", - "dev": true, "license": "MIT", "engines": { "node": ">=6.0.0" @@ -1280,20 +1329,24 @@ "version": "1.5.5", "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==", - "dev": true, "license": "MIT" }, "node_modules/@jridgewell/trace-mapping": { "version": "0.3.31", "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz", "integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==", - "dev": true, "license": "MIT", "dependencies": { "@jridgewell/resolve-uri": "^3.1.0", "@jridgewell/sourcemap-codec": "^1.4.14" } }, + "node_modules/@jsdevtools/ono": { + "version": "7.1.3", + "resolved": "https://registry.npmjs.org/@jsdevtools/ono/-/ono-7.1.3.tgz", + "integrity": "sha512-4JQNk+3mVzK3xh2rqd6RB4J46qUR19azEHBneZyTZM+c456qOrbbM/5xcR8huNCCcbVt7+UmizG6GuUvPvKUYg==", + "dev": true + }, "node_modules/@mapbox/node-pre-gyp": { "version": "1.0.11", "resolved": "https://registry.npmjs.org/@mapbox/node-pre-gyp/-/node-pre-gyp-1.0.11.tgz", @@ -1628,7 +1681,6 @@ 
"version": "2.0.6", "resolved": "https://registry.npmjs.org/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.6.tgz", "integrity": "sha512-2QF/t/auWm0lsy8XtKVPG19v3sSOQlJe/YHZgfjb/KBBHOGSV+J2q/S671rcq9uTBrLAXmZpqJiaQbMT+zNU1w==", - "dev": true, "license": "MIT" }, "node_modules/@types/istanbul-lib-report": { @@ -1651,6 +1703,12 @@ "@types/istanbul-lib-report": "*" } }, + "node_modules/@types/json-schema": { + "version": "7.0.15", + "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz", + "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==", + "dev": true + }, "node_modules/@types/methods": { "version": "1.1.4", "resolved": "https://registry.npmjs.org/@types/methods/-/methods-1.1.4.tgz", @@ -2013,6 +2071,30 @@ "node": ">= 0.6" } }, + "node_modules/acorn": { + "version": "8.15.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz", + "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==", + "dev": true, + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-walk": { + "version": "8.3.4", + "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.3.4.tgz", + "integrity": "sha512-ueEepnujpqee2o5aIYnvHU6C0A42MNdsIDeqy5BydrkuC5R1ZuUFnm27EeFJGoEHJQgn3uleRvmTXaJgfXbt4g==", + "dev": true, + "dependencies": { + "acorn": "^8.11.0" + }, + "engines": { + "node": ">=0.4.0" + } + }, "node_modules/agent-base": { "version": "6.0.2", "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz", @@ -2504,6 +2586,12 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/call-me-maybe": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/call-me-maybe/-/call-me-maybe-1.0.2.tgz", + "integrity": "sha512-HpX65o1Hnr9HH25ojC1YGs7HCQLq0GCOibSaWER0eNpgJ/Z1MZv2mTc7+xh6WOPxbRVcmgbv4hGU+uSQ/2xFZQ==", + "dev": true + }, 
"node_modules/callsites": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", @@ -2772,6 +2860,15 @@ "node": ">= 0.8" } }, + "node_modules/commander": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-6.2.0.tgz", + "integrity": "sha512-zP4jEKbe8SHzKJYQmq8Y9gYjtO/POJLgIdKgV7B9qNmABVFVc+ctqSX6iXh4mCpJfRBOabiZ2YKPg8ciDw6C+Q==", + "dev": true, + "engines": { + "node": ">= 6" + } + }, "node_modules/component-emitter": { "version": "1.3.1", "resolved": "https://registry.npmjs.org/component-emitter/-/component-emitter-1.3.1.tgz", @@ -2874,7 +2971,6 @@ "version": "2.0.0", "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==", - "dev": true, "license": "MIT" }, "node_modules/cookie": { @@ -3098,6 +3194,18 @@ "node": ">=0.3.1" } }, + "node_modules/doctrine": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz", + "integrity": "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==", + "dev": true, + "dependencies": { + "esutils": "^2.0.2" + }, + "engines": { + "node": ">=6.0.0" + } + }, "node_modules/dotenv": { "version": "16.6.1", "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.6.1.tgz", @@ -3279,6 +3387,15 @@ "node": ">=4" } }, + "node_modules/esutils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/etag": { "version": "1.8.1", "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz", @@ -5185,6 +5302,13 @@ "integrity": 
"sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==", "license": "MIT" }, + "node_modules/lodash.get": { + "version": "4.4.2", + "resolved": "https://registry.npmjs.org/lodash.get/-/lodash.get-4.4.2.tgz", + "integrity": "sha512-z+Uw/vLuy6gQe8cfaFWD7p0wVv8fJl3mbzXh33RS+0oW2wvUqiRXiQ69gLWSLpgB5/6sU+r6BlQR0MBILadqTQ==", + "deprecated": "This package is deprecated. Use the optional chaining (?.) operator instead.", + "dev": true + }, "node_modules/lodash.includes": { "version": "4.3.0", "resolved": "https://registry.npmjs.org/lodash.includes/-/lodash.includes-4.3.0.tgz", @@ -5197,6 +5321,13 @@ "integrity": "sha512-Bz5mupy2SVbPHURB98VAcw+aHh4vRV5IPNhILUCsOzRmsTmSQ17jIuqopAentWoehktxGd9e/hbIXq980/1QJg==", "license": "MIT" }, + "node_modules/lodash.isequal": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/lodash.isequal/-/lodash.isequal-4.5.0.tgz", + "integrity": "sha512-pDo3lu8Jhfjqls6GkMgpahsF9kCyayhgykjyLMNFTKWrpVdAQtYyB4muAMWozBB4ig/dtWAmsMxLEI8wuz+DYQ==", + "deprecated": "This package is deprecated. 
Use require('node:util').isDeepStrictEqual instead.", + "dev": true + }, "node_modules/lodash.isinteger": { "version": "4.0.4", "resolved": "https://registry.npmjs.org/lodash.isinteger/-/lodash.isinteger-4.0.4.tgz", @@ -5221,6 +5352,12 @@ "integrity": "sha512-0wJxfxH1wgO3GrbuP+dTTk7op+6L41QCXbGINEmD+ny/G/eCqGzxyCsh7159S+mgDDcoarnBw6PC1PS5+wUGgw==", "license": "MIT" }, + "node_modules/lodash.mergewith": { + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/lodash.mergewith/-/lodash.mergewith-4.6.2.tgz", + "integrity": "sha512-GK3g5RPZWTRSeLSpgP8Xhra+pnjBC56q9FZYe1d5RN3TJ35dbkGy3YqBSMbyCrlbi+CM9Z3Jk5yTL7RCsqboyQ==", + "dev": true + }, "node_modules/lodash.once": { "version": "4.1.1", "resolved": "https://registry.npmjs.org/lodash.once/-/lodash.once-4.1.1.tgz", @@ -5983,6 +6120,13 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/openapi-types": { + "version": "12.1.3", + "resolved": "https://registry.npmjs.org/openapi-types/-/openapi-types-12.1.3.tgz", + "integrity": "sha512-N4YtSYJqghVu4iek2ZUvcN/0aqH1kRDuNqzcycDxhOUpg7GdvLa2F3DgS6yBNhInhv2r/6I0Flkn7CqL8+nIcw==", + "dev": true, + "peer": true + }, "node_modules/p-limit": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", @@ -7133,6 +7277,81 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/swagger-jsdoc": { + "version": "6.2.8", + "resolved": "https://registry.npmjs.org/swagger-jsdoc/-/swagger-jsdoc-6.2.8.tgz", + "integrity": "sha512-VPvil1+JRpmJ55CgAtn8DIcpBs0bL5L3q5bVQvF4tAW/k/9JYSj7dCpaYCAv5rufe0vcCbBRQXGvzpkWjvLklQ==", + "dev": true, + "dependencies": { + "commander": "6.2.0", + "doctrine": "3.0.0", + "glob": "7.1.6", + "lodash.mergewith": "^4.6.2", + "swagger-parser": "^10.0.3", + "yaml": "2.0.0-1" + }, + "bin": { + "swagger-jsdoc": "bin/swagger-jsdoc.js" + }, + "engines": { + "node": ">=12.0.0" + } + }, + "node_modules/swagger-jsdoc/node_modules/brace-expansion": { + "version": "1.1.12", + "resolved": 
"https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/swagger-jsdoc/node_modules/glob": { + "version": "7.1.6", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.6.tgz", + "integrity": "sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "dev": true, + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.0.4", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/swagger-jsdoc/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/swagger-parser": { + "version": "10.0.3", + "resolved": "https://registry.npmjs.org/swagger-parser/-/swagger-parser-10.0.3.tgz", + "integrity": "sha512-nF7oMeL4KypldrQhac8RyHerJeGPD1p2xDh900GPvc+Nk7nWP6jX2FcC7WmkinMoAmoO774+AFXcWsW8gMWEIg==", + "dev": true, + "dependencies": { + "@apidevtools/swagger-parser": "10.0.3" + }, + "engines": { + "node": ">=10" + } + }, "node_modules/swagger-ui-dist": { "version": "5.28.0", "resolved": "https://registry.npmjs.org/swagger-ui-dist/-/swagger-ui-dist-5.28.0.tgz", @@ -7510,12 +7729,18 @@ "node": ">= 0.4.0" } }, + "node_modules/uuid": { + "version": "8.3.2", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz", + "integrity": 
"sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==", + "bin": { + "uuid": "dist/bin/uuid" + } + }, "node_modules/v8-to-istanbul": { "version": "9.3.0", "resolved": "https://registry.npmjs.org/v8-to-istanbul/-/v8-to-istanbul-9.3.0.tgz", "integrity": "sha512-kiGUalWN+rgBJ/1OHZsBtU4rXZOfj/7rKQxULKlIzwzQSvMJUUNgPwJEEh7gU6xEVxC0ahoOBvN2YI8GH6FNgA==", - "dev": true, - "license": "ISC", "dependencies": { "@jridgewell/trace-mapping": "^0.3.12", "@types/istanbul-lib-coverage": "^2.0.1", @@ -7733,6 +7958,15 @@ "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==" }, + "node_modules/yaml": { + "version": "2.0.0-1", + "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.0.0-1.tgz", + "integrity": "sha512-W7h5dEhywMKenDJh2iX/LABkbFnBxasD27oyXWDS/feDsxiw0dD5ncXdYXgkvAsXIY2MpW/ZKkr9IU30DBdMNQ==", + "dev": true, + "engines": { + "node": ">= 6" + } + }, "node_modules/yamljs": { "version": "0.3.0", "resolved": "https://registry.npmjs.org/yamljs/-/yamljs-0.3.0.tgz", @@ -7856,6 +8090,36 @@ "funding": { "url": "https://github.com/sponsors/sindresorhus" } + }, + "node_modules/z-schema": { + "version": "5.0.5", + "resolved": "https://registry.npmjs.org/z-schema/-/z-schema-5.0.5.tgz", + "integrity": "sha512-D7eujBWkLa3p2sIpJA0d1pr7es+a7m0vFAnZLlCEKq/Ij2k0MLi9Br2UPxoxdYystm5K1yeBGzub0FlYUEWj2Q==", + "dev": true, + "dependencies": { + "lodash.get": "^4.4.2", + "lodash.isequal": "^4.5.0", + "validator": "^13.7.0" + }, + "bin": { + "z-schema": "bin/z-schema" + }, + "engines": { + "node": ">=8.0.0" + }, + "optionalDependencies": { + "commander": "^9.4.1" + } + }, + "node_modules/z-schema/node_modules/commander": { + "version": "9.5.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-9.5.0.tgz", + "integrity": "sha512-KRs7WVDKg86PWiuAqhDrAQnTXZKraVcCc6vFdL14qrZ/DcWwuRo7VoiYXalXO7S5GKpqYiVEwCbgFDfxNHKJBQ==", 
+ "dev": true, + "optional": true, + "engines": { + "node": "^12.20.0 || >=14" + } } } } diff --git a/package.json b/package.json index 603bb4c..bf6d357 100644 --- a/package.json +++ b/package.json @@ -7,8 +7,15 @@ "start": "node server.js", "dev": "nodemon server.js", "test:rce": "mocha ./test/costEstimationTest.js", - "test": "mocha ./test/**/*.test.js", - "validate-env": "node scripts/validateEnv.js" + "test:integration": "concurrently -k \"node server.js\" \"mocha --timeout 10000 --exit\"", + "security-scan": "python3 scanner_v2.py --format html --output security_report.html", + "security-check": "python3 scanner_v2.py --format summary", + "test:unit": "mocha ./test/**/*.test.js", + "validate-env": "node scripts/validateEnv.js", + "prepare-scanner": "node scripts/prepareScanner.js", + "ensure-scanner": "node scripts/ensureScannerReady.js", + "setup": "node scripts/bootstrap.js --mode=full", + "postinstall": "node scripts/bootstrap.js --mode=postinstall" }, "jest": { "testMatch": [ @@ -45,9 +52,13 @@ "sinon": "^18.0.0", "swagger-ui-express": "^5.0.0", "twilio": "^5.9.0", + "uuid": "^8.3.2", + "v8-to-istanbul": "^9.3.0", "yamljs": "^0.3.0" }, "devDependencies": { + "acorn": "^8.15.0", + "acorn-walk": "^8.3.4", "axios": "^1.11.0", "chai": "^6.0.1", "chai-http": "^5.1.2", @@ -57,6 +68,7 @@ "mocha": "^11.7.2", "nodemon": "^3.1.10", "proxyquire": "^2.1.3", - "supertest": "^7.1.4" + "supertest": "^7.1.4", + "swagger-jsdoc": "^6.2.8" } } diff --git a/routes/index.js b/routes/index.js index 77b9ae1..1a82513 100644 --- a/routes/index.js +++ b/routes/index.js @@ -33,6 +33,7 @@ module.exports = app => { // Add shopping list routes app.use('/api/shopping-list', require('./shoppingList')); + app.use('/api/scanner', require('./scanner')); // Vulnerability Scanner API app.use('/api/barcode', require('./barcodeScanning')); diff --git a/routes/scanner.js b/routes/scanner.js new file mode 100644 index 0000000..ba30e2d --- /dev/null +++ b/routes/scanner.js @@ -0,0 +1,1271 @@ +// 
routes/scanner.js
+const express = require('express');
+const router = express.Router();
+const { spawn, spawnSync } = require('child_process');
+const path = require('path');
+const fs = require('fs').promises;
+const { v4: uuidv4 } = require('uuid');
+
+// Stores per-scan status, keyed by scan ID
+const activeScanners = new Map();
+
+// Generate the canonical timestamp scan ID: YYYYMMDD_HHMMSS.
+// The `tag` argument (default 'scan', e.g. 'quick-scan') is no longer embedded
+// into the canonical ID; callers should record the tag separately and build
+// filenames with the helper below to append an optional tag suffix (e.g. _quick-scan).
+function generateScanId(tag = 'scan') {
+  const now = new Date();
+  const pad = (n) => String(n).padStart(2, '0');
+  const YYYY = now.getFullYear();
+  const MM = pad(now.getMonth() + 1);
+  const DD = pad(now.getDate());
+  const hh = pad(now.getHours());
+  const mm = pad(now.getMinutes());
+  const ss = pad(now.getSeconds());
+  return `${YYYY}${MM}${DD}_${hh}${mm}${ss}`;
+}
+
+ // Resolve a usable python executable: prefer venv, then system python3, then python
+ // Returns the executable name/path string or null if none found. 
+ async function resolvePythonExecutable(scannerRoot) { + const { spawnSync } = require('child_process'); + const path = require('path'); + const envOverride = process.env.PYTHON_EXECUTABLE && process.env.PYTHON_EXECUTABLE.trim(); + const candidates = [ + envOverride, // Highest priority: explicit override + // Windows venv locations + path.join(scannerRoot, 'venv', 'Scripts', 'python.exe'), + path.join(scannerRoot, 'venv', 'Scripts', 'python'), + // POSIX venv location + path.join(scannerRoot, 'venv', 'bin', 'python'), + // System fallbacks + 'python3', + 'python', + 'py' // Windows launcher + ].filter(Boolean); + for (const c of candidates) { + try { + const res = spawnSync(c, ['--version'], { encoding: 'utf8' }); + if (!res.error && (res.status === 0 || (res.stdout || res.stderr))) { + return c; + } + } catch (e) { + // ignore and try next + } + } + return null; + } + +// Compose a filename-safe identifier from the canonical timestamp id and an optional tag. +function formatScanIdWithTag(scanId, tag) { + const cleanTag = tag ? String(tag).replace(/[^a-zA-Z0-9_-]/g, '-') : ''; + return `${scanId}${cleanTag ? '_' + cleanTag : ''}`; +} + +// Find a file in 'dir' that starts with prefix and ends with ext. Returns null if none found. 
+async function findFileWithPrefix(dir, prefix, ext) { + try { + const entries = await fs.readdir(dir); + for (const e of entries) { + if (e.startsWith(prefix) && e.endsWith(ext)) return path.join(dir, e); + } + } catch (e) { + return null; + } + return null; +} + +/** + * @swagger + * /api/scanner/test: + * get: + * summary: Test endpoint + * tags: [Vulnerability Scanner] + * responses: + * 200: + * description: Test successful + */ +router.get('/test', (req, res) => { + res.json({ message: 'Scanner API is working!', timestamp: new Date().toISOString() }); +}); + +/** + * @swagger + * components: + * schemas: + * ScanRequest: + * type: object + * required: + * - target_path + * properties: + * key: + * type: string + * description: internal plugin key (used by scanner) + * version: + * type: string + * description: plugin version if available + * available: + * type: boolean + * description: whether plugin directory exists in the scanner package + * enabled: + * type: boolean + * description: whether plugin is enabled in scanner config (null if unknown) + * severity_level: + * type: string + * description: default severity level assigned to findings from this plugin + * target_path: + * type: string + * description: Target path to scan + * example: "./routes" + * plugins: + * type: array + * items: + * type: string + * description: Specify the plugin to use + * example: + * - "JWTMissingProtectionPlugin" + * - "JWTConfigurationPlugin" + * output_format: + * type: string + * enum: [json, html] + * default: json + * description: Output format + * example: + * target_path: "./routes" + * plugins: + * - "JWTMissingProtectionPlugin" + * - "JWTConfigurationPlugin" + * output_format: "json" + * ScanResult: + * type: object + * properties: + * scan_id: + * type: string + * description: Scan ID + * target_path: + * type: string + * description: Scan target path + * scan_time: + * type: string + * format: date-time + * description: Scan time + * total_files: + * type: 
integer + * description: Total number of files scanned + * total_findings: + * type: integer + * description: Total number of findings + * severity_summary: + * type: object + * properties: + * CRITICAL: + * type: integer + * HIGH: + * type: integer + * MEDIUM: + * type: integer + * LOW: + * type: integer + * findings: + * type: array + * items: + * type: object + * properties: + * title: + * type: string + * severity: + * type: string + * file_path: + * type: string + * description: + * type: string + * securitySchemes: + * BearerAuth: + * type: http + * scheme: bearer + * bearerFormat: JWT + */ + +/** + * @swagger + * /api/scanner/health: + * get: + * summary: Scanner health check + * tags: [Vulnerability Scanner] + * responses: + * 200: + * description: Scanner is healthy + * content: + * application/json: + * schema: + * type: object + * properties: + * status: + * type: string + * example: healthy + * version: + * type: string + * example: "2.0.0" + */ +router.get('/health', async (req, res) => { + try { + const scannerPath = path.join(__dirname, '../Vulnerability_Tool_V2'); + const exists = await fs.access(scannerPath).then(() => true).catch(() => false); + + res.json({ + status: exists ? 
'healthy' : 'scanner_not_found', + version: '2.0.0', + timestamp: new Date().toISOString(), + scanner_path: scannerPath + }); + } catch (error) { + res.status(500).json({ + status: 'error', + message: error.message + }); + } +}); + +/** + * @swagger + * /api/scanner/plugins: + * get: + * summary: Get available plugin list + * tags: [Vulnerability Scanner] + * responses: + * 200: + * description: Plugin list + * content: + * application/json: + * schema: + * type: object + * properties: + * plugins: + * type: array + * items: + * type: object + * properties: + * name: + * type: string + * description: + * type: string + */ +router.get('/plugins', async (req, res) => { + try { + // Dynamically construct available plugin list to reflect the scanner's plugins + const scannerRoot = path.join(__dirname, '../Vulnerability_Tool_V2'); + const pluginsDir = path.join(scannerRoot, 'plugins'); + + // Plugin mappings mirror Vulnerability_Tool_V2/core/scanner_engine.py + // Note: plugin key => folder name mapping (some plugins group under subpackage like jwt_security) + const pluginMappings = { + 'jwt_missing_protection': { name: 'JWTMissingProtectionPlugin', default_severity: 'HIGH', folder: 'jwt_security' }, + 'jwt_configuration': { name: 'JWTConfigurationPlugin', default_severity: 'MEDIUM', folder: 'jwt_security' }, + 'general_security': { name: 'general_security', default_severity: 'MEDIUM', folder: 'general_security' } + }; + + // Try to read scanner config to determine enabled state when possible + let enabledPluginsConfig = {}; + try { + const configPath = path.join(scannerRoot, 'config', 'scanner_config.yaml'); + const exists = await fs.access(configPath).then(() => true).catch(() => false); + if (exists) { + const yaml = require('yamljs'); + const cfg = yaml.load(configPath) || {}; + enabledPluginsConfig = cfg.plugins || {}; + } + } catch (e) { + // ignore config read errors; we'll fall back to defaults + } + + const plugins = []; + for (const [key, meta] of 
Object.entries(pluginMappings)) { + const pluginFolder = path.join(pluginsDir, meta.folder || key); + const available = await fs.access(pluginFolder).then(() => true).catch(() => false); + + // defaults + let description = ''; + let version = null; + + if (available) { + // Try to read __init__.py to get metadata heuristically + try { + const initPath = path.join(pluginFolder, '__init__.py'); + const initExists = await fs.access(initPath).then(() => true).catch(() => false); + if (initExists) { + const content = await fs.readFile(initPath, 'utf8'); + // attempt to extract description and version strings from get_plugin_info + const descMatch = content.match(/description\s*[:=]\s*['\"]([\s\S]*?)['\"]/i); + const verMatch = content.match(/version\s*[:=]\s*['\"]([\w\.\-]+)['\"]/i); + if (descMatch) description = descMatch[1].trim(); + if (verMatch) version = verMatch[1].trim(); + } + } catch (e) { + // best-effort only + } + } + + // fallback description if not found + if (!description) { + if (meta.name === 'general_security') description = 'Detect generic security issues such as hardcoded secrets, DB URLs and permissive CORS.'; + else if (meta.name === 'JWTMissingProtectionPlugin') description = 'Detect missing JWT protection in API endpoints'; + else if (meta.name === 'JWTConfigurationPlugin') description = 'Validate JWT configuration security'; + } + + const cfg = enabledPluginsConfig[key]; + // Follow scanner_v2.py semantics: general_security should be enabled by default if not present in config + let enabled = null; + if (cfg && typeof cfg.enabled === 'boolean') enabled = cfg.enabled; + else if (key === 'general_security') enabled = true; + + plugins.push({ + key, + name: meta.name, + description, + version, + severity_level: meta.default_severity, + available, + enabled + }); + } + + res.json({ plugins }); + } catch (error) { + res.status(500).json({ + success: false, + error: error.message + }); + } +}); + +/** + * @swagger + * /api/scanner/scan: + * post: + * 
summary: Start security scan + * tags: [Vulnerability Scanner] + * security: + * - BearerAuth: [] + * requestBody: + * required: true + * content: + * application/json: + * schema: + * $ref: '#/components/schemas/ScanRequest' + * responses: + * 200: + * description: Scan started successfully + * content: + * application/json: + * schema: + * type: object + * properties: + * scan_id: + * type: string + * message: + * type: string + * status_url: + * type: string + * 400: + * description: Request parameter error + * 500: + * description: Server error + */ +router.post('/scan', async (req, res) => { + try { + const { target_path, plugins, output_format = 'json' } = req.body; + + if (!target_path) { + return res.status(400).json({ + success: false, + error: 'target_path is required' + }); + } + + // Validate target path + const targetExists = await fs.access(target_path).then(() => true).catch(() => false); + if (!targetExists) { + return res.status(400).json({ + success: false, + error: `Target path does not exist: ${target_path}` + }); + } + + const scanId = generateScanId(); + // For normal async scans, use default tag 'scan' + const scanTag = 'scan'; + + // Start asynchronous scan and pass tag so filenames can include it as a suffix + startPythonScan(scanId, scanTag, target_path, plugins, output_format); + + res.json({ + scan_id: scanId, + message: 'Scan started successfully', + status_url: `/api/scanner/scan/${scanId}/status` + }); + + } catch (error) { + res.status(500).json({ + success: false, + error: error.message + }); + } +}); + +/** + * @swagger + * /api/scanner/scan/{scanId}/status: + * get: + * summary: Get scan status + * tags: [Vulnerability Scanner] + * parameters: + * - in: path + * name: scanId + * required: true + * schema: + * type: string + * description: ScanID + * responses: + * 200: + * description: Scan Status + * content: + * application/json: + * schema: + * type: object + * properties: + * scan_id: + * type: string + * status: + * type: 
string
+ *                   enum: [running, completed, failed]
+ *                 progress:
+ *                   type: integer
+ *                 message:
+ *                   type: string
+ *       404:
+ *         description: Scan ID does not exist
+ */
+router.get('/scan/:scanId/status', async (req, res) => {
+  const { scanId } = req.params;
+  let scanInfo = activeScanners.get(scanId);
+
+  if (!scanInfo) {
+    // Try to load persisted report files as a fallback (project reports or scanner reports)
+    try {
+      const reportsDir = path.join(process.cwd(), 'reports');
+      const jsonPrefix = `Vulnerability_Scan_Result_${scanId}`;
+      const projectReportJson = await findFileWithPrefix(reportsDir, jsonPrefix, '.json');
+
+      if (projectReportJson) {
+        const data = await fs.readFile(projectReportJson, 'utf8');
+        scanInfo = { status: 'completed', result: JSON.parse(data) };
+      } else {
+        // Try HTML in scanner's reports dir
+        const scannerReportsDir = path.join(process.cwd(), 'Vulnerability_Tool_V2', 'reports');
+        const htmlPrefix = `Vulnerability_Scan_Report_${scanId}`;
+        const scannerReportHtml = await findFileWithPrefix(scannerReportsDir, htmlPrefix, '.html');
+
+        if (scannerReportHtml) {
+          const html = await fs.readFile(scannerReportHtml, 'utf8');
+          // crude extraction: count finding blocks and try to read embedded summary JSON
+          const findings = [];
+          // NOTE(review): the regex literal was garbled across a line break in the patch
+          // (regex literals cannot span lines in JS); reconstructed here — confirm the
+          // exact finding-block markup emitted by the HTML report renderer.
+          const findingRegex = /<div class="finding">([\s\S]*?)<\/div>/g;
+          let m;
+          while ((m = findingRegex.exec(html)) !== null) {
+            findings.push({ title: m[1].trim() });
+          }
+
+          // try to extract a summary JSON blob if present
+          const jsonBlobMatch = html.match(/\{[\s\S]*?\}/);
+          let summary = {};
+          if (jsonBlobMatch) {
+            try { summary = JSON.parse(jsonBlobMatch[0]); } catch (e) { summary = {}; }
+          }
+
+          scanInfo = { status: 'completed', result: { scan_info: summary.scan_info || {}, summary: summary.summary || {}, findings: findings } };
+        }
+      }
+    } catch (e) {
+      // ignore and fall through to 404
+    }
+  }
+  if (!scanInfo) {
+    return res.status(404).json({
+      success: false,
+      error: 'Scan ID not found'
+    });
+  }
+
+  res.json({
+    scan_id: scanId,
+    status: scanInfo.status,
+    progress: scanInfo.progress,
+    message: scanInfo.message
+  });
+});
+
+/**
+ * @swagger
+ * /api/scanner/scan/{scanId}/result:
+ *   get:
+ *     summary: Get scan result
+ *     tags: [Vulnerability Scanner]
+ *     parameters:
+ *       - in: path
+ *         name: scanId
+ *         required: true
+ *         schema:
+ *           type: string
+ *         description: Scan ID
+ *     responses:
+ *       200:
+ *         description: Scan result
+ *         content:
+ *           application/json:
+ *             schema:
+ *               $ref: '#/components/schemas/ScanResult'
+ *       202:
+ *         description: Scan not completed yet
+ *       404:
+ *         description: Scan ID does not exist
+ */
+router.get('/scan/:scanId/result', async (req, res) => {
+  const { scanId } = req.params;
+  const scanInfo = activeScanners.get(scanId);
+
+  if (!scanInfo) {
+    return res.status(404).json({
+      success: false,
+      error: 'Scan ID not found'
+    });
+  }
+
+  if (scanInfo.status !== 'completed') {
+    return res.status(202).json({
+      success: false,
+      error: 'Scan not completed yet',
+      status: scanInfo.status
+    });
+  }
+
+  if (!scanInfo.result) {
+    return res.status(500).json({
+      success: false,
+      error: 'Scan result not available'
+    });
+  }
+
+  // Normalize response: ensure scan_id matches requested scanId and return summary before findings
+  const fullResult = scanInfo.result || {};
+  const 
summary = fullResult.summary || fullResult.scan_info || {}; + const findings = fullResult.findings || fullResult.issues || []; + + const responsePayload = { + scan_id: scanId, + summary: { + total_findings: summary.total || summary.total_findings || (Array.isArray(findings) ? findings.length : 0), + files_scanned: summary.files_scanned || (summary.stats && summary.stats.files_scanned) || (fullResult.scan_info && fullResult.scan_info.stats && fullResult.scan_info.stats.files_scanned) || null, + by_severity: summary.by_severity || summary.severity_summary || fullResult.by_severity || null, + by_plugin: summary.by_plugin || fullResult.by_plugin || null + }, + findings: findings + }; + + res.json(responsePayload); +}); + +/** + * @swagger + * /api/scanner/scan/{scanId}/report: + * get: + * summary: Download scan report + * tags: [Vulnerability Scanner] + * parameters: + * - in: path + * name: scanId + * required: true + * schema: + * type: string + * description: Scan ID + * - in: query + * name: format + * schema: + * type: string + * enum: [html, json] + * default: html + * description: Report format + * responses: + * 200: + * description: Report file + * content: + * text/html: + * schema: + * type: string + * application/json: + * schema: + * type: object + * 404: + * description: Scan ID does not exist + */ +router.get('/scan/:scanId/report', async (req, res) => { + const { scanId } = req.params; + const { format = 'html' } = req.query; + console.log('REPORT request:', { scanId, format, query: req.query }); + const scanInfo = activeScanners.get(scanId); + + if (!scanInfo) { + return res.status(404).json({ + success: false, + error: 'Scan ID not found' + }); + } + + if (scanInfo.status !== 'completed') { + return res.status(202).json({ + success: false, + error: 'Scan not completed yet' + }); + } + + if (format === 'html' && scanInfo.result) { + // Persist and return HTML report. Prefer the scanner's reports dir for storage. 
+ try { + const scannerReportsDir = path.join(__dirname, '../Vulnerability_Tool_V2', 'reports'); + await fs.mkdir(scannerReportsDir, { recursive: true }); + const htmlPath = path.join(scannerReportsDir, `Vulnerability_Scan_Report_${scanId}.html`); + + // First try to use Python renderer if present + const pythonRenderer = path.join(__dirname, '../Vulnerability_Tool_V2/tools/render_from_json.py'); + const scannerPath = path.join(__dirname, '../Vulnerability_Tool_V2'); + + if (await fs.access(pythonRenderer).then(() => true).catch(() => false)) { + // write JSON temp file (in scanner reports dir) + const tmpJson = path.join(scannerReportsDir, `tmp_${scanId}.json`); + await fs.writeFile(tmpJson, JSON.stringify(scanInfo.result, null, 2)); + + // Try venv python first, then system python3, then python + const pythonCandidates = [ + path.join(scannerPath, 'venv', 'bin', 'python'), + 'python3', + 'python' + ]; + + let spawnRes = null; + let usedPython = null; + for (const py of pythonCandidates) { + try { + spawnRes = spawnSync(py, [pythonRenderer, tmpJson, htmlPath], { cwd: scannerPath, encoding: 'utf8' }); + } catch (e) { + spawnRes = { error: e }; + } + if (spawnRes && !spawnRes.error && spawnRes.status === 0) { + usedPython = py; + break; + } + } + + // remove tmp + try { await fs.unlink(tmpJson); } catch (e) {} + + // If python helper failed or file still missing, fallback to JS renderer + const finalHtmlExists = await fs.access(htmlPath).then(() => true).catch(() => false); + if (!finalHtmlExists || !usedPython) { + const html = generateHTMLReport(scanInfo.result); + await fs.writeFile(htmlPath, html); + } + } else { + // No python helper available; use JS renderer + const html = generateHTMLReport(scanInfo.result); + await fs.writeFile(htmlPath, html); + } + + // Prefer scanner reports dir, but as a fallback check project reports dir for any legacy files + const projectReportsDir = path.join(__dirname, '../reports'); + + // Try to find the actual HTML file which 
may include an optional tag suffix + let finalPath = await findFileWithPrefix(scannerReportsDir, `Vulnerability_Scan_Report_${scanId}`, '.html'); + if (!finalPath) finalPath = await findFileWithPrefix(projectReportsDir, `Vulnerability_Scan_Report_${scanId}`, '.html'); + if (!finalPath) finalPath = htmlPath; // fallback to whatever we wrote earlier + + // record chosen path and stream it + scanInfo.reportPath = finalPath; + const htmlContent = await fs.readFile(finalPath, 'utf-8'); + res.setHeader('Content-Type', 'text/html'); + res.setHeader('Content-Disposition', `attachment; filename="${path.basename(finalPath)}"`); + res.send(htmlContent); + return; + } catch (err) { + res.status(500).json({ success: false, error: 'Failed to generate HTML report', details: err.message }); + return; + } + } else if (format === 'json') { + // attempt to find persisted json with optional tag; prefer scanner reports dir + const scannerReportsDir = path.join(__dirname, '../Vulnerability_Tool_V2/reports'); + const projectReportsDir = path.join(__dirname, '../reports'); + let jsonPath = await findFileWithPrefix(scannerReportsDir, `Vulnerability_Scan_Result_${scanId}`, '.json'); + if (!jsonPath) jsonPath = await findFileWithPrefix(projectReportsDir, `Vulnerability_Scan_Result_${scanId}`, '.json'); + if (jsonPath) res.setHeader('Content-Disposition', `attachment; filename="${path.basename(jsonPath)}"`); + else res.setHeader('Content-Disposition', `attachment; filename=\"Vulnerability_Scan_Result_${scanId}.json\"`); + res.json(scanInfo.result); + } else { + res.status(400).json({ + success: false, + error: 'Invalid format or report not available' + }); + } +}); + +// Debug endpoint: return raw python stdout and JSON candidates for a scan (useful for diagnosing parsing issues) +router.get('/scan/:scanId/raw', (req, res) => { + const { scanId } = req.params; + const scanInfo = activeScanners.get(scanId); + if (!scanInfo) { + return res.status(404).json({ success: false, error: 'Scan ID not 
found' }); + } + + const raw = scanInfo.rawOutput || ''; + const candidates = collectJSONCandidates(raw); + res.json({ scan_id: scanId, status: scanInfo.status, progress: scanInfo.progress, raw_preview: raw.slice(0, 4000), candidate_count: candidates.length, candidates: candidates.slice(-3) }); +}); + +/** + * @swagger + * /api/scanner/quick-scan: + * post: + * summary: Quick synchronous scan + * tags: [Vulnerability Scanner] + * security: + * - BearerAuth: [] + * requestBody: + * required: true + * content: + * application/json: + * schema: + * $ref: '#/components/schemas/ScanRequest' + * responses: + * 200: + * description: Scan result + * content: + * application/json: + * schema: + * $ref: '#/components/schemas/ScanResult' + */ +router.post('/quick-scan', async (req, res) => { + try { + const { target_path, plugins, output_format = 'json' } = req.body; + + if (!target_path) { + return res.status(400).json({ + success: false, + error: 'target_path is required' + }); + } + // Validate target path exists (same as the async /scan endpoint) + const targetExists = await fs.access(target_path).then(() => true).catch(() => false); + if (!targetExists) { + return res.status(400).json({ + success: false, + error: `Target path does not exist: ${target_path}` + }); + } + + const scanId = generateScanId(); + const scanTag = 'quick-scan'; + const scanIdWithTag = formatScanIdWithTag(scanId, scanTag); + const result = await runPythonScanSync(target_path, plugins, output_format); + + // persist into activeScanners so subsequent report/status endpoints can find it + const scanInfo = { + status: 'completed', + progress: 100, + message: 'Quick scan completed', + result: result, + tag: scanTag, + scan_time: new Date().toISOString() + }; + + // write report files into the scanner's own reports dir for later retrieval + try { + const scannerReportsDir = path.join(__dirname, '../Vulnerability_Tool_V2', 'reports'); + await fs.mkdir(scannerReportsDir, { recursive: true }); + const 
jsonPath = path.join(scannerReportsDir, `Vulnerability_Scan_Result_${scanIdWithTag}.json`); + await fs.writeFile(jsonPath, JSON.stringify(result, null, 2)); + + // also generate HTML report if requested or default format + if (output_format === 'html' || output_format === 'json') { + // Prefer the scanner's Python renderer for consistent/identical HTML output + const pythonRenderer = path.join(__dirname, '../Vulnerability_Tool_V2/tools/render_from_json.py'); + const htmlPath = path.join(scannerReportsDir, `Vulnerability_Scan_Report_${scanIdWithTag}.html`); + const tmpJson = path.join(scannerReportsDir, `tmp_${scanIdWithTag}.json`); + await fs.writeFile(tmpJson, JSON.stringify(result, null, 2)); + let wroteHtml = false; + if (await fs.access(pythonRenderer).then(() => true).catch(() => false)) { + // try to run python helper (best-effort without blocking server startup) + const { spawnSync } = require('child_process'); + const scannerPath = path.join(__dirname, '../Vulnerability_Tool_V2'); + const pythonCandidates = [ + path.join(scannerPath, 'venv', 'bin', 'python'), + 'python3', + 'python' + ]; + for (const py of pythonCandidates) { + try { + const spawnRes = spawnSync(py, [pythonRenderer, tmpJson, htmlPath], { cwd: scannerPath, encoding: 'utf8' }); + if (!spawnRes.error && spawnRes.status === 0) { + wroteHtml = true; + break; + } + } catch (e) { + // ignore and try next + } + } + } + + // remove tmp json + try { await fs.unlink(tmpJson); } catch (e) {} + + if (!wroteHtml) { + // fallback to JS renderer + const html = generateHTMLReport(result); + await fs.writeFile(htmlPath, html); + } + scanInfo.reportPath = htmlPath; + } + } catch (e) { + // non-fatal: keep scanInfo in memory but log message + scanInfo.message += `; Failed to persist reports: ${e.message}`; + } + + activeScanners.set(scanId, scanInfo); + + // Ensure result's own scan_id (if any) doesn't override our generated scanId + const responsePayload = Object.assign({}, result || {}); + 
responsePayload.scan_id = scanId; + responsePayload.target_path = target_path; + responsePayload.scan_time = scanInfo.scan_time; + + res.json(responsePayload); + + } catch (error) { + res.status(500).json({ + success: false, + error: error.message + }); + } +}); + +// Start asynchronous Python scan +function startPythonScan(scanId, scanTag, targetPath, plugins, outputFormat) { + activeScanners.set(scanId, { + status: 'running', + progress: 0, + message: 'Scan initiated', + tag: scanTag + }); + + const scannerPath = path.join(__dirname, '../Vulnerability_Tool_V2'); + const scriptPath = path.join(scannerPath, 'scanner_v2.py'); + + // Resolve a usable python executable: prefer venv, then system python3, then python + function resolvePythonExecutableSync(scannerRoot) { + const { spawnSync } = require('child_process'); + const envOverride = process.env.PYTHON_EXECUTABLE && process.env.PYTHON_EXECUTABLE.trim(); + const candidates = [ + envOverride, + path.join(scannerRoot, 'venv', 'Scripts', 'python.exe'), + path.join(scannerRoot, 'venv', 'Scripts', 'python'), + path.join(scannerRoot, 'venv', 'bin', 'python'), + 'python3', + 'python', + 'py' + ].filter(Boolean); + for (const c of candidates) { + try { + const res = spawnSync(c, ['--version'], { encoding: 'utf8' }); + if (!res.error && (res.status === 0 || (res.stdout || res.stderr))) { + return c; + } + } catch (e) { + // ignore and try next + } + } + return null; + } + + const args = ['--target', targetPath, '--format', outputFormat]; + + const pythonExec = resolvePythonExecutableSync(scannerPath); + if (!pythonExec) { + const scanInfo = activeScanners.get(scanId); + if (scanInfo) { + scanInfo.status = 'failed'; + scanInfo.progress = 0; + scanInfo.message = 'No Python executable found. Expected either Vulnerability_Tool_V2/venv/bin/python or system python3. 
Please create a venv and install dependencies: `python3 -m venv Vulnerability_Tool_V2/venv && source Vulnerability_Tool_V2/venv/bin/activate && pip install -r Vulnerability_Tool_V2/requirements.txt`'; + scanInfo.rawOutput = (scanInfo.rawOutput || '') + '\n\nSPAWN_ERROR: No python executable found'; + } + return; + } + + let pythonProcess; + try { + // Enforce UTF-8 so emoji / unicode characters don't break on Windows consoles (keep emoji output intact) + const childEnv = Object.assign({}, process.env, { + PYTHONUTF8: '1', + PYTHONIOENCODING: 'utf-8', + SCANNER_PROGRESS: '1' // signal Python side to emit incremental progress lines + }); + pythonProcess = spawn(pythonExec, [scriptPath, ...args], { + cwd: scannerPath, + env: childEnv + }); + } catch (spawnErr) { + const scanInfo = activeScanners.get(scanId); + if (scanInfo) { + scanInfo.status = 'failed'; + scanInfo.progress = 0; + scanInfo.message = `Failed to start python scanner using '${pythonExec}': ${spawnErr.message || String(spawnErr)}. 
Tried candidates: venv/bin/python -> python3 -> python`;
+      scanInfo.rawOutput = (scanInfo.rawOutput || '') + '\n\nSPAWN_ERROR:\n' + (spawnErr.stack || String(spawnErr));
+    }
+    return;
+  }
+
+  // handle runtime errors from the child process (e.g., exec failures)
+  pythonProcess.on('error', (err) => {
+    const scanInfo = activeScanners.get(scanId);
+    if (scanInfo) {
+      scanInfo.status = 'failed';
+      scanInfo.progress = 0;
+      scanInfo.message = `Python process error: ${err.message || String(err)}`;
+      scanInfo.rawOutput = (scanInfo.rawOutput || '') + '\n\nPROCESS_ERROR:\n' + (err.stack || String(err));
+      // persist raw output for post-mortem
+      (async () => {
+        try {
+          const reportsDir = path.join(__dirname, '../reports');
+          await fs.mkdir(reportsDir, { recursive: true });
+          const rawPath = path.join(reportsDir, `raw_${scanId}.log`);
+          await fs.writeFile(rawPath, scanInfo.rawOutput || (err.stack || String(err)));
+          scanInfo.rawOutputPath = rawPath;
+        } catch (e) {
+          // nothing else to do
+        }
+      })();
+    }
+  });
+
+  let outputData = '';
+  let errorData = '';
+  let lineBuffer = '';
+  // save raw output for debugging
+
+  pythonProcess.stdout.on('data', (data) => {
+    const chunk = data.toString();
+    outputData += chunk;
+    const scanInfo = activeScanners.get(scanId);
+    lineBuffer += chunk;
+    // Process complete lines for progress markers
+    let lines = lineBuffer.split(/\r?\n/);
+    lineBuffer = lines.pop(); // keep last partial line
+    for (const line of lines) {
+      // Progress sentinel format: PROGRESS|<percent>|<message>
+      const m = line.match(/^PROGRESS\|(\d{1,3})(?:\|(.*))?$/);
+      if (m && scanInfo && scanInfo.status === 'running') {
+        const pct = Math.max(0, Math.min(100, parseInt(m[1], 10)));
+        scanInfo.progress = pct;
+        if (m[2]) {
+          const msg = m[2].trim();
+          // Only update the message while the scan is still running; the final
+          // success text set on completion is left untouched.
+          if (msg) scanInfo.message = msg;
+        }
+      }
+    }
+  });
+
+  pythonProcess.stderr.on('data', (data) => {
+    errorData += data.toString();
+  });
+
+  pythonProcess.on('close', (code) => {
+    
console.log('Full Python output:', outputData); + + const scanInfo = activeScanners.get(scanId); + if (scanInfo) scanInfo.rawOutput = outputData; + if (!scanInfo) return; + + if (code === 0) { + try { + const result = parseBestJSON(outputData); + scanInfo.status = 'completed'; + scanInfo.progress = 100; + scanInfo.message = 'Scan completed successfully'; + scanInfo.result = result; + + // If there is HTML output, save it as well + if (outputFormat === 'html') { + scanInfo.htmlReport = generateHTMLReport(result); + // persist into project reports dir for easy discovery (async IIFE) + (async () => { + try { + const reportsDir = path.join(__dirname, '../reports'); + await fs.mkdir(reportsDir, { recursive: true }); + const idWithTag = formatScanIdWithTag(scanId, scanTag); + const htmlPath = path.join(reportsDir, `Vulnerability_Scan_Report_${idWithTag}.html`); + await fs.writeFile(htmlPath, scanInfo.htmlReport); + scanInfo.reportPath = htmlPath; + } catch (e) { + // if writing to project reports fails, leave as-is and record message + scanInfo.message = (scanInfo.message || '') + `; Failed to persist html report: ${e.message}`; + } + })(); + } + } catch (error) { + // Persist raw output to disk for post-mortem analysis + (async () => { + try { + const reportsDir = path.join(__dirname, '../reports'); + await fs.mkdir(reportsDir, { recursive: true }); + const rawPath = path.join(reportsDir, `raw_${scanId}.log`); + await fs.writeFile(rawPath, outputData); + scanInfo.rawOutputPath = rawPath; + scanInfo.status = 'failed'; + scanInfo.message = `Failed to parse scan result: ${error.message}. Raw output saved to: ${rawPath}`; + } catch (fsErr) { + scanInfo.status = 'failed'; + scanInfo.message = `Failed to parse scan result: ${error.message}. 
Also failed to write raw output: ${fsErr.message}`; + } + })(); + } + } else { + // Try to salvage a result if the python process printed JSON despite non-zero exit + try { + const maybeResult = parseBestJSON(outputData); + scanInfo.status = 'completed'; + scanInfo.progress = 100; + // Keep user-facing message consistent with successful scans + const detailMsg = `Non-zero exit code ${code} but output parsed successfully`; + scanInfo.message = 'Scan completed successfully'; + // Preserve diagnostic detail separately (not exposed unless you add to response) + scanInfo.diagnostic = detailMsg; + scanInfo.result = maybeResult; + } catch (parseErr) { + // Save raw output for non-zero exit as well + (async () => { + try { + const reportsDir = path.join(__dirname, '../reports'); + await fs.mkdir(reportsDir, { recursive: true }); + const rawPath = path.join(reportsDir, `raw_${scanId}.log`); + await fs.writeFile(rawPath, outputData + '\n\nSTDERR:\n' + errorData); + scanInfo.rawOutputPath = rawPath; + scanInfo.status = 'failed'; + scanInfo.message = `Scan failed with code ${code}. Raw output saved to: ${rawPath}`; + } catch (fsErr) { + scanInfo.status = 'failed'; + scanInfo.message = `Scan failed with code ${code}: ${errorData}. Also failed to write raw output: ${fsErr.message}`; + } + })(); + } + } + }); +} + +// Run Python scan synchronously +function runPythonScanSync(targetPath, plugins, outputFormat) { + return new Promise((resolve, reject) => { + const scannerPath = path.join(__dirname, '../Vulnerability_Tool_V2'); + const scriptPath = path.join(scannerPath, 'scanner_v2.py'); + + // Use the requested output format (was hard-coded to 'json') + const args = ['--target', targetPath, '--format', outputFormat || 'json']; + (async () => { + const pythonExec = await resolvePythonExecutable(scannerPath); + if (!pythonExec) { + return reject(new Error('No usable Python executable found. 
Please create Vulnerability_Tool_V2/venv or ensure python3 is available and scanner dependencies are installed.')); + } + + const childEnv = Object.assign({}, process.env, { + PYTHONUTF8: '1', + PYTHONIOENCODING: 'utf-8' + }); + const pythonProcess = spawn(pythonExec, [scriptPath, ...args], { + cwd: scannerPath, + env: childEnv + }); + + let outputData = ''; + let errorData = ''; + + pythonProcess.stdout.on('data', (data) => { + outputData += data.toString(); + }); + + pythonProcess.stderr.on('data', (data) => { + errorData += data.toString(); + }); + + pythonProcess.on('close', (code) => { + const attemptParse = () => { + try { return parseBestJSON(outputData); } catch (e) { return null; } + }; + const resultObj = attemptParse(); + if (resultObj) { + // 任何情况下(包括非零退出)只要成功解析就返回结果 + resolve(resultObj); + return; + } + // 若第一次解析失败,再尝试剪掉 stderr 附加的尾部(常见编码错误行) + let trimmed = outputData.replace(/Unexpected error:[\s\S]*$/i, '').trim(); + if (!resultObj && trimmed !== outputData) { + try { + const salvage = parseBestJSON(trimmed); + return resolve(salvage); + } catch (_) {} + } + // 仍失败,写 raw 输出 + (async () => { + try { + const reportsDir = path.join(__dirname, '../reports'); + await fs.mkdir(reportsDir, { recursive: true }); + const rawPath = path.join(reportsDir, `raw_sync_${Date.now()}.log`); + await fs.writeFile(rawPath, outputData + '\n\nSTDERR:\n' + errorData); + reject(new Error(`Scan failed with code ${code}. Raw output saved to: ${rawPath}`)); + } catch (fsErr) { + reject(new Error(`Scan failed with code ${code}: ${errorData}. 
Also failed to write raw output: ${fsErr.message}`)); + } + })(); + }); + })(); + }); +} + +// Collect JSON candidates from text by tracking balanced braces/brackets +function collectJSONCandidates(text) { + if (!text || typeof text !== 'string') return []; + + const candidates = []; + const len = text.length; + let inString = false; + let escape = false; + let depth = 0; + let start = -1; + + for (let i = 0; i < len; i++) { + const ch = text[i]; + if (inString) { + if (escape) { escape = false; } + else if (ch === '\\') { escape = true; } + else if (ch === '"') { inString = false; } + continue; + } + if (ch === '"') { inString = true; continue; } + + if ((ch === '{' || ch === '[') && start === -1) { + start = i; + depth = 1; + continue; + } + + if (start !== -1) { + if (ch === '{' || ch === '[') depth++; + else if (ch === '}' || ch === ']') { + depth--; + if (depth === 0) { + candidates.push(text.substring(start, i + 1).trim()); + start = -1; + } + } + } + } + return candidates; +} + +// Attempt to parse the best JSON candidate from text, with progressive trimming if needed +function parseBestJSON(text) { + const candidates = collectJSONCandidates(text); + if (!candidates || candidates.length === 0) throw new Error('No JSON object or array found in output'); + + const maxTrimAttempts = 200; // bounded attempts to trim tail + for (let ci = candidates.length - 1; ci >= 0; ci--) { + let cand = candidates[ci]; + // try direct parse + try { + return JSON.parse(cand); + } catch (err) { + // if parse failed, try trimming tail progressively (but bounded) + for (let t = 0; t < maxTrimAttempts && cand.length > 2; t++) { + // remove up to t+1 chars from end + const newLen = Math.max(0, cand.length - (t + 1)); + const substr = cand.substring(0, newLen).trim(); + try { + return JSON.parse(substr); + } catch (e2) { + // continue trimming + } + } + } + } + + throw new Error('Failed to parse any JSON candidate from output'); +} + +// Generate HTML report +function 
generateHTMLReport(scanResult) { + const { summary, findings } = scanResult; + + return ` + + + + NutriHelp Vulnerability Scan Report + + + +
+

🔒 NutriHelp Vulnerability Scanner V2.0

+

Scan Time: ${new Date().toISOString()}

+
+ +
+
+

${summary.files_scanned}

+

Files Scanned

+
+
+

${findings.length}

+

Total Issues

+
+
+

${summary.by_severity.CRITICAL || 0}

+

Critical

+
+
+

${summary.by_severity.HIGH || 0}

+

High

+
+
+ +

📋 Detailed Findings

+ ${findings.map(finding => ` +
+

${finding.title} (${finding.severity})

+

File: ${finding.file_path}

+

Description: ${finding.description}

+

Plugin: ${finding.plugin_name}

+
+ `).join('')} + +`; +} + +module.exports = router; \ No newline at end of file diff --git a/scripts/bootstrap.js b/scripts/bootstrap.js new file mode 100644 index 0000000..939d477 --- /dev/null +++ b/scripts/bootstrap.js @@ -0,0 +1,74 @@ +#!/usr/bin/env node +/** + * bootstrap.js + * One-shot developer setup script: Node deps, scanner venv deps, env template, validation. + * Modes: + * full (default) - Used by "npm run setup" (hard fail on validation errors) + * postinstall - Used automatically after npm install (soft fail: warns only) + */ +const { spawnSync } = require('child_process'); +const fs = require('fs'); +const path = require('path'); + +const modeArg = process.argv.find(a => a.startsWith('--mode=')); +const mode = modeArg ? modeArg.split('=')[1] : 'full'; +const soft = mode === 'postinstall'; + +function log(msg){ console.log(`[bootstrap] ${msg}`); } +function warn(msg){ console.warn(`[bootstrap] WARN: ${msg}`); } +function run(cmd,args,opts){ + const r = spawnSync(cmd,args,Object.assign({stdio:'inherit'},opts)); + if (r.status !== 0) { + console.error(`Command failed: ${cmd} ${args.join(' ')}`); + if (!soft) process.exit(r.status || 1); + else warn(`Continuing despite failure (mode=${mode})`); + } +} + +// 1. Install Node dependencies if node_modules missing +if (!fs.existsSync(path.join(__dirname,'..','node_modules'))) { + log('Installing Node dependencies (npm ci fallback to npm install)...'); + let res = spawnSync('npm',['ci'],{stdio:'inherit'}); + if (res.status !== 0) { + log('npm ci failed, trying npm install'); + res = spawnSync('npm',['install'],{stdio:'inherit'}); + if (res.status !== 0) { + if (!soft) process.exit(res.status); + else warn('Node dependency installation failed during postinstall mode. Project may be unusable.'); + } + } +} else { + log('node_modules present, skipping npm install'); +} + +// 2. 
Ensure .env (create from example if available) +const envPath = path.join(__dirname,'..','.env'); +const examplePath = path.join(__dirname,'..','.env.example'); +if (!fs.existsSync(envPath)) { + if (fs.existsSync(examplePath)) { + fs.copyFileSync(examplePath, envPath); + log('Created .env from .env.example'); + } else { + fs.writeFileSync(envPath, '# Auto-generated minimal env (edit with real internal secrets)\nJWT_SECRET=change_me_replace_before_prod\nSUPABASE_URL=your_supabase_url\nSUPABASE_ANON_KEY=your_public_anon_key\nPORT=3000\n'); + log('Generated minimal .env (placeholders).'); + } +} else { + log('.env already exists, not touching'); +} + +// 3. Prepare scanner (venv + deps) +log('Preparing vulnerability scanner environment...'); +run(process.execPath, [path.join(__dirname,'prepareScanner.js')]); + +// 4. Validate environment +log('Validating environment variables...'); +const val = spawnSync('npm',['run','validate-env'],{stdio:'inherit'}); +if (val.status !== 0) { + if (soft) { + warn('Environment validation reported issues (non-fatal in postinstall mode).'); + } else { + process.exit(val.status); + } +} + +console.log(`\n✅ Bootstrap complete (mode=${mode}). You can now run: npm start`); diff --git a/scripts/ci_check_vuln.py b/scripts/ci_check_vuln.py new file mode 100644 index 0000000..891ec53 --- /dev/null +++ b/scripts/ci_check_vuln.py @@ -0,0 +1,47 @@ +#!/usr/bin/env python3 +"""CI helper: check vulnerability_report.json and fail if CRITICAL findings exist. 
+ +Exit codes: + 0 - no critical findings + 1 - error reading file or file missing + 2 - critical findings found +""" +import json +import sys +from pathlib import Path + + +def main(): + fn = Path('vulnerability_report.json') + if not fn.exists(): + print('vulnerability_report.json not found', file=sys.stderr) + return 1 + + try: + data = json.loads(fn.read_text(encoding='utf-8')) + except Exception as e: + print('Failed to read vulnerability_report.json:', e, file=sys.stderr) + return 1 + + try: + bysev = data.get('summary', {}).get('by_severity', {}) + crit = int(bysev.get('CRITICAL', 0)) + except Exception: + crit = 0 + + if crit > 0: + print(f'🚨 Found {crit} CRITICAL vulnerability(ies). Failing job as requested.') + findings = data.get('findings', []) or [] + topcrit = [f for f in findings if str(f.get('severity', '')).upper() == 'CRITICAL'] + for i, f in enumerate(topcrit[:5], 1): + title = f.get('title') or f.get('rule_name') or 'No title' + path = f.get('file_path') or f.get('file') or '' + print(f'{i}. {title} — {path}') + return 2 + + print('No critical findings. Proceeding.') + return 0 + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/scripts/ensureScannerReady.js b/scripts/ensureScannerReady.js new file mode 100644 index 0000000..d64be4f --- /dev/null +++ b/scripts/ensureScannerReady.js @@ -0,0 +1,43 @@ +#!/usr/bin/env node +/** + * ensureScannerReady.js + * Lightweight check to confirm scanner venv & core dependencies exist; if not, call prepareScanner. + */ +const fs = require('fs'); +const path = require('path'); +const { spawnSync } = require('child_process'); + +const scannerRoot = path.join(__dirname, '..', 'Vulnerability_Tool_V2'); +const venvDir = path.join(scannerRoot, 'venv'); +const markerPip = process.platform === 'win32' ? 
path.join(venvDir,'Scripts','pip.exe') : path.join(venvDir,'bin','pip'); + +function log(m){ console.log(`[ensure-scanner] ${m}`); } +function run(cmd, args, opts={}){ return spawnSync(cmd,args,Object.assign({encoding:'utf8'},opts)); } + +if (!fs.existsSync(scannerRoot)) { + log('Scanner root not found, nothing to ensure.'); + process.exit(0); +} + +let needPrepare = false; +if (!fs.existsSync(venvDir)) needPrepare = true; +if (!fs.existsSync(markerPip)) needPrepare = true; + +// quick module import probe (yaml, jinja2) using venv python if present +if (!needPrepare) { + const pyExe = process.platform === 'win32' ? path.join(venvDir,'Scripts','python.exe') : path.join(venvDir,'bin','python'); + if (fs.existsSync(pyExe)) { + const probe = run(pyExe, ['-c','import yaml,jinja2']); + if (probe.status !== 0) needPrepare = true; + } else { + needPrepare = true; + } +} + +if (needPrepare) { + log('Scanner environment incomplete; running prepare-scanner'); + const prep = run(process.execPath, [path.join(__dirname,'prepareScanner.js')], { stdio:'inherit' }); + process.exit(prep.status || 0); +} else { + log('Scanner environment is ready.'); +} diff --git a/scripts/gen_swagger.js b/scripts/gen_swagger.js new file mode 100644 index 0000000..b0dc115 --- /dev/null +++ b/scripts/gen_swagger.js @@ -0,0 +1,26 @@ +const yaml = require('yamljs'); +const swaggerJSDoc = require('swagger-jsdoc'); +const path = require('path'); +const fs = require('fs'); + +const base = yaml.load(path.join(process.cwd(), 'index.yaml')); +const opts = { + swaggerDefinition: { + openapi: base.openapi || base.swagger || '3.0.0', + info: base.info || { title: 'temp', version: '1.0.0' }, + servers: base.servers || [{ url: 'http://localhost' }] + }, + apis: [path.join(process.cwd(), 'routes', '**', '*.js'), path.join(process.cwd(), 'routes', '*.js')] +}; + +try { + const gen = swaggerJSDoc(opts); + const merged = JSON.parse(JSON.stringify(base)); + merged.paths = Object.assign({}, merged.paths || {}, 
gen.paths || {}); + merged.components = Object.assign({}, merged.components || {}, gen.components || {}); + const p = merged.paths['/api/scanner/scan']; + console.log(JSON.stringify(p, null, 2)); +} catch (e) { + console.error('ERROR', e && e.stack ? e.stack : e); + process.exit(1); +} diff --git a/scripts/prepareScanner.js b/scripts/prepareScanner.js new file mode 100644 index 0000000..b901403 --- /dev/null +++ b/scripts/prepareScanner.js @@ -0,0 +1,90 @@ +#!/usr/bin/env node +/** + * prepareScanner.js + * Recreates Python virtual environment for Vulnerability_Tool_V2 (idempotent) and installs dependencies. + * Safe to run multiple times. Skips work if already up to date. Gracefully degrades if Python is missing. + */ +const { spawnSync } = require('child_process'); +const fs = require('fs'); +const path = require('path'); + +const scannerRoot = path.join(__dirname, '..', 'Vulnerability_Tool_V2'); +const reqFile = path.join(scannerRoot, 'requirements.txt'); +const venvDir = path.join(scannerRoot, 'venv'); + +function log(msg){ console.log(`[prepare-scanner] ${msg}`); } +function warn(msg){ console.warn(`[prepare-scanner] WARN: ${msg}`); } +function err(msg){ console.error(`[prepare-scanner] ERROR: ${msg}`); } + +if (!fs.existsSync(scannerRoot)) { + warn(`Scanner directory not found at ${scannerRoot}, skipping.`); + process.exit(0); +} + +// Determine python executable candidates (prefer explicit override and local project .venv before global) +const localProjectVenv = process.platform === 'win32' + ? 
path.join(__dirname,'..','.venv','Scripts','python.exe') + : path.join(__dirname,'..','.venv','bin','python'); +const envOverride = process.env.PYTHON_EXECUTABLE && process.env.PYTHON_EXECUTABLE.trim(); +const pythonCandidates = [envOverride, localProjectVenv, 'python3', 'python', 'py'].filter(Boolean); +let pythonExe = null; +for (const c of pythonCandidates) { + try { + const res = spawnSync(c, ['--version'], { encoding: 'utf8' }); + if (!res.error && res.status === 0) { pythonExe = c; break; } + } catch (_) {} +} +if (!pythonExe) { + warn('No usable python interpreter (exit status 0) found. Skipping scanner setup.'); + warn('API will run; scanner endpoints will be unavailable until Python is installed.'); + process.exit(0); +} + +// Create venv if missing +if (!fs.existsSync(venvDir)) { + log(`Creating virtual environment: ${pythonExe} -m venv venv`); + const create = spawnSync(pythonExe, ['-m','venv','venv'], { cwd: scannerRoot, stdio:'inherit' }); + if (create.status !== 0) { + err('Failed to create scanner venv. You may create it manually then rerun this script.'); + process.exit(0); // degrade gracefully + } +} else { + log('Scanner venv already exists, skipping creation'); +} + +// Locate pip +const pipPath = process.platform === 'win32' + ? path.join(venvDir,'Scripts','pip.exe') + : path.join(venvDir,'bin','pip'); +if (!fs.existsSync(pipPath)) { + err(`pip not found at ${pipPath}`); + process.exit(1); +} + +if (!fs.existsSync(reqFile)) { + warn('requirements.txt not found, skipping dependency install'); + process.exit(0); +} + +// Dependency change detection marker +const marker = path.join(venvDir, '.deps_hash'); +let needInstall = true; +try { + const reqStat = fs.statSync(reqFile).mtimeMs; + const markerData = fs.existsSync(marker) ? 
fs.readFileSync(marker,'utf8') : ''; + if (markerData.trim() === String(reqStat)) needInstall = false; else fs.writeFileSync(marker, String(reqStat)); +} catch { /* ignore */ } + +if (!needInstall) { + log('Dependencies already up to date, skipping pip install'); + process.exit(0); +} + +log('Installing Python scanner dependencies...'); +const install = spawnSync(pipPath, ['install','-r','requirements.txt'], { cwd: scannerRoot, stdio:'inherit' }); +if (install.status !== 0) { + err('pip install failed'); + process.exit(1); +} +log('Scanner dependencies installed successfully.'); + diff --git a/scripts/rename_reports_security_to_vulnerability.py b/scripts/rename_reports_security_to_vulnerability.py new file mode 100644 index 0000000..5e1a60c --- /dev/null +++ b/scripts/rename_reports_security_to_vulnerability.py @@ -0,0 +1,131 @@ +#!/usr/bin/env python3 +""" +Safe batch rename script: +- Moves original files matching security_report_* and security_result_* into reports/legacy_backup/ +- Generates new filenames using timestamp extracted from filename (YYYYMMDD_HHMMSS) or file mtime +- Writes a CSV mapping original -> new in reports/legacy_backup/rename_map.csv +- Dry-run mode lists planned actions; use --apply to perform +""" +import argparse +import csv +import os +import re +import shutil +from datetime import datetime + +ROOT = os.path.dirname(os.path.dirname(__file__)) +REPORTS_DIR = os.path.join(ROOT, 'reports') +BACKUP_DIR = os.path.join(REPORTS_DIR, 'legacy_backup') + +PAT_REPORT = re.compile(r'security_report_(?P\d{8}_\d{6})(?:_(?P.+?))?\.html$') +PAT_RESULT = re.compile(r'security_result_(?P\d{8}_\d{6})(?:_(?P.+?))?\.json$') + + +def find_candidates(): + files = os.listdir(REPORTS_DIR) + candidates = [] + for fn in files: + if PAT_REPORT.match(fn) or PAT_RESULT.match(fn): + candidates.append(fn) + return sorted(candidates) + + +def extract_timestamp(fn): + m = PAT_REPORT.match(fn) or PAT_RESULT.match(fn) + if m: + ts = m.group('ts') + tag = m.group('tag') + 
return ts, tag + # fallback: use mtime + p = os.path.join(REPORTS_DIR, fn) + st = os.path.getmtime(p) + ts = datetime.fromtimestamp(st).strftime('%Y%m%d_%H%M%S') + return ts, None + + +def plan_rename(fn): + ts, tag = extract_timestamp(fn) + if fn.startswith('security_report_'): + newbase = f'Vulnerability_Scan_Report_{ts}' + ext = '.html' + else: + newbase = f'Vulnerability_Scan_Result_{ts}' + ext = '.json' + if tag: + newbase = newbase + '_' + tag + newfn = newbase + ext + return newfn + + +def ensure_backup_dir(): + os.makedirs(BACKUP_DIR, exist_ok=True) + + +def perform(dry_run=True): + candidates = find_candidates() + if not candidates: + print('No candidate files found.') + return 0 + plan = [] + for fn in candidates: + src = os.path.join(REPORTS_DIR, fn) + newfn = plan_rename(fn) + dst = os.path.join(REPORTS_DIR, newfn) + # avoid overwriting existing target: if exists, append a counter + if os.path.exists(dst): + base, ext = os.path.splitext(newfn) + i = 1 + while True: + alt = f"{base}._{i}{ext}" + altpath = os.path.join(REPORTS_DIR, alt) + if not os.path.exists(altpath): + dst = altpath + newfn = alt + break + i += 1 + plan.append((fn, newfn, src, dst)) + + print(f'Planned renames: {len(plan)}') + for old, newfn, src, dst in plan[:20]: + print(f'{old} -> {newfn}') + if len(plan) > 20: + print('...') + + if dry_run: + print('\nDry-run mode; no files moved. 
Use --apply to execute the moves.') + return 0 + + # perform + ensure_backup_dir() + map_csv = os.path.join(BACKUP_DIR, 'rename_map.csv') + with open(map_csv, 'w', newline='') as fh: + writer = csv.writer(fh) + writer.writerow(['original', 'new', 'backup_path']) + for old, newfn, src, dst in plan: + # move original to backup + bak = os.path.join(BACKUP_DIR, old) + if not os.path.exists(bak): + shutil.move(src, bak) + else: + # if backup exists, append counter + base, ext = os.path.splitext(old) + i = 1 + while True: + bak_alt = os.path.join(BACKUP_DIR, f"{base}._{i}{ext}") + if not os.path.exists(bak_alt): + shutil.move(src, bak_alt) + bak = bak_alt + break + i += 1 + # copy backup to new name in reports dir + shutil.copy(bak, dst) + writer.writerow([old, newfn, bak]) + print(f'Moved originals to {BACKUP_DIR} and wrote mapping to {map_csv}') + return 0 + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--apply', action='store_true', help='Actually perform moves') + args = parser.parse_args() + perform(dry_run=not args.apply) diff --git a/server.js b/server.js index 129f9c7..cdd6c54 100644 --- a/server.js +++ b/server.js @@ -15,6 +15,14 @@ const helmet = require('helmet'); const cors = require("cors"); const swaggerUi = require("swagger-ui-express"); const yaml = require("yamljs"); +// swagger-jsdoc will be used at runtime to include JSDoc @swagger comments from route files +let swaggerJSDoc; +try { + swaggerJSDoc = require('swagger-jsdoc'); +} catch (e) { + // swagger-jsdoc may not be installed in some environments; we will fall back to static index.yaml + swaggerJSDoc = null; +} const { exec } = require("child_process"); const rateLimit = require('express-rate-limit'); const uploadRoutes = require('./routes/uploadRoutes'); @@ -100,7 +108,36 @@ const limiter = rateLimit({ app.use(limiter); // Swagger -const swaggerDocument = yaml.load("./index.yaml"); +let swaggerDocument = yaml.load("./index.yaml"); +// Remove externalDocs 
if present to avoid CORS issues +if (swaggerDocument && swaggerDocument.externalDocs) { + delete swaggerDocument.externalDocs; +} + +// If swagger-jsdoc is available, attempt to generate docs from JSDoc comments and merge paths +if (swaggerJSDoc) { + try { + const options = { + definition: { + openapi: '3.0.0', + }, + // Scan route files and controllers for @swagger JSDoc comments + apis: ["./routes/**/*.js", "./controller/**/*.js"], + }; + const generated = swaggerJSDoc(options); + // Merge generated.paths into the static swaggerDocument.paths (generated takes precedence) + swaggerDocument.paths = Object.assign({}, swaggerDocument.paths || {}, generated.paths || {}); + // Merge components.schemas if present + if (generated.components && generated.components.schemas) { + swaggerDocument.components = swaggerDocument.components || {}; + swaggerDocument.components.schemas = Object.assign({}, swaggerDocument.components.schemas || {}, generated.components.schemas); + } + } catch (e) { + console.error('Failed to generate swagger from JSDoc:', e && e.message ? e.message : e); + // keep swaggerDocument as loaded from index.yaml + } +} + app.use("/api-docs", swaggerUi.serve, swaggerUi.setup(swaggerDocument)); // Response time monitoring app.use(responseTimeLogger); diff --git a/test-Supabse.js b/test-Supabse.js new file mode 100644 index 0000000..d6362b3 --- /dev/null +++ b/test-Supabse.js @@ -0,0 +1,57 @@ +// testSupabase.js +const { createClient } = require('@supabase/supabase-js'); +require('dotenv').config(); + +const supabaseUrl = process.env.SUPABASE_URL; +const supabaseKey = process.env.SUPABASE_ANON_KEY; + +console.log('SUPABASE_URL:', supabaseUrl ? 'SET' : 'MISSING'); +console.log('SUPABASE_ANON_KEY:', supabaseKey ? 'SET' : 'MISSING'); + +const supabase = createClient(supabaseUrl, supabaseKey); + +async function testSecurityAssessmentsTable() { + console.log('Testing security_assessments table...'); + + try { + // 1. 测试查询权限 + console.log('\n1. 
Testing SELECT...'); + let { data: queryData, error: queryError } = await supabase + .from('security_assessments') + .select('*') + .limit(1); + + if (queryError) { + console.error('Query Error:', queryError); + } else { + console.log('Query successful, records found:', queryData.length); + } + + // 2. 测试插入权限 + console.log('\n2. Testing INSERT...'); + let { data: insertData, error: insertError } = await supabase + .from('security_assessments') + .insert([{ + timestamp: new Date().toISOString(), + overall_score: 75, + total_checks: 8, + passed_checks: 6, + failed_checks: 1, + warnings: 1, + critical_issues: 0, + risk_level: 'low', + detailed_results: { test: 'connection_test' } + }]); + + if (insertError) { + console.error('Insert Error:', insertError); + } else { + console.log('Insert successful:', insertData); + } + + } catch (err) { + console.error('Connection failed:', err.message); + } +} + +testSecurityAssessmentsTable(); \ No newline at end of file diff --git a/vulnerability_report.json b/vulnerability_report.json new file mode 100644 index 0000000..fc77778 --- /dev/null +++ b/vulnerability_report.json @@ -0,0 +1,710 @@ +{ + "scan_id": "fdb7026f-d7c3-414f-87fb-9b3a31c383aa", + "target": ".", + "timestamp": "2025-09-19T05:39:04.909528", + "findings": [ + { + "title": "Missing JWT Protection: GET /", + "severity": "MEDIUM", + "file_path": "jwt server.js", + "line_number": 11, + "description": "API endpoint GET / lacks JWT authentication middleware", + "plugin_name": "JWTMissingProtectionPlugin", + "recommendation": { + "summary": "Protect the GET / endpoint with authentication middleware.", + "steps": [ + "Import authentication middleware: const authenticateToken = require('../middleware/authenticateToken');", + "Add middleware to route: router.get('/', authenticateToken, (req, res) => { /* handler */ });", + "Ensure JWT configuration is secure: use strong secrets, set appropriate expiration, and handle errors properly." 
+ ], + "code": "router.get('/', authenticateToken, (req, res) => {\n // Your route handler\n});" + }, + "rule_id": null, + "rule_name": null, + "rule_mode": null, + "confidence": null, + "route": "/", + "http_method": "GET" + }, + { + "title": "Missing JWT Protection: POST /", + "severity": "MEDIUM", + "file_path": "routes/imageClassification.js", + "line_number": 19, + "description": "API endpoint POST / lacks JWT authentication middleware", + "plugin_name": "JWTMissingProtectionPlugin", + "recommendation": { + "summary": "Protect the POST / endpoint with authentication middleware.", + "steps": [ + "Import authentication middleware: const authenticateToken = require('../middleware/authenticateToken');", + "Add middleware to route: router.post('/', authenticateToken, (req, res) => { /* handler */ });", + "Ensure JWT configuration is secure: use strong secrets, set appropriate expiration, and handle errors properly." + ], + "code": "router.post('/', authenticateToken, (req, res) => {\n // Your route handler\n});" + }, + "rule_id": null, + "rule_name": null, + "rule_mode": null, + "confidence": null, + "route": "/", + "http_method": "POST" + }, + { + "title": "Missing JWT Protection: POST /", + "severity": "MEDIUM", + "file_path": "routes/upload.js", + "line_number": 5, + "description": "API endpoint POST / lacks JWT authentication middleware", + "plugin_name": "JWTMissingProtectionPlugin", + "recommendation": { + "summary": "Protect the POST / endpoint with authentication middleware.", + "steps": [ + "Import authentication middleware: const authenticateToken = require('../middleware/authenticateToken');", + "Add middleware to route: router.post('/', authenticateToken, (req, res) => { /* handler */ });", + "Ensure JWT configuration is secure: use strong secrets, set appropriate expiration, and handle errors properly." 
+ ], + "code": "router.post('/', authenticateToken, (req, res) => {\n // Your route handler\n});" + }, + "rule_id": null, + "rule_name": null, + "rule_mode": null, + "confidence": null, + "route": "/", + "http_method": "POST" + }, + { + "title": "Missing JWT Protection: POST /", + "severity": "MEDIUM", + "file_path": "routes/waterIntake.js", + "line_number": 5, + "description": "API endpoint POST / lacks JWT authentication middleware", + "plugin_name": "JWTMissingProtectionPlugin", + "recommendation": { + "summary": "Protect the POST / endpoint with authentication middleware.", + "steps": [ + "Import authentication middleware: const authenticateToken = require('../middleware/authenticateToken');", + "Add middleware to route: router.post('/', authenticateToken, (req, res) => { /* handler */ });", + "Ensure JWT configuration is secure: use strong secrets, set appropriate expiration, and handle errors properly." + ], + "code": "router.post('/', authenticateToken, (req, res) => {\n // Your route handler\n});" + }, + "rule_id": null, + "rule_name": null, + "rule_mode": null, + "confidence": null, + "route": "/", + "http_method": "POST" + }, + { + "title": "Missing JWT Protection: POST /", + "severity": "MEDIUM", + "file_path": "routes/signup.js", + "line_number": 11, + "description": "API endpoint POST / lacks JWT authentication middleware", + "plugin_name": "JWTMissingProtectionPlugin", + "recommendation": { + "summary": "Protect the POST / endpoint with authentication middleware.", + "steps": [ + "Import authentication middleware: const authenticateToken = require('../middleware/authenticateToken');", + "Add middleware to route: router.post('/', authenticateToken, (req, res) => { /* handler */ });", + "Ensure JWT configuration is secure: use strong secrets, set appropriate expiration, and handle errors properly." 
+ ], + "code": "router.post('/', authenticateToken, (req, res) => {\n // Your route handler\n});" + }, + "rule_id": null, + "rule_name": null, + "rule_mode": null, + "confidence": null, + "route": "/", + "http_method": "POST" + }, + { + "title": "Missing JWT Protection: GET /ping", + "severity": "MEDIUM", + "file_path": "routes/loginDashboard.js", + "line_number": 11, + "description": "API endpoint GET /ping lacks JWT authentication middleware", + "plugin_name": "JWTMissingProtectionPlugin", + "recommendation": { + "summary": "Protect the GET /ping endpoint with authentication middleware.", + "steps": [ + "Import authentication middleware: const authenticateToken = require('../middleware/authenticateToken');", + "Add middleware to route: router.get('/ping', authenticateToken, (req, res) => { /* handler */ });", + "Ensure JWT configuration is secure: use strong secrets, set appropriate expiration, and handle errors properly." + ], + "code": "router.get('/ping', authenticateToken, (req, res) => {\n // Your route handler\n});" + }, + "rule_id": null, + "rule_name": null, + "rule_mode": null, + "confidence": null, + "route": "/ping", + "http_method": "GET" + }, + { + "title": "Missing JWT Protection: GET /kpi", + "severity": "MEDIUM", + "file_path": "routes/loginDashboard.js", + "line_number": 22, + "description": "API endpoint GET /kpi lacks JWT authentication middleware", + "plugin_name": "JWTMissingProtectionPlugin", + "recommendation": { + "summary": "Protect the GET /kpi endpoint with authentication middleware.", + "steps": [ + "Import authentication middleware: const authenticateToken = require('../middleware/authenticateToken');", + "Add middleware to route: router.get('/kpi', authenticateToken, (req, res) => { /* handler */ });", + "Ensure JWT configuration is secure: use strong secrets, set appropriate expiration, and handle errors properly." 
+ ], + "code": "router.get('/kpi', authenticateToken, (req, res) => {\n // Your route handler\n});" + }, + "rule_id": null, + "rule_name": null, + "rule_mode": null, + "confidence": null, + "route": "/kpi", + "http_method": "GET" + }, + { + "title": "Missing JWT Protection: GET /daily", + "severity": "MEDIUM", + "file_path": "routes/loginDashboard.js", + "line_number": 29, + "description": "API endpoint GET /daily lacks JWT authentication middleware", + "plugin_name": "JWTMissingProtectionPlugin", + "recommendation": { + "summary": "Protect the GET /daily endpoint with authentication middleware.", + "steps": [ + "Import authentication middleware: const authenticateToken = require('../middleware/authenticateToken');", + "Add middleware to route: router.get('/daily', authenticateToken, (req, res) => { /* handler */ });", + "Ensure JWT configuration is secure: use strong secrets, set appropriate expiration, and handle errors properly." + ], + "code": "router.get('/daily', authenticateToken, (req, res) => {\n // Your route handler\n});" + }, + "rule_id": null, + "rule_name": null, + "rule_mode": null, + "confidence": null, + "route": "/daily", + "http_method": "GET" + }, + { + "title": "Missing JWT Protection: GET /dau", + "severity": "MEDIUM", + "file_path": "routes/loginDashboard.js", + "line_number": 37, + "description": "API endpoint GET /dau lacks JWT authentication middleware", + "plugin_name": "JWTMissingProtectionPlugin", + "recommendation": { + "summary": "Protect the GET /dau endpoint with authentication middleware.", + "steps": [ + "Import authentication middleware: const authenticateToken = require('../middleware/authenticateToken');", + "Add middleware to route: router.get('/dau', authenticateToken, (req, res) => { /* handler */ });", + "Ensure JWT configuration is secure: use strong secrets, set appropriate expiration, and handle errors properly." 
+ ], + "code": "router.get('/dau', authenticateToken, (req, res) => {\n // Your route handler\n});" + }, + "rule_id": null, + "rule_name": null, + "rule_mode": null, + "confidence": null, + "route": "/dau", + "http_method": "GET" + }, + { + "title": "Missing JWT Protection: GET /top-failing-ips", + "severity": "MEDIUM", + "file_path": "routes/loginDashboard.js", + "line_number": 45, + "description": "API endpoint GET /top-failing-ips lacks JWT authentication middleware", + "plugin_name": "JWTMissingProtectionPlugin", + "recommendation": { + "summary": "Protect the GET /top-failing-ips endpoint with authentication middleware.", + "steps": [ + "Import authentication middleware: const authenticateToken = require('../middleware/authenticateToken');", + "Add middleware to route: router.get('/top-failing-ips', authenticateToken, (req, res) => { /* handler */ });", + "Ensure JWT configuration is secure: use strong secrets, set appropriate expiration, and handle errors properly." + ], + "code": "router.get('/top-failing-ips', authenticateToken, (req, res) => {\n // Your route handler\n});" + }, + "rule_id": null, + "rule_name": null, + "rule_mode": null, + "confidence": null, + "route": "/top-failing-ips", + "http_method": "GET" + }, + { + "title": "Missing JWT Protection: GET /fail-by-domain", + "severity": "MEDIUM", + "file_path": "routes/loginDashboard.js", + "line_number": 52, + "description": "API endpoint GET /fail-by-domain lacks JWT authentication middleware", + "plugin_name": "JWTMissingProtectionPlugin", + "recommendation": { + "summary": "Protect the GET /fail-by-domain endpoint with authentication middleware.", + "steps": [ + "Import authentication middleware: const authenticateToken = require('../middleware/authenticateToken');", + "Add middleware to route: router.get('/fail-by-domain', authenticateToken, (req, res) => { /* handler */ });", + "Ensure JWT configuration is secure: use strong secrets, set appropriate expiration, and handle errors properly." 
+ ], + "code": "router.get('/fail-by-domain', authenticateToken, (req, res) => {\n // Your route handler\n});" + }, + "rule_id": null, + "rule_name": null, + "rule_mode": null, + "confidence": null, + "route": "/fail-by-domain", + "http_method": "GET" + }, + { + "title": "Missing JWT Protection: POST /", + "severity": "MEDIUM", + "file_path": "routes/login.js", + "line_number": 11, + "description": "API endpoint POST / lacks JWT authentication middleware", + "plugin_name": "JWTMissingProtectionPlugin", + "recommendation": { + "summary": "Protect the POST / endpoint with authentication middleware.", + "steps": [ + "Import authentication middleware: const authenticateToken = require('../middleware/authenticateToken');", + "Add middleware to route: router.post('/', authenticateToken, (req, res) => { /* handler */ });", + "Ensure JWT configuration is secure: use strong secrets, set appropriate expiration, and handle errors properly." + ], + "code": "router.post('/', authenticateToken, (req, res) => {\n // Your route handler\n});" + }, + "rule_id": null, + "rule_name": null, + "rule_mode": null, + "confidence": null, + "route": "/", + "http_method": "POST" + }, + { + "title": "Missing JWT Protection: POST /createRecipe", + "severity": "MEDIUM", + "file_path": "routes/recipe.js", + "line_number": 8, + "description": "API endpoint POST /createRecipe lacks JWT authentication middleware", + "plugin_name": "JWTMissingProtectionPlugin", + "recommendation": { + "summary": "Protect the POST /createRecipe endpoint with authentication middleware.", + "steps": [ + "Import authentication middleware: const authenticateToken = require('../middleware/authenticateToken');", + "Add middleware to route: router.post('/createRecipe', authenticateToken, (req, res) => { /* handler */ });", + "Ensure JWT configuration is secure: use strong secrets, set appropriate expiration, and handle errors properly." 
+ ], + "code": "router.post('/createRecipe', authenticateToken, (req, res) => {\n // Your route handler\n});" + }, + "rule_id": null, + "rule_name": null, + "rule_mode": null, + "confidence": null, + "route": "/createRecipe", + "http_method": "POST" + }, + { + "title": "Missing JWT Protection: POST /", + "severity": "MEDIUM", + "file_path": "routes/recipe.js", + "line_number": 10, + "description": "API endpoint POST / lacks JWT authentication middleware", + "plugin_name": "JWTMissingProtectionPlugin", + "recommendation": { + "summary": "Protect the POST / endpoint with authentication middleware.", + "steps": [ + "Import authentication middleware: const authenticateToken = require('../middleware/authenticateToken');", + "Add middleware to route: router.post('/', authenticateToken, (req, res) => { /* handler */ });", + "Ensure JWT configuration is secure: use strong secrets, set appropriate expiration, and handle errors properly." + ], + "code": "router.post('/', authenticateToken, (req, res) => {\n // Your route handler\n});" + }, + "rule_id": null, + "rule_name": null, + "rule_mode": null, + "confidence": null, + "route": "/", + "http_method": "POST" + }, + { + "title": "Missing JWT Protection: DELETE /", + "severity": "MEDIUM", + "file_path": "routes/recipe.js", + "line_number": 11, + "description": "API endpoint DELETE / lacks JWT authentication middleware", + "plugin_name": "JWTMissingProtectionPlugin", + "recommendation": { + "summary": "Protect the DELETE / endpoint with authentication middleware.", + "steps": [ + "Import authentication middleware: const authenticateToken = require('../middleware/authenticateToken');", + "Add middleware to route: router.delete('/', authenticateToken, (req, res) => { /* handler */ });", + "Ensure JWT configuration is secure: use strong secrets, set appropriate expiration, and handle errors properly." 
+ ], + "code": "router.delete('/', authenticateToken, (req, res) => {\n // Your route handler\n});" + }, + "rule_id": null, + "rule_name": null, + "rule_mode": null, + "confidence": null, + "route": "/", + "http_method": "DELETE" + }, + { + "title": "Missing JWT Protection: GET /", + "severity": "MEDIUM", + "file_path": "routes/recipeNutritionlog.js", + "line_number": 27, + "description": "API endpoint GET / lacks JWT authentication middleware", + "plugin_name": "JWTMissingProtectionPlugin", + "recommendation": { + "summary": "Protect the GET / endpoint with authentication middleware.", + "steps": [ + "Import authentication middleware: const authenticateToken = require('../middleware/authenticateToken');", + "Add middleware to route: router.get('/', authenticateToken, (req, res) => { /* handler */ });", + "Ensure JWT configuration is secure: use strong secrets, set appropriate expiration, and handle errors properly." + ], + "code": "router.get('/', authenticateToken, (req, res) => {\n // Your route handler\n});" + }, + "rule_id": null, + "rule_name": null, + "rule_mode": null, + "confidence": null, + "route": "/", + "http_method": "GET" + }, + { + "title": "Missing JWT Protection: POST /", + "severity": "MEDIUM", + "file_path": "routes/userfeedback.js", + "line_number": 8, + "description": "API endpoint POST / lacks JWT authentication middleware", + "plugin_name": "JWTMissingProtectionPlugin", + "recommendation": { + "summary": "Protect the POST / endpoint with authentication middleware.", + "steps": [ + "Import authentication middleware: const authenticateToken = require('../middleware/authenticateToken');", + "Add middleware to route: router.post('/', authenticateToken, (req, res) => { /* handler */ });", + "Ensure JWT configuration is secure: use strong secrets, set appropriate expiration, and handle errors properly." 
+ ], + "code": "router.post('/', authenticateToken, (req, res) => {\n // Your route handler\n});" + }, + "rule_id": null, + "rule_name": null, + "rule_mode": null, + "confidence": null, + "route": "/", + "http_method": "POST" + }, + { + "title": "Missing JWT Protection: GET /", + "severity": "MEDIUM", + "file_path": "routes/healthNews.js", + "line_number": 44, + "description": "API endpoint GET / lacks JWT authentication middleware", + "plugin_name": "JWTMissingProtectionPlugin", + "recommendation": { + "summary": "Protect the GET / endpoint with authentication middleware.", + "steps": [ + "Import authentication middleware: const authenticateToken = require('../middleware/authenticateToken');", + "Add middleware to route: router.get('/', authenticateToken, (req, res) => { /* handler */ });", + "Ensure JWT configuration is secure: use strong secrets, set appropriate expiration, and handle errors properly." + ], + "code": "router.get('/', authenticateToken, (req, res) => {\n // Your route handler\n});" + }, + "rule_id": null, + "rule_name": null, + "rule_mode": null, + "confidence": null, + "route": "/", + "http_method": "GET" + }, + { + "title": "Missing JWT Protection: POST /", + "severity": "MEDIUM", + "file_path": "routes/healthNews.js", + "line_number": 156, + "description": "API endpoint POST / lacks JWT authentication middleware", + "plugin_name": "JWTMissingProtectionPlugin", + "recommendation": { + "summary": "Protect the POST / endpoint with authentication middleware.", + "steps": [ + "Import authentication middleware: const authenticateToken = require('../middleware/authenticateToken');", + "Add middleware to route: router.post('/', authenticateToken, (req, res) => { /* handler */ });", + "Ensure JWT configuration is secure: use strong secrets, set appropriate expiration, and handle errors properly." 
+ ], + "code": "router.post('/', authenticateToken, (req, res) => {\n // Your route handler\n});" + }, + "rule_id": null, + "rule_name": null, + "rule_mode": null, + "confidence": null, + "route": "/", + "http_method": "POST" + }, + { + "title": "Missing JWT Protection: PUT /", + "severity": "MEDIUM", + "file_path": "routes/healthNews.js", + "line_number": 214, + "description": "API endpoint PUT / lacks JWT authentication middleware", + "plugin_name": "JWTMissingProtectionPlugin", + "recommendation": { + "summary": "Protect the PUT / endpoint with authentication middleware.", + "steps": [ + "Import authentication middleware: const authenticateToken = require('../middleware/authenticateToken');", + "Add middleware to route: router.put('/', authenticateToken, (req, res) => { /* handler */ });", + "Ensure JWT configuration is secure: use strong secrets, set appropriate expiration, and handle errors properly." + ], + "code": "router.put('/', authenticateToken, (req, res) => {\n // Your route handler\n});" + }, + "rule_id": null, + "rule_name": null, + "rule_mode": null, + "confidence": null, + "route": "/", + "http_method": "PUT" + }, + { + "title": "Missing JWT Protection: DELETE /", + "severity": "MEDIUM", + "file_path": "routes/healthNews.js", + "line_number": 238, + "description": "API endpoint DELETE / lacks JWT authentication middleware", + "plugin_name": "JWTMissingProtectionPlugin", + "recommendation": { + "summary": "Protect the DELETE / endpoint with authentication middleware.", + "steps": [ + "Import authentication middleware: const authenticateToken = require('../middleware/authenticateToken');", + "Add middleware to route: router.delete('/', authenticateToken, (req, res) => { /* handler */ });", + "Ensure JWT configuration is secure: use strong secrets, set appropriate expiration, and handle errors properly." 
+ ], + "code": "router.delete('/', authenticateToken, (req, res) => {\n // Your route handler\n});" + }, + "rule_id": null, + "rule_name": null, + "rule_mode": null, + "confidence": null, + "route": "/", + "http_method": "DELETE" + }, + { + "title": "Missing JWT Protection: POST /generate-baseline", + "severity": "MEDIUM", + "file_path": "routes/systemRoutes.js", + "line_number": 50, + "description": "API endpoint POST /generate-baseline lacks JWT authentication middleware", + "plugin_name": "JWTMissingProtectionPlugin", + "recommendation": { + "summary": "Protect the POST /generate-baseline endpoint with authentication middleware.", + "steps": [ + "Import authentication middleware: const authenticateToken = require('../middleware/authenticateToken');", + "Add middleware to route: router.post('/generate-baseline', authenticateToken, (req, res) => { /* handler */ });", + "Ensure JWT configuration is secure: use strong secrets, set appropriate expiration, and handle errors properly." + ], + "code": "router.post('/generate-baseline', authenticateToken, (req, res) => {\n // Your route handler\n});" + }, + "rule_id": null, + "rule_name": null, + "rule_mode": null, + "confidence": null, + "route": "/generate-baseline", + "http_method": "POST" + }, + { + "title": "Missing JWT Protection: GET /common", + "severity": "MEDIUM", + "file_path": "routes/allergyRoutes.js", + "line_number": 99, + "description": "API endpoint GET /common lacks JWT authentication middleware", + "plugin_name": "JWTMissingProtectionPlugin", + "recommendation": { + "summary": "Protect the GET /common endpoint with authentication middleware.", + "steps": [ + "Import authentication middleware: const authenticateToken = require('../middleware/authenticateToken');", + "Add middleware to route: router.get('/common', authenticateToken, (req, res) => { /* handler */ });", + "Ensure JWT configuration is secure: use strong secrets, set appropriate expiration, and handle errors properly." 
+ ], + "code": "router.get('/common', authenticateToken, (req, res) => {\n // Your route handler\n});" + }, + "rule_id": null, + "rule_name": null, + "rule_mode": null, + "confidence": null, + "route": "/common", + "http_method": "GET" + }, + { + "title": "Missing JWT Protection: POST /check", + "severity": "MEDIUM", + "file_path": "routes/allergyRoutes.js", + "line_number": 127, + "description": "API endpoint POST /check lacks JWT authentication middleware", + "plugin_name": "JWTMissingProtectionPlugin", + "recommendation": { + "summary": "Protect the POST /check endpoint with authentication middleware.", + "steps": [ + "Import authentication middleware: const authenticateToken = require('../middleware/authenticateToken');", + "Add middleware to route: router.post('/check', authenticateToken, (req, res) => { /* handler */ });", + "Ensure JWT configuration is secure: use strong secrets, set appropriate expiration, and handle errors properly." + ], + "code": "router.post('/check', authenticateToken, (req, res) => {\n // Your route handler\n});" + }, + "rule_id": null, + "rule_name": null, + "rule_mode": null, + "confidence": null, + "route": "/check", + "http_method": "POST" + }, + { + "title": "Missing JWT Protection: GET /", + "severity": "MEDIUM", + "file_path": "routes/filter.js", + "line_number": 7, + "description": "API endpoint GET / lacks JWT authentication middleware", + "plugin_name": "JWTMissingProtectionPlugin", + "recommendation": { + "summary": "Protect the GET / endpoint with authentication middleware.", + "steps": [ + "Import authentication middleware: const authenticateToken = require('../middleware/authenticateToken');", + "Add middleware to route: router.get('/', authenticateToken, (req, res) => { /* handler */ });", + "Ensure JWT configuration is secure: use strong secrets, set appropriate expiration, and handle errors properly." 
+ ], + "code": "router.get('/', authenticateToken, (req, res) => {\n // Your route handler\n});" + }, + "rule_id": null, + "rule_name": null, + "rule_mode": null, + "confidence": null, + "route": "/", + "http_method": "GET" + }, + { + "title": "Missing JWT Protection: GET /", + "severity": "MEDIUM", + "file_path": "routes/articles.js", + "line_number": 5, + "description": "API endpoint GET / lacks JWT authentication middleware", + "plugin_name": "JWTMissingProtectionPlugin", + "recommendation": { + "summary": "Protect the GET / endpoint with authentication middleware.", + "steps": [ + "Import authentication middleware: const authenticateToken = require('../middleware/authenticateToken');", + "Add middleware to route: router.get('/', authenticateToken, (req, res) => { /* handler */ });", + "Ensure JWT configuration is secure: use strong secrets, set appropriate expiration, and handle errors properly." + ], + "code": "router.get('/', authenticateToken, (req, res) => {\n // Your route handler\n});" + }, + "rule_id": null, + "rule_name": null, + "rule_mode": null, + "confidence": null, + "route": "/", + "http_method": "GET" + }, + { + "title": "Missing JWT Protection: POST /", + "severity": "MEDIUM", + "file_path": "routes/contactus.js", + "line_number": 14, + "description": "API endpoint POST / lacks JWT authentication middleware", + "plugin_name": "JWTMissingProtectionPlugin", + "recommendation": { + "summary": "Protect the POST / endpoint with authentication middleware.", + "steps": [ + "Import authentication middleware: const authenticateToken = require('../middleware/authenticateToken');", + "Add middleware to route: router.post('/', authenticateToken, (req, res) => { /* handler */ });", + "Ensure JWT configuration is secure: use strong secrets, set appropriate expiration, and handle errors properly." 
+ ], + "code": "router.post('/', authenticateToken, (req, res) => {\n // Your route handler\n});" + }, + "rule_id": null, + "rule_name": null, + "rule_mode": null, + "confidence": null, + "route": "/", + "http_method": "POST" + }, + { + "title": "Low Entropy JWT Secret", + "severity": "MEDIUM", + "file_path": ".env", + "line_number": 8, + "description": "JWT secret appears to have low entropy (predictable patterns).", + "plugin_name": "JWTConfigurationPlugin", + "recommendation": "Improve JWT secret security:\n1. Generate a strong secret using crypto:\n const crypto = require('crypto');\n const secret = crypto.randomBytes(64).toString('hex');\n\n2. Use environment-specific secrets\n3. Implement secret rotation\n4. Consider using asymmetric keys for larger systems", + "rule_id": null, + "rule_name": null, + "rule_mode": null, + "confidence": null, + "route": null, + "http_method": null + }, + { + "title": "Low Entropy JWT Secret", + "severity": "MEDIUM", + "file_path": ".env", + "line_number": 10, + "description": "JWT secret appears to have low entropy (predictable patterns).", + "plugin_name": "JWTConfigurationPlugin", + "recommendation": "Improve JWT secret security:\n1. Generate a strong secret using crypto:\n const crypto = require('crypto');\n const secret = crypto.randomBytes(64).toString('hex');\n\n2. Use environment-specific secrets\n3. Implement secret rotation\n4. Consider using asymmetric keys for larger systems", + "rule_id": null, + "rule_name": null, + "rule_mode": null, + "confidence": null, + "route": null, + "http_method": null + }, + { + "title": "Direct JWT Usage Instead of AuthService", + "severity": "MEDIUM", + "file_path": "middleware.js", + "line_number": null, + "description": "Direct jwt.verify() usage detected instead of centralized authService.", + "plugin_name": "JWTConfigurationPlugin", + "recommendation": "Centralize JWT verification:\n1. Create AuthService class\n2. Move all JWT operations to AuthService\n3. 
Use AuthService.verifyToken() in middleware\n4. Add comprehensive error handling", + "rule_id": null, + "rule_name": null, + "rule_mode": null, + "confidence": null, + "route": null, + "http_method": null + }, + { + "title": "Incomplete JWT Error Handling", + "severity": "LOW", + "file_path": "middleware.js", + "line_number": null, + "description": "JWT verification lacks comprehensive error handling.", + "plugin_name": "JWTConfigurationPlugin", + "recommendation": "Implement proper JWT error handling:\n1. Handle TokenExpiredError\n2. Handle JsonWebTokenError\n3. Handle NotBeforeError\n4. Add logging for security events\n5. Return appropriate status codes", + "rule_id": null, + "rule_name": null, + "rule_mode": null, + "confidence": null, + "route": null, + "http_method": null + } + ], + "summary": { + "total": 31, + "files_scanned": 147, + "by_severity": { + "MEDIUM": 30, + "LOW": 1 + }, + "by_plugin": { + "JWTMissingProtectionPlugin": 27, + "JWTConfigurationPlugin": 4 + } + }, + "scan_info": { + "target_path": ".", + "timestamp": "2025-09-19T05:39:04.909528", + "scanner_version": "2.0.0", + "stats": { + "files_scanned": 147, + "plugins_loaded": 3, + "total_findings": 31 + } + } +} \ No newline at end of file