diff --git a/.github/workflows/bundle-analysis.yml b/.github/workflows/bundle-analysis.yml new file mode 100644 index 00000000..a605eb1a --- /dev/null +++ b/.github/workflows/bundle-analysis.yml @@ -0,0 +1,73 @@ +name: Bundle Analysis + +on: + pull_request: + paths: + - 'frontend/**' + branches: [ main, master ] + push: + paths: + - 'frontend/**' + branches: [ main, master ] + +jobs: + bundle-analysis: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Set up Node.js + uses: actions/setup-node@v4 + with: + node-version: '18' + cache: 'npm' + cache-dependency-path: frontend/package-lock.json + + - name: Install Dependencies + run: | + cd frontend + npm ci + + - name: Build for Bundle Analysis + run: | + cd frontend + # Create a production build for analysis + if npm run build --dry-run 2>/dev/null; then + npm run build + else + # Use Expo's build process + npx expo export:web + fi + + - name: Analyze Bundle Size + run: | + cd frontend + # Install bundle analyzer + npm install --no-save webpack-bundle-analyzer + + # Generate bundle stats (adjust path based on your build output) + if [ -d "web-build" ]; then + # Expo web build + npx webpack-bundle-analyzer web-build/static/js/*.js --report --mode static --report-filename bundle-report.html + elif [ -d "dist" ]; then + # Standard React build + npx webpack-bundle-analyzer dist/static/js/*.js --report --mode static --report-filename bundle-report.html + else + echo "No build output found for bundle analysis" + fi + + - name: Upload Bundle Analysis to Codecov + uses: codecov/codecov-action@v4 + if: github.actor != 'dependabot[bot]' + with: + token: ${{ secrets.CODECOV_TOKEN }} + flags: bundle,frontend,javascript + name: "Bundle Analysis" + fail_ci_if_error: false + + - name: Bundle Analysis Skipped + if: github.actor == 'dependabot[bot]' + run: echo "📦 Bundle analysis skipped for Dependabot PR" diff --git a/.github/workflows/run-tests.yml b/.github/workflows/run-tests.yml index 03396aaa..50ed93fa 100644 --- a/.github/workflows/run-tests.yml +++ b/.github/workflows/run-tests.yml @@ -1,4 +1,4 @@ -name: Run Tests +name: Run Tests & Analytics on: pull_request: @@ -12,6 +12,8 @@ jobs: steps: - uses: actions/checkout@v4 + with: + fetch-depth: 0 # Full history for better diff analysis - name: Set up Python uses: actions/setup-python@v5 @@ -24,21 +26,87 @@ jobs: cd backend pip install -r requirements.txt - - name: Run tests + - name: Run Backend Tests with Coverage run: | cd $GITHUB_WORKSPACE export PYTHONPATH=$GITHUB_WORKSPACE:$GITHUB_WORKSPACE/backend - pytest --cov=./backend --cov-report=xml:coverage.xml backend/tests/ - - - name: Upload coverage to Codecov + # Generate coverage with detailed flags + pytest \ + --cov=./backend \ + --cov-report=xml:coverage.xml \ + --cov-report=json:coverage.json \ + --cov-report=lcov:coverage.lcov \ + --junit-xml=test-results.xml \ + --tb=short \ + -v \ + backend/tests/ + + - name: Run Test Analytics Upload + uses: codecov/test-results-action@v1 + if: github.actor != 'dependabot[bot]' + with: + token: ${{ secrets.CODECOV_TOKEN }} + files: test-results.xml + flags: backend,test-analytics + name: "Backend Test Results" + + - name: Upload Coverage to Codecov with Flags uses: codecov/codecov-action@v4 + if: github.actor != 'dependabot[bot]' with: token: ${{ secrets.CODECOV_TOKEN }} - files: ./coverage.xml + files: ./coverage.xml,./coverage.json,./coverage.lcov + flags: backend,python,api + name: "Backend Coverage" fail_ci_if_error: false - # Skip codecov upload for Dependabot PRs since they don't have access to secrets - if: github.actor != 'dependabot[bot]' - + verbose: true + - name: Codecov upload skipped for Dependabot if: github.actor == 'dependabot[bot]' run: echo "📊 Codecov upload skipped for Dependabot PR - tests still run and pass!" + + test-frontend: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Set up Node.js + uses: actions/setup-node@v4 + with: + node-version: '18' + cache: 'npm' + cache-dependency-path: frontend/package-lock.json + + - name: Install Frontend Dependencies + run: | + cd frontend + npm ci + + - name: Run Frontend Tests (if available) + run: | + cd frontend + # Check if test script exists + if npm run test --dry-run 2>/dev/null; then + npm run test -- --coverage --watchAll=false --testResultsProcessor=jest-junit + else + echo "No frontend tests configured yet" + # Create a placeholder test result for analytics + mkdir -p test-results + echo '' > test-results/frontend-results.xml + fi + + - name: Upload Frontend Test Analytics + uses: codecov/test-results-action@v1 + if: github.actor != 'dependabot[bot]' + with: + token: ${{ secrets.CODECOV_TOKEN }} + files: frontend/test-results/frontend-results.xml + flags: frontend,javascript,react-native + name: "Frontend Test Results" + + - name: Frontend Analytics Upload Skipped + if: github.actor == 'dependabot[bot]' + run: echo "📊 Frontend test analytics skipped for Dependabot PR" diff --git a/backend/pytest.ini b/backend/pytest.ini index 606976f3..2e7848c9 100644 --- a/backend/pytest.ini +++ b/backend/pytest.ini @@ -9,5 +9,23 @@ python_files = test_*.py tests_*.py *_test.py *_tests.py python_classes = Test* Tests* python_functions = test_* -# Optional: Add default command line options if desired -# addopts = --verbose +# Enhanced options for better test analytics and coverage +addopts = + --verbose + --tb=short + --strict-markers + --disable-warnings + --durations=10 + --cov-branch + --cov-fail-under=0 + +# Test markers for categorization +markers = + unit: Unit tests + integration: Integration tests + auth: Authentication related tests + expenses: Expense management tests + groups: Group management tests + user: User management tests + slow: Slow running tests + api: API endpoint tests diff --git a/codecov.yml b/codecov.yml index d57ccda4..c94a00c3 100644 --- a/codecov.yml +++ b/codecov.yml @@ -1,5 +1,6 @@ # Codecov configuration for SplitWiser # Documentation: https://docs.codecov.com/docs/codecov-yaml +# Features: Test Analytics, Flags, Components, Bundle Analysis coverage: status: @@ -7,15 +8,94 @@ coverage: default: target: auto threshold: 1% - # Allow slight coverage drops for dependency updates if_ci_failed: error + backend: + target: auto + threshold: 1% + flags: + - backend + paths: + - backend/ + frontend: + target: auto + threshold: 1% + flags: + - frontend + paths: + - frontend/ patch: default: target: auto threshold: 1% - # More lenient for dependency-only changes if_ci_failed: error +# Flags for different parts of the codebase +flag_management: + default_rules: + carryforward: true + statuses: + - type: project + target: auto + threshold: 1% + - type: patch + target: auto + threshold: 1% + +# Components for modular coverage tracking +component_management: + default_rules: + statuses: + - type: project + target: auto + - type: patch + target: auto + individual_components: + - component_id: backend-auth + name: "Authentication System" + paths: + - backend/app/auth/ + - backend/tests/auth/ + flag_regexes: + - backend + - component_id: backend-expenses + name: "Expense Management" + paths: + - backend/app/expenses/ + - backend/tests/expenses/ + flag_regexes: + - backend + - component_id: backend-groups + name: "Group Management" + paths: + - backend/app/groups/ + - backend/tests/groups/ + flag_regexes: + - backend + - component_id: backend-user + name: "User Management" + paths: + - backend/app/user/ + - backend/tests/user/ + flag_regexes: + - backend + - component_id: frontend-core + name: "Frontend Core" + paths: + - frontend/ + flag_regexes: + - frontend + +# Test Analytics configuration +test_analytics: + # Track test performance and flakiness + notify: + after_n_builds: 5 + slack: + url: "https://hooks.slack.com/services/your/slack/webhook" + threshold: 5% + message: "Test suite analytics show concerning trends" + only_pulls: false + # Ignore files that don't need coverage ignore: - "**/__pycache__/**" diff --git a/docs/codecov-analytics.md b/docs/codecov-analytics.md new file mode 100644 index 00000000..717eeceb --- /dev/null +++ b/docs/codecov-analytics.md @@ -0,0 +1,122 @@ +# Codecov Analytics Configuration + +This document describes the comprehensive Codecov analytics setup for SplitWiser. + +## 📊 **Features Implemented** + +### 1. **Test Analytics** +- **Real-time test performance tracking** +- **Flaky test detection** +- **Test execution time monitoring** +- **Test failure pattern analysis** + +### 2. **Coverage Flags** +- `backend` - Backend Python code coverage +- `python` - Python-specific coverage +- `api` - API endpoint coverage +- `frontend` - Frontend JavaScript coverage +- `javascript` - JavaScript-specific coverage +- `react-native` - React Native component coverage +- `bundle` - Bundle size analysis +- `test-analytics` - Test performance metrics + +### 3. **Components** +- **backend-auth** - Authentication System +- **backend-expenses** - Expense Management +- **backend-groups** - Group Management +- **backend-user** - User Management +- **frontend-core** - Frontend Core + +### 4. **Bundle Analysis** +- **Frontend bundle size tracking** +- **Dependency impact analysis** +- **Performance regression detection** + +## 🚀 **Usage** + +### **Viewing Analytics** +1. **Test Analytics**: https://app.codecov.io/gh/Devasy23/splitwiser/tests +2. **Bundle Analysis**: https://app.codecov.io/gh/Devasy23/splitwiser/bundles +3. **Components**: Available in Codecov dashboard under Components tab + +### **Flags in Action** +- Each PR shows coverage for different flags +- Component-specific coverage tracking +- Historical trend analysis + +### **Test Categories** +Use pytest markers for better organization: +```python +@pytest.mark.unit +@pytest.mark.auth +def test_login(): + pass + +@pytest.mark.integration +@pytest.mark.api +def test_api_endpoint(): + pass +``` + +## 📈 **Analytics Benefits** + +### **Test Analytics** +- ✅ **Identify slow tests** - Optimize test suite performance +- ✅ **Detect flaky tests** - Improve test reliability +- ✅ **Track test trends** - Monitor test suite health +- ✅ **Performance insights** - Data-driven optimization + +### **Coverage Flags** +- ✅ **Modular tracking** - Separate backend/frontend coverage +- ✅ **Feature-specific** - Component-level coverage +- ✅ **Historical data** - Trend analysis over time +- ✅ **PR insights** - Impact of changes on coverage + +### **Bundle Analysis** +- ✅ **Size tracking** - Monitor bundle growth +- ✅ **Dependency impact** - See how changes affect bundle size +- ✅ **Performance metrics** - Load time implications +- ✅ **Regression detection** - Catch size increases early + +## 🔧 **Configuration Files** + +### **codecov.yml** +- Flag management rules +- Component definitions +- Test analytics settings +- Coverage thresholds + +### **GitHub Workflows** +- **run-tests.yml** - Main test execution with analytics +- **bundle-analysis.yml** - Frontend bundle analysis + +### **pytest.ini** +- Enhanced test markers +- Coverage configuration +- Performance tracking options + +## 🎯 **Best Practices** + +### **Writing Tests** +1. **Use descriptive markers** for better categorization +2. **Keep tests fast** - Use `@pytest.mark.slow` for slow tests +3. **Modular testing** - Group related tests together +4. **Clear naming** - Descriptive test function names + +### **Monitoring** +1. **Regular review** of test analytics dashboard +2. **Address flaky tests** promptly +3. **Monitor bundle size** trends +4. **Review component coverage** for each feature + +### **PR Reviews** +1. **Check coverage impact** using flags +2. **Review bundle size changes** for frontend PRs +3. **Monitor test performance** changes +4. **Ensure component coverage** meets standards + +## 🔗 **Links** +- [Codecov Dashboard](https://app.codecov.io/gh/Devasy23/splitwiser) +- [Test Analytics](https://app.codecov.io/gh/Devasy23/splitwiser/tests) +- [Bundle Analysis](https://app.codecov.io/gh/Devasy23/splitwiser/bundles) +- [Codecov Documentation](https://docs.codecov.com/)