From bee43667784090b2ae2199e46e32d064a56d0886 Mon Sep 17 00:00:00 2001
From: Gauri
Date: Fri, 30 Jan 2026 20:40:21 +0530
Subject: [PATCH 1/4] Add Manage Summary Report feature with backend and frontend changes

---
 .../cb863878e0d1_add_contest_report_table.py  |   72 ++
 ...f418_merge_report_feature_and_outreach_.py |   25 +
 backend/app/__init__.py                       |    3 +
 backend/app/models/contest_report.py          |  135 +++
 backend/app/routes/report_routes.py           |  422 +++++++
 backend/app/utils/report_builder.py           | 1035 +++++++++++++++++
 backend/app/utils/report_queries.py           |  169 +++
 frontend/src/views/ContestView.vue            |  317 ++++-
 8 files changed, 2168 insertions(+), 10 deletions(-)
 create mode 100644 backend/alembic/versions/cb863878e0d1_add_contest_report_table.py
 create mode 100644 backend/alembic/versions/e4e56960f418_merge_report_feature_and_outreach_.py
 create mode 100644 backend/app/models/contest_report.py
 create mode 100644 backend/app/routes/report_routes.py
 create mode 100644 backend/app/utils/report_builder.py
 create mode 100644 backend/app/utils/report_queries.py

diff --git a/backend/alembic/versions/cb863878e0d1_add_contest_report_table.py b/backend/alembic/versions/cb863878e0d1_add_contest_report_table.py
new file mode 100644
index 0000000..2a6a7f9
--- /dev/null
+++ b/backend/alembic/versions/cb863878e0d1_add_contest_report_table.py
@@ -0,0 +1,72 @@
+"""add_contest_report_table
+
+Revision ID: cb863878e0d1
+Revises: de4074ff4ff8
+Create Date: 2026-01-21 10:30:00.000000
+
+"""
+from alembic import op
+import sqlalchemy as sa
+
+
+revision = 'cb863878e0d1'
+down_revision = 'de4074ff4ff8'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+    """Create contest_reports table"""
+
+    # Create contest_reports table
+    op.create_table(
+        'contest_reports',
+
+        # Primary key
+        sa.Column('id', sa.Integer(), nullable=False, autoincrement=True),
+
+        # Foreign keys
+        sa.Column('contest_id', sa.Integer(), nullable=False),
+        sa.Column('generated_by', sa.Integer(), nullable=False),
+
+        # Report configuration
+        sa.Column('report_type', sa.String(length=20), nullable=False),
+        sa.Column('status', sa.String(length=20), nullable=False, server_default='pending'),
+
+        # File storage
+        sa.Column('file_path', sa.String(length=500), nullable=True),
+
+        # Error handling
+        sa.Column('error_message', sa.Text(), nullable=True),
+
+        # Report parameters (JSON)
+        sa.Column('report_metadata', sa.Text(), nullable=True),
+
+        # Timestamps
+        sa.Column('created_at', sa.DateTime(), nullable=False, server_default=sa.func.now()),
+        sa.Column('updated_at', sa.DateTime(), nullable=False, server_default=sa.func.now(), onupdate=sa.func.now()),
+
+        # Primary key constraint
+        sa.PrimaryKeyConstraint('id'),
+
+        # Foreign key constraints
+        sa.ForeignKeyConstraint(['contest_id'], ['contests.id'], ondelete='CASCADE'),
+        sa.ForeignKeyConstraint(['generated_by'], ['users.id'], ondelete='CASCADE'),
+    )
+
+    # Create indexes for better query performance
+    op.create_index('idx_contest_reports_contest_id', 'contest_reports', ['contest_id'])
+    op.create_index('idx_contest_reports_generated_by', 'contest_reports', ['generated_by'])
+    op.create_index('idx_contest_reports_status', 'contest_reports', ['status'])
+
+
+def downgrade():
+    """Drop contest_reports table"""
+
+    # Drop indexes first
+    op.drop_index('idx_contest_reports_status', table_name='contest_reports')
+    op.drop_index('idx_contest_reports_generated_by', table_name='contest_reports')
+    op.drop_index('idx_contest_reports_contest_id', table_name='contest_reports')
+
+    # Drop table
+    op.drop_table('contest_reports')
\ No newline at end of file
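Note: the merge revision that follows exists only to join this migration with the outreach-dashboard head (d55c876a1323). After `alembic upgrade head`, the resulting schema can be sanity-checked from a Flask shell; a minimal sketch, assuming the `create_app` factory from backend/app/__init__.py:

    from sqlalchemy import inspect
    from app import create_app
    from app.database import db

    app = create_app()
    with app.app_context():
        insp = inspect(db.engine)
        assert 'contest_reports' in insp.get_table_names()
        # Expect the three idx_contest_reports_* indexes created above
        print(sorted(ix['name'] for ix in insp.get_indexes('contest_reports')))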
diff --git a/backend/alembic/versions/e4e56960f418_merge_report_feature_and_outreach_.py b/backend/alembic/versions/e4e56960f418_merge_report_feature_and_outreach_.py
new file mode 100644
index 0000000..6be5506
--- /dev/null
+++ b/backend/alembic/versions/e4e56960f418_merge_report_feature_and_outreach_.py
@@ -0,0 +1,25 @@
+"""merge report feature and outreach dashboard migrations
+
+Revision ID: e4e56960f418
+Revises: cb863878e0d1, d55c876a1323
+Create Date: 2026-01-30 20:30:29.797250
+
+"""
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision = 'e4e56960f418'
+down_revision = ('cb863878e0d1', 'd55c876a1323')
+branch_labels = None
+depends_on = None
+
+
+def upgrade() -> None:
+    pass
+
+
+def downgrade() -> None:
+    pass
+
diff --git a/backend/app/__init__.py b/backend/app/__init__.py
index e8f5d2d..35a6ea0 100644
--- a/backend/app/__init__.py
+++ b/backend/app/__init__.py
@@ -40,9 +40,11 @@
 from app.models.user import User  # pylint: disable=unused-import
 from app.models.contest import Contest  # pylint: disable=unused-import
 from app.models.submission import Submission  # pylint: disable=unused-import
+from app.models.contest_report import ContestReport  # pylint: disable=unused-import
 from app.routes.user_routes import user_bp
 from app.routes.contest_routes import contest_bp
 from app.routes.submission_routes import submission_bp
+from app.routes.report_routes import report_bp
 from app.utils import (
     extract_page_title_from_url,
     build_mediawiki_revisions_api_params,
@@ -201,6 +203,7 @@ def create_app():
     app.register_blueprint(user_bp, url_prefix='/api/user')  # User management endpoints
     app.register_blueprint(contest_bp, url_prefix='/api/contest')  # Contest endpoints
     app.register_blueprint(submission_bp, url_prefix='/api/submission')  # Submission endpoints
+    app.register_blueprint(report_bp, url_prefix='/api/report')  # Report endpoints
 
 
 # ---------------------------------------------------------------------------
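Note: `report_metadata` in the model below is persisted as a JSON string and decoded on read, so the round-trip is worth a regression test. A minimal pytest-style sketch; the `contest` and `user` fixtures are assumptions, not part of this patch:

    from app.database import db
    from app.models.contest_report import ContestReport

    def test_report_metadata_round_trip(contest, user):
        report = ContestReport(
            contest_id=contest.id,
            report_type='csv',
            generated_by=user.id,
            report_metadata={'top_n': 50},
        )
        db.session.add(report)
        db.session.commit()

        assert report.status == 'pending'
        assert report.get_metadata() == {'top_n': 50}
        # file_path is only exposed once generation has completed
        assert report.to_dict()['file_path'] is None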
diff --git a/backend/app/models/contest_report.py b/backend/app/models/contest_report.py
new file mode 100644
index 0000000..9612d92
--- /dev/null
+++ b/backend/app/models/contest_report.py
@@ -0,0 +1,135 @@
+"""
+Contest Report Model for WikiContest Application
+Stores generated contest reports and their metadata
+"""
+
+import json
+
+from app.database import db
+from app.models.base_model import BaseModel
+
+
+class ContestReport(BaseModel):
+    """
+    Contest Report model representing generated reports for contests
+
+    Attributes:
+        id: Primary key, auto-incrementing integer
+        contest_id: Foreign key to contests table
+        report_type: Type of report ('csv' or 'pdf')
+        status: Generation status ('pending', 'processing', 'completed', 'failed')
+        file_path: Path to generated report file
+        generated_by: Foreign key to users table (who requested the report)
+        error_message: Error message if generation failed
+        report_metadata: JSON containing report parameters (top_n, filters, etc.)
+        created_at: Timestamp when report generation was requested (from BaseModel)
+        updated_at: Timestamp when report status was last updated (from BaseModel)
+    """
+
+    __tablename__ = "contest_reports"
+
+    # Primary key
+    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
+
+    # Foreign keys
+    contest_id = db.Column(db.Integer, db.ForeignKey("contests.id"), nullable=False)
+    generated_by = db.Column(db.Integer, db.ForeignKey("users.id"), nullable=False)
+
+    # Report configuration
+    report_type = db.Column(db.String(20), nullable=False)  # 'csv' or 'pdf'
+    status = db.Column(db.String(20), default='pending', nullable=False)
+
+    # File storage
+    file_path = db.Column(db.String(500), nullable=True)
+
+    # Error handling
+    error_message = db.Column(db.Text, nullable=True)
+
+    # Report parameters (stored as JSON string)
+    report_metadata = db.Column(db.Text, nullable=True)
+
+    # Relationships
+    contest = db.relationship("Contest", backref="reports")
+    generator = db.relationship("User", backref="generated_reports")
+
+    def __init__(self, contest_id, report_type, generated_by, report_metadata=None):
+        """
+        Initialize a new ContestReport instance
+
+        Note: created_at/updated_at are handled by BaseModel and must not be
+        set manually here.
+
+        Args:
+            contest_id: ID of the contest
+            report_type: Type of report ('csv' or 'pdf')
+            generated_by: ID of user requesting the report
+            report_metadata: Optional dict of report parameters
+        """
+        # Call parent __init__ to set created_at/updated_at
+        super().__init__()
+
+        self.contest_id = contest_id
+        self.report_type = report_type
+        self.generated_by = generated_by
+        self.status = 'pending'
+
+        # Store report_metadata as JSON string
+        if report_metadata:
+            self.report_metadata = json.dumps(report_metadata)
+        else:
+            self.report_metadata = None
+
+    def get_metadata(self):
+        """
+        Get report metadata as dictionary
+
+        Returns:
+            dict or None: Report parameters
+        """
+        if not self.report_metadata:
+            return None
+        try:
+            return json.loads(self.report_metadata)
+        except (TypeError, ValueError):
+            return None
+
+    def is_completed(self):
+        """Check if report generation is completed"""
+        return self.status == 'completed'
+
+    def is_failed(self):
+        """Check if report generation failed"""
+        return self.status == 'failed'
+
+    def is_processing(self):
+        """Check if report is currently being generated"""
+        return self.status == 'processing'
+
+    def to_dict(self):
+        """
+        Convert report instance to dictionary for JSON serialization
+
+        Returns:
+            dict: Report data
+        """
+        return {
+            'id': self.id,
+            'contest_id': self.contest_id,
+            'report_type': self.report_type,
+            'status': self.status,
+            'file_path': self.file_path if self.is_completed() else None,
+            'error_message': self.error_message if self.is_failed() else None,
+            'report_metadata': self.get_metadata(),
+            'generated_by': self.generated_by,
+            # created_at/updated_at are provided by BaseModel
+            'created_at': self.created_at.isoformat() if self.created_at else None,
+            'updated_at': self.updated_at.isoformat() if self.updated_at else None,
+        }
+
+    def __repr__(self):
+        """String representation of ContestReport instance"""
+        return (f"<ContestReport id={self.id} contest={self.contest_id} "
+                f"type={self.report_type} status={self.status}>")
\ No newline at end of file
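Note: the routes below implement a generate → poll → download flow. Generation is currently synchronous, but a client written against the `status` field keeps working if generation ever moves to a background worker. A sketch of the expected client side; the base URL and session-based auth are assumptions:

    import time
    import requests

    BASE = 'https://wikicontest.example.org/api/report'  # hypothetical host

    def fetch_report(session: requests.Session, contest_id: int) -> bytes:
        resp = session.post(f'{BASE}/contest/{contest_id}/generate',
                            json={'report_type': 'pdf', 'top_n': 20})
        resp.raise_for_status()
        report = resp.json()['report']

        # Poll until the report leaves the pending/processing states
        while report['status'] in ('pending', 'processing'):
            time.sleep(2)
            report = session.get(f"{BASE}/report/{report['id']}/status").json()['report']

        if report['status'] != 'completed':
            raise RuntimeError(report.get('error_message') or 'report generation failed')
        return session.get(f"{BASE}/report/{report['id']}/download").content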
diff --git a/backend/app/routes/report_routes.py b/backend/app/routes/report_routes.py
new file mode 100644
index 0000000..0a7f2be
--- /dev/null
+++ b/backend/app/routes/report_routes.py
@@ -0,0 +1,422 @@
+"""
+Report Routes for WikiContest Application
+Endpoints for generating, previewing, downloading, and deleting contest reports
+"""
+
+import os
+import traceback
+
+from flask import Blueprint, request, send_file, jsonify
+from sqlalchemy import text
+
+from app.middleware.auth import require_auth, handle_errors
+from app.models.contest import Contest
+from app.models.contest_report import ContestReport
+from app.utils.report_builder import CSVReportBuilder, PDFReportBuilder
+from app.utils.report_queries import (
+    get_submission_statistics,
+    get_top_contributors
+)
+from app.database import db
+
+report_bp = Blueprint('report', __name__)
+
+
+@report_bp.route('/contest/<int:contest_id>/generate', methods=['POST'])
+@require_auth
+@handle_errors
+def generate_report(contest_id):
+    """Generate a contest report (CSV or PDF) with comprehensive error handling"""
+
+    report = None
+
+    try:
+        # Step 1: Get current user
+        current_user = request.current_user
+
+        # Step 2: Fetch contest
+        contest = Contest.query.get(contest_id)
+        if not contest:
+            return jsonify({'error': 'Contest not found'}), 404
+
+        # Step 3: Check permissions
+        is_admin = current_user.is_admin()
+        is_creator = current_user.is_contest_creator(contest)
+        is_organizer = current_user.is_contest_organizer(contest)
+
+        if not (is_admin or is_creator or is_organizer):
+            return jsonify({'error': 'Insufficient permissions'}), 403
+
+        # Step 4: Parse request
+        data = request.get_json() or {}
+        report_type = data.get('report_type', 'csv')
+        top_n = data.get('top_n', 100)
+
+        if report_type not in ['csv', 'pdf']:
+            return jsonify({
+                'error': 'Invalid report type. Must be "csv" or "pdf"'
+            }), 400
+
+        # Step 5: Validate contest has submissions
+        from app.models.submission import Submission
+        submission_count = Submission.query.filter_by(contest_id=contest_id).count()
+
+        if submission_count == 0:
+            print("⚠️ Warning: No submissions to report on")
+            # Continue anyway - an empty report is valid
+
+        # Step 6: Create report record
+        metadata = {'top_n': top_n}
+
+        try:
+            report = ContestReport(
+                contest_id=contest_id,
+                report_type=report_type,
+                generated_by=current_user.id,
+                report_metadata=metadata
+            )
+            db.session.add(report)
+            db.session.commit()
+            print(f" Report ID: {report.id}")
+        except Exception as db_error:
+            print(f"Database error creating report: {db_error}")
+            traceback.print_exc()
+            return jsonify({
+                'error': 'Failed to create report record',
+                'details': str(db_error)
+            }), 500
+
+        # Step 7: Generate report
+        report.status = 'processing'
+        db.session.commit()
+
+        report_metadata = report.get_metadata() or {}
+        file_path = None
+
+        try:
+            if report_type == 'csv':
+                print(" Creating CSV builder...")
+                builder = CSVReportBuilder(contest, report_metadata)
+                print(" Generating CSV...")
+                file_path = builder.generate()
+            else:  # pdf
+                print(" Creating PDF builder...")
+                builder = PDFReportBuilder(contest, report_metadata)
+                print(" Generating PDF...")
+                file_path = builder.generate()
+
+            # Verify file exists
+            if not os.path.exists(file_path):
+                raise RuntimeError(f"Generated file not found at {file_path}")
+
+            file_size = os.path.getsize(file_path)
+            if file_size == 0:
+                raise RuntimeError("Generated file is empty")
+
+        except Exception as gen_error:
+            traceback.print_exc()
+
+            report.status = 'failed'
+            report.error_message = str(gen_error)
+            db.session.commit()
+
+            return jsonify({
+                'error': 'Report generation failed',
+                'details': str(gen_error),
+                'type': type(gen_error).__name__
+            }), 500
+
+        # Update status
+        report.status = 'completed'
+        report.file_path = file_path
+        db.session.commit()
+
+        return jsonify({
+            'success': True,
+            'message': 'Report generated successfully',
+            'report': report.to_dict()
+        }), 200
+
+    except Exception as e:
+        traceback.print_exc()
+
+        # Update report status if it was created
+        if report:
+            try:
+                report.status = 'failed'
+                report.error_message = str(e)
+                db.session.commit()
+            except Exception as db_error:
+                print(f"⚠️ Failed to update report status: {db_error}")
+                db.session.rollback()
+
+        return jsonify({
+            'error': 'Report generation failed',
+            'details': str(e),
+            'type': type(e).__name__
+        }), 500
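+
+# NOTE: generate_report builds the file inline, so the request blocks for the
+# whole build; the 'pending'/'processing' states above exist so generation
+# could move to a background job later without changing API clients.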
+
+
+@report_bp.route('/contest/<int:contest_id>/reports', methods=['GET'])
+@require_auth
+@handle_errors
+def list_reports(contest_id):
+    """List all reports for a contest"""
+    try:
+        current_user = request.current_user
+        contest = Contest.query.get_or_404(contest_id)
+
+        # Check permissions
+        if not (current_user.is_admin() or
+                current_user.is_contest_creator(contest) or
+                current_user.is_contest_organizer(contest)):
+            return jsonify({'error': 'Insufficient permissions'}), 403
+
+        # Fetch reports
+        reports = ContestReport.query.filter_by(
+            contest_id=contest_id
+        ).order_by(
+            ContestReport.id.desc()
+        ).all()
+
+        return jsonify({
+            'success': True,
+            'reports': [r.to_dict() for r in reports]
+        }), 200
+
+    except Exception as e:
+        traceback.print_exc()
+        return jsonify({'error': str(e)}), 500
+
+
+@report_bp.route('/report/<int:report_id>/download', methods=['GET'])
+@require_auth
+@handle_errors
+def download_report(report_id):
+    """Download a generated report"""
+    try:
+        current_user = request.current_user
+        report = ContestReport.query.get_or_404(report_id)
+        contest = report.contest
+
+        # Check permissions
+        if not (current_user.is_admin() or
+                current_user.is_contest_creator(contest) or
+                current_user.is_contest_organizer(contest)):
+            return jsonify({'error': 'Insufficient permissions'}), 403
+
+        # Check if report is ready
+        if report.status != 'completed':
+            print(f"⚠️ Report not ready: {report.status}")
+            return jsonify({
+                'error': 'Report not ready',
+                'status': report.status,
+                'error_message': report.error_message
+            }), 400
+
+        # Check if file exists
+        if not report.file_path:
+            print("❌ No file path in database")
+            return jsonify({'error': 'Report file path missing'}), 404
+
+        if not os.path.exists(report.file_path):
+            print(f"❌ File not found: {report.file_path}")
+            return jsonify({'error': 'Report file not found on disk'}), 404
+
+        # Send file
+        filename = f"contest_{report.contest_id}_report.{report.report_type}"
+        print(f"✅ Sending file: {filename} ({os.path.getsize(report.file_path)} bytes)")
+
+        return send_file(
+            report.file_path,
+            as_attachment=True,
+            download_name=filename,
+            mimetype='text/csv' if report.report_type == 'csv' else 'application/pdf'
+        )
+
+    except Exception as e:
+        print(f"❌ Download error: {e}")
+        traceback.print_exc()
+        return jsonify({'error': f'Failed to download report: {str(e)}'}), 500
+
+
+@report_bp.route('/report/<int:report_id>/status', methods=['GET'])
+@require_auth
+@handle_errors
+def report_status(report_id):
+    """Check report generation status"""
+    try:
+        current_user = request.current_user
+        report = ContestReport.query.get_or_404(report_id)
+        contest = report.contest
+
+        # Check permissions
+        if not (current_user.is_admin() or
+                current_user.is_contest_creator(contest) or
+                current_user.is_contest_organizer(contest)):
+            return jsonify({'error': 'Insufficient permissions'}), 403
+
+        return jsonify({
+            'success': True,
+            'report': report.to_dict()
+        }), 200
+
+    except Exception as e:
+        print(f"❌ Status check error: {e}")
+        return jsonify({'error': str(e)}), 500
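+
+# NOTE: the admin/creator/organizer permission check is repeated verbatim in
+# every endpoint above and below; a small helper would keep them in sync.
+# Sketch (assumed refactor, not part of this patch):
+#
+#     def _can_manage(user, contest):
+#         return (user.is_admin()
+#                 or user.is_contest_creator(contest)
+#                 or user.is_contest_organizer(contest))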
+
+
+@report_bp.route('/contest/<int:contest_id>/preview', methods=['GET'])
+@require_auth
+@handle_errors
+def preview_report(contest_id):
+    """Preview report data without generating the full file"""
+    try:
+        print(f"\n👁️ Preview request for contest {contest_id}")
+
+        current_user = request.current_user
+        contest = Contest.query.get_or_404(contest_id)
+
+        # Check permissions
+        if not (current_user.is_admin() or
+                current_user.is_contest_creator(contest) or
+                current_user.is_contest_organizer(contest)):
+            return jsonify({'error': 'Insufficient permissions'}), 403
+
+        # Get preview parameters
+        top_n = request.args.get('top_n', 10, type=int)
+
+        # Fetch preview data with error handling
+        try:
+            stats = get_submission_statistics(contest_id)
+            print(" ✅ Statistics loaded")
+        except Exception as e:
+            print(f" ❌ Statistics failed: {e}")
+            traceback.print_exc()
+            return jsonify({
+                'error': 'Failed to fetch statistics',
+                'details': str(e)
+            }), 500
+
+        try:
+            top_contributors = get_top_contributors(contest_id, limit=top_n)
+            print(f" ✅ Top contributors loaded: {len(top_contributors)}")
+        except Exception as e:
+            print(f" ❌ Contributors failed: {e}")
+            traceback.print_exc()
+            top_contributors = []
+
+        return jsonify({
+            'success': True,
+            'preview': {
+                'summary': stats,
+                'top_contributors': top_contributors,
+                'contest': {
+                    'id': contest.id,
+                    'name': contest.name,
+                    'start_date': contest.start_date.isoformat() if contest.start_date else None,
+                    'end_date': contest.end_date.isoformat() if contest.end_date else None,
+                }
+            }
+        }), 200
+
+    except Exception as e:
+        print(f"❌ Preview error: {e}")
+        traceback.print_exc()
+        return jsonify({'error': str(e)}), 500
+
+
+@report_bp.route('/report/<int:report_id>', methods=['DELETE'])
+@require_auth
+@handle_errors
+def delete_report(report_id):
+    """Delete a generated report"""
+    try:
+        print(f"\n🗑️ Delete request for report {report_id}")
+
+        current_user = request.current_user
+        report = ContestReport.query.get_or_404(report_id)
+        contest = report.contest
+
+        # Check permissions (only admin or contest creator)
+        if not (current_user.is_admin() or current_user.is_contest_creator(contest)):
+            return jsonify({'error': 'Insufficient permissions'}), 403
+
+        # Delete file if it exists
+        if report.file_path and os.path.exists(report.file_path):
+            try:
+                os.remove(report.file_path)
+                print(f"✅ Deleted file: {report.file_path}")
+            except Exception as e:
+                print(f"⚠️ File delete error: {e}")
+
+        # Delete database record
+        db.session.delete(report)
+        db.session.commit()
+
+        print(f"✅ Report {report_id} deleted successfully")
+
+        return jsonify({
+            'success': True,
+            'message': 'Report deleted successfully'
+        }), 200
+
+    except Exception as e:
+        print(f"❌ Delete error: {e}")
+        traceback.print_exc()
+        return jsonify({'error': str(e)}), 500
+
+
+@report_bp.route('/health', methods=['GET'])
+def report_health():
+    """Health check endpoint for report system"""
+    dependencies = {
+        'database': False,
+        'reportlab': False,
+        'matplotlib': False,
+        'reports_directory': False
+    }
+
+    # Check database (text() is required for raw SQL under SQLAlchemy 2.x)
+    try:
+        db.session.execute(text('SELECT 1'))
+        dependencies['database'] = True
+    except Exception as e:
+        print(f"❌ Database check failed: {e}")
+
+    # Check reportlab
+    try:
+        import reportlab
+        dependencies['reportlab'] = True
+    except ImportError:
+        print("❌ reportlab not installed")
+
+    # Check matplotlib
+    try:
+        import matplotlib
+        dependencies['matplotlib'] = True
+    except ImportError:
+        print("❌ matplotlib not installed")
+
+    # Check reports directory
+    try:
+        if os.path.exists('/data/project'):
+            reports_dir = '/data/project/wikicontest/reports'
+        else:
+            reports_dir = os.path.join(os.path.dirname(__file__), '../../reports')
+
+        os.makedirs(reports_dir, exist_ok=True)
+        test_file = os.path.join(reports_dir, '.health_check')
+        with open(test_file, 'w') as f:
+            f.write('test')
+        os.remove(test_file)
+        dependencies['reports_directory'] = True
+    except Exception as e:
+        print(f"❌ Reports directory check failed: {e}")
+
+    all_ok = all(dependencies.values())
+
+    return jsonify({
+        'status': 'healthy' if all_ok else 'degraded',
+        'dependencies': dependencies,
+        'message': 'All dependencies available' if all_ok else 'Some dependencies missing'
+    }), 200 if all_ok else 503
\ No newline at end of file
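Note: /health reports per-dependency booleans and flips to HTTP 503 when anything is missing, which makes it straightforward to wire into uptime monitoring. A minimal probe, with the host name assumed:

    import requests

    resp = requests.get('https://wikicontest.example.org/api/report/health', timeout=10)
    body = resp.json()
    if resp.status_code == 503:
        missing = [name for name, ok in body['dependencies'].items() if not ok]
        print('report system degraded, missing: ' + ', '.join(missing))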
diff --git a/backend/app/utils/report_builder.py b/backend/app/utils/report_builder.py
new file mode 100644
index 0000000..39447be
--- /dev/null
+++ b/backend/app/utils/report_builder.py
@@ -0,0 +1,1035 @@
+"""
+Report Builder for WikiContest Application
+Builds comprehensive CSV and PDF contest reports
+Handles missing Contest attributes gracefully
+"""
+
+import os
+import io
+from datetime import datetime
+from collections import defaultdict
+from app.utils.report_queries import (
+    get_submission_statistics,
+    get_top_contributors,
+    get_submission_timeline,
+    get_judge_statistics
+)
+
+
+class CSVReportBuilder:
+    """Build comprehensive CSV reports with full contest and submission details"""
+
+    def __init__(self, contest, metadata=None):
+        self.contest = contest
+        self.metadata = metadata or {}
+
+    def _safe_get(self, attr, default='N/A'):
+        """Safely get a contest attribute with a fallback"""
+        return getattr(self.contest, attr, default)
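+
+    # NOTE: generate() below writes CSV rows by hand with manual quote
+    # doubling. That covers the fields emitted here, but the stdlib csv
+    # module handles embedded commas/quotes/newlines for free; a sketch of
+    # the equivalent (assumed, not part of this patch):
+    #
+    #     import csv
+    #     writer = csv.writer(output)
+    #     writer.writerow(["Field", "Value"])
+    #     writer.writerow(["Contest Name", self.contest.name])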
+
+    def generate(self):
+        """Generate comprehensive CSV report with all insights"""
+
+        # Fetch all data
+        stats = get_submission_statistics(self.contest.id)
+        top_n = self.metadata.get('top_n', 100)
+        top_contributors = get_top_contributors(self.contest.id, limit=top_n)
+        timeline = get_submission_timeline(self.contest.id)
+        judges = get_judge_statistics(self.contest.id)
+
+        output = io.StringIO()
+
+        # =====================================================================
+        # SECTION 1: REPORT HEADER & METADATA
+        # =====================================================================
+        output.write("=" * 150 + "\n")
+        output.write(f"ULTRA-COMPREHENSIVE CONTEST REPORT - {self.contest.name.upper()}\n")
+        output.write("=" * 150 + "\n")
+        output.write(f"Generated At:,{datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S UTC')}\n")
+        output.write("Report Type:,Complete Contest Analysis with All Insights\n")
+        output.write("Report Version:,2.0 - Enhanced Edition\n")
+        output.write("=" * 150 + "\n\n")
+
+        # =====================================================================
+        # SECTION 2: CONTEST CONFIGURATION & SETUP
+        # =====================================================================
+        output.write("-" * 150 + "\n")
+        output.write("CONTEST CONFIGURATION & SETUP\n")
+        output.write("-" * 150 + "\n\n")
+
+        output.write("Field,Value\n")
+        output.write(f"Contest ID,{self.contest.id}\n")
+        output.write(f"Contest Name,\"{self.contest.name}\"\n")
+        output.write(f"Project Name,\"{self._safe_get('project_name', 'N/A')}\"\n")
+
+        # Status - calculate if not available
+        status = self._safe_get('status', None)
+        if not status:
+            # Calculate status based on dates
+            now = datetime.now()
+            try:
+                start = datetime.fromisoformat(str(self.contest.start_date)) if self.contest.start_date else None
+                end = datetime.fromisoformat(str(self.contest.end_date)) if self.contest.end_date else None
+
+                if start and end:
+                    if now < start:
+                        status = "Upcoming"
+                    elif now > end:
+                        status = "Past"
+                    else:
+                        status = "Current"
+                else:
+                    status = "Unknown"
+            except (TypeError, ValueError):
+                status = "Unknown"
+
+        output.write(f"Contest Status,{status}\n")
+        output.write(f"Created By,{self._safe_get('created_by', 'Unknown')}\n")
+        output.write(f"Created At,{self._safe_get('created_at', 'N/A')}\n")
+        output.write(f"Start Date,{self._safe_get('start_date', 'N/A')}\n")
+        output.write(f"End Date,{self._safe_get('end_date', 'N/A')}\n")
+
+        # Contest duration calculation
+        if self.contest.start_date and self.contest.end_date:
+            try:
+                start = datetime.fromisoformat(str(self.contest.start_date))
+                end = datetime.fromisoformat(str(self.contest.end_date))
+                duration = (end - start).days
+                output.write(f"Contest Duration (Days),{duration}\n")
+            except (TypeError, ValueError):
+                pass
+
+        # Description
+        description = self._safe_get('description', None)
+        if description:
+            description_cleaned = str(description).replace('\n', ' ').replace('"', '""')
+            output.write(f"Description,\"{description_cleaned}\"\n")
+
+        output.write("\n")
+
+        # =====================================================================
+        # SECTION 3: SUBMISSION RULES & REQUIREMENTS
+        # =====================================================================
+        output.write("-" * 150 + "\n")
+        output.write("SUBMISSION RULES & REQUIREMENTS\n")
+        output.write("-" * 150 + "\n\n")
+
+        output.write("Rule Type,Value\n")
+
+        # Submission type
+        submission_type_map = {
+            'new': 'New Articles Only',
+            'expansion': 'Improved Articles Only',
+            'both': 'Both (New + Improved)'
+        }
+        allowed_type = submission_type_map.get(
+            self._safe_get('allowed_submission_type', 'both'),
+            self._safe_get('allowed_submission_type', 'both')
+        )
+        output.write(f"Allowed Submission Type,{allowed_type}\n")
+
+        # Minimum requirements
+        output.write(f"Minimum Byte Count,{self._safe_get('min_byte_count', 0)}\n")
+        output.write(f"Minimum References Required,{self._safe_get('min_reference_count', 0)}\n")
+
+        # Categories
+        categories = self._safe_get('categories', None)
+        if categories:
+            categories_list = ', '.join(categories) if isinstance(categories, list) else str(categories)
+            output.write(f"Required Categories,\"{categories_list}\"\n")
+            output.write(f"Number of Categories,{len(categories) if isinstance(categories, list) else 1}\n")
+
+        # Template
+        template_link = self._safe_get('template_link', None)
+        if template_link:
+            output.write(f"Contest Template,\"{template_link}\"\n")
+
+        # Contest rules
+        rules = self._safe_get('rules', None)
+        if rules:
+            if isinstance(rules, dict) and 'text' in rules:
+                rules_text = str(rules['text']).replace('\n', ' ').replace('"', '""')
+            else:
+                rules_text = str(rules).replace('\n', ' ').replace('"', '""')
+            output.write(f"Contest Rules,\"{rules_text}\"\n")
+
+        output.write("\n")
+
+        # =====================================================================
+        # SECTION 4: SCORING SYSTEM CONFIGURATION
+        # =====================================================================
+        output.write("-" * 150 + "\n")
+        output.write("SCORING SYSTEM CONFIGURATION\n")
+        output.write("-" * 150 + "\n\n")
+
+        scoring_params = self._safe_get('scoring_parameters', None)
+        if scoring_params and isinstance(scoring_params, dict):
+            if scoring_params.get('enabled'):
+                output.write("Scoring Mode,Multi-Parameter Scoring\n")
+                output.write(f"Maximum Score (Accepted),{scoring_params.get('max_score', 10)}\n")
+
output.write(f"Minimum Score (Rejected),{scoring_params.get('min_score', 0)}\n") + output.write("\n") + + # Scoring parameters breakdown + if 'parameters' in scoring_params: + output.write("Scoring Parameter,Weight (%),Description\n") + for param in scoring_params['parameters']: + param_name = param.get('name', 'Unknown') + param_weight = param.get('weight', 0) + param_desc = param.get('description', 'No description') + output.write(f"\"{param_name}\",{param_weight},\"{param_desc}\"\n") + else: + output.write("Scoring Mode,Simple Accept/Reject\n") + output.write(f"Points for Accepted,{self._safe_get('marks_setting_accepted', 0)}\n") + output.write(f"Points for Rejected,{self._safe_get('marks_setting_rejected', 0)}\n") + else: + output.write("Scoring Mode,Simple Accept/Reject\n") + output.write(f"Points for Accepted,{self._safe_get('marks_setting_accepted', 0)}\n") + output.write(f"Points for Rejected,{self._safe_get('marks_setting_rejected', 0)}\n") + + output.write("\n") + + # ===================================================================== + # SECTION 5: TEAM & ORGANIZERS + # ===================================================================== + output.write("-" * 150 + "\n") + output.write("CONTEST TEAM & ORGANIZERS\n") + output.write("-" * 150 + "\n\n") + + output.write("Role,Members,Count\n") + + # Organizers + organizers = self._safe_get('organizers', None) + if organizers: + organizers_list = ', '.join(organizers) if isinstance(organizers, list) else str(organizers) + organizers_count = len(organizers) if isinstance(organizers, list) else 1 + output.write(f"Organizers,\"{organizers_list}\",{organizers_count}\n") + + # Jury members + jury = self._safe_get('jury_members', None) + if jury: + jury_list = ', '.join(jury) if isinstance(jury, list) else str(jury) + jury_count = len(jury) if isinstance(jury, list) else 1 + output.write(f"Jury Members,\"{jury_list}\",{jury_count}\n") + + output.write("\n") + + # ===================================================================== + # SECTION 6: EXECUTIVE SUMMARY & KEY METRICS + # ===================================================================== + output.write("-" * 150 + "\n") + output.write("EXECUTIVE SUMMARY & KEY METRICS\n") + output.write("-" * 150 + "\n\n") + + output.write("Metric,Value,Percentage\n") + output.write(f"Total Submissions,{stats['total_submissions']},100.00%\n") + output.write(f"Accepted Submissions,{stats['accepted']},{round((stats['accepted'] / stats['total_submissions'] * 100) if stats['total_submissions'] > 0 else 0, 2)}%\n") + output.write(f"Rejected Submissions,{stats['rejected']},{round((stats['rejected'] / stats['total_submissions'] * 100) if stats['total_submissions'] > 0 else 0, 2)}%\n") + output.write(f"Pending Review,{stats['pending']},{round((stats['pending'] / stats['total_submissions'] * 100) if stats['total_submissions'] > 0 else 0, 2)}%\n") + output.write(f"Total Points Awarded,{stats['total_points']},N/A\n") + output.write(f"Unique Participants,{stats['unique_participants']},N/A\n") + + # Additional metrics + if stats['unique_participants'] > 0: + avg_submissions = round(stats['total_submissions'] / stats['unique_participants'], 2) + output.write(f"Average Submissions per Participant,{avg_submissions},N/A\n") + + if stats['accepted'] > 0: + avg_points_per_accepted = round(stats['total_points'] / stats['accepted'], 2) + output.write(f"Average Points per Accepted Article,{avg_points_per_accepted},N/A\n") + + output.write("\n") + + # 
===================================================================== + # SECTION 7: JUDGE PERFORMANCE ANALYSIS + # ===================================================================== + if judges: + output.write("-" * 150 + "\n") + output.write("JUDGE PERFORMANCE ANALYSIS\n") + output.write("-" * 150 + "\n\n") + + output.write("Judge Username,Total Reviewed,Accepted,Rejected,Pending,Acceptance Rate (%),Rejection Rate (%)\n") + + for judge in judges: + pending = stats['total_submissions'] - judge['total_reviewed'] + rejection_rate = round(100 - judge['acceptance_rate'], 2) + + output.write( + f"\"{judge['judge_username']}\"," + f"{judge['total_reviewed']}," + f"{judge['accepted']}," + f"{judge['rejected']}," + f"{pending}," + f"{judge['acceptance_rate']}," + f"{rejection_rate}\n" + ) + + output.write("\n") + + # ===================================================================== + # SECTION 8: TOP CONTRIBUTORS LEADERBOARD + # ===================================================================== + output.write("-" * 150 + "\n") + output.write(f"TOP {len(top_contributors)} CONTRIBUTORS LEADERBOARD\n") + output.write("-" * 150 + "\n\n") + + output.write("Rank,Username,Email,Total Submissions,Accepted,Rejected,Pending,Total Points,Acceptance Rate (%),Avg Points per Submission\n") + + for contrib in top_contributors: + if contrib['total_submissions'] > 0: + user_acceptance = round((contrib['accepted'] / contrib['total_submissions']) * 100, 2) + avg_points = round(contrib['total_points'] / contrib['total_submissions'], 2) + else: + user_acceptance = 0 + avg_points = 0 + + output.write( + f"{contrib['rank']}," + f"\"{contrib['username']}\"," + f"\"{contrib['email']}\"," + f"{contrib['total_submissions']}," + f"{contrib['accepted']}," + f"{contrib['rejected']}," + f"{contrib['pending']}," + f"{contrib['total_points']}," + f"{user_acceptance}," + f"{avg_points}\n" + ) + output.write("\n") + + # ===================================================================== + # SECTION 9: DAILY SUBMISSION TIMELINE + # ===================================================================== + if timeline: + output.write("-" * 150 + "\n") + output.write("DAILY SUBMISSION TIMELINE\n") + output.write("-" * 150 + "\n\n") + + output.write("Date,Total Submissions,Accepted,Rejected,Pending,Acceptance Rate (%),Daily Trend,Cumulative Total\n") + + cumulative = 0 + for idx, day in enumerate(timeline): + total = day['total'] + accepted = day['accepted'] + rejected = day['rejected'] + pending = total - accepted - rejected + + daily_acceptance = round((accepted / total) * 100, 2) if total > 0 else 0 + + # Calculate trend + if idx > 0: + prev_total = timeline[idx-1]['total'] + trend = "↑ Increasing" if total > prev_total else "↓ Decreasing" if total < prev_total else "→ Stable" + else: + trend = "First Day" + + cumulative += total + + output.write( + f"{day['date']}," + f"{total}," + f"{accepted}," + f"{rejected}," + f"{pending}," + f"{daily_acceptance}," + f"\"{trend}\"," + f"{cumulative}\n" + ) + output.write("\n") + + # ===================================================================== + # SECTION 10: COMPLETE SUBMISSIONS DATABASE (ALL DETAILS) + # ===================================================================== + output.write("-" * 150 + "\n") + output.write("COMPLETE SUBMISSIONS DATABASE - ALL ARTICLE DETAILS\n") + output.write("-" * 150 + "\n\n") + + # Enhanced header with ALL fields + output.write( + "Submission ID," + "Participant Username," + "Participant Email," + "Article Title," + "Article URL," + 
"Original Article Author," + "Latest Revision Author," + "Total Bytes," + "Original Bytes," + "Expansion Bytes," + "Expansion Type," + "Reference Count," + "Meets Min References," + "Meets Min Bytes," + "Article Created Date," + "Latest Revision Date," + "Status," + "Score," + "Submitted Date," + "Reviewed Date," + "Reviewed By," + "Days to Review," + "Review Comments\n" + ) + + # Fetch enhanced submission data + from app.models.submission import Submission + from app.models.user import User + + submissions_query = Submission.query.filter_by( + contest_id=self.contest.id + ).order_by( + Submission.score.desc(), + Submission.submitted_at.desc() + ).all() + + for sub in submissions_query: + # Get participant details + participant = User.query.get(sub.user_id) + participant_name = participant.username if participant else 'Unknown' + participant_email = participant.email if participant else 'N/A' + + # Get reviewer details + reviewer_name = 'Not Reviewed' + if sub.reviewed_by: + reviewer = User.query.get(sub.reviewed_by) + reviewer_name = reviewer.username if reviewer else f'User {sub.reviewed_by}' + + # Calculate byte counts + original_bytes = sub.article_word_count or 0 + expansion_bytes = sub.article_expansion_bytes or 0 + total_bytes = original_bytes + expansion_bytes + + # Determine expansion type + if expansion_bytes > 0: + expansion_type = "Added Content" + elif expansion_bytes < 0: + expansion_type = "Removed Content" + else: + expansion_type = "No Change" + + # Reference count + ref_count = sub.article_reference_count or 0 + min_refs = self._safe_get('min_reference_count', 0) + min_bytes = self._safe_get('min_byte_count', 0) + meets_min_refs = "Yes" if ref_count >= min_refs else "No" + meets_min_bytes = "Yes" if total_bytes >= min_bytes else "No" + + # Format dates + article_created = sub.article_created_at.isoformat() if sub.article_created_at else 'N/A' + latest_revision = sub.latest_revision_timestamp.isoformat() if sub.latest_revision_timestamp else 'N/A' + submitted = sub.submitted_at.isoformat() if sub.submitted_at else 'N/A' + reviewed = sub.reviewed_at.isoformat() if sub.reviewed_at else 'Not Reviewed' + + # Calculate days to review + days_to_review = 'N/A' + if sub.submitted_at and sub.reviewed_at: + delta = sub.reviewed_at - sub.submitted_at + days_to_review = delta.days + + # Review comments + review_comments = (sub.review_comment or '').replace('"', '""').replace('\n', ' ') if sub.review_comment else 'No comments' + + output.write( + f"{sub.id}," + f"\"{participant_name}\"," + f"\"{participant_email}\"," + f"\"{sub.article_title}\"," + f"\"{sub.article_link}\"," + f"\"{sub.article_author or 'Unknown'}\"," + f"\"{sub.latest_revision_author or 'N/A'}\"," + f"{total_bytes}," + f"{original_bytes}," + f"{expansion_bytes}," + f"\"{expansion_type}\"," + f"{ref_count}," + f"{meets_min_refs}," + f"{meets_min_bytes}," + f"{article_created}," + f"{latest_revision}," + f"{sub.status}," + f"{sub.score or 0}," + f"{submitted}," + f"{reviewed}," + f"\"{reviewer_name}\"," + f"{days_to_review}," + f"\"{review_comments}\"\n" + ) + + output.write("\n") + + # ===================================================================== + # SECTION 11: PARTICIPANT-WISE DETAILED BREAKDOWN + # ===================================================================== + output.write("-" * 150 + "\n") + output.write("PARTICIPANT-WISE DETAILED BREAKDOWN\n") + output.write("-" * 150 + "\n\n") + + output.write( + "Username," + "Email," + "Total Articles," + "Accepted," + "Rejected," + "Pending," + "Total Bytes," 
+ "Avg Bytes," + "Total References," + "Avg References," + "Total Points," + "Avg Score," + "Highest Score," + "Lowest Score," + "Success Rate (%),First Submission,Last Submission,Active Days\n" + ) + + # Group submissions by user + from app.models.user import User + user_stats = defaultdict(lambda: { + 'email': '', + 'total': 0, + 'accepted': 0, + 'rejected': 0, + 'pending': 0, + 'total_bytes': 0, + 'total_refs': 0, + 'total_points': 0, + 'scores': [], + 'submission_dates': [] + }) + + for sub in submissions_query: + participant = User.query.get(sub.user_id) + username = participant.username if participant else 'Unknown' + email = participant.email if participant else 'N/A' + + user_stats[username]['email'] = email + user_stats[username]['total'] += 1 + + if sub.status == 'accepted': + user_stats[username]['accepted'] += 1 + elif sub.status == 'rejected': + user_stats[username]['rejected'] += 1 + else: + user_stats[username]['pending'] += 1 + + # Calculate bytes + total_bytes = (sub.article_word_count or 0) + (sub.article_expansion_bytes or 0) + user_stats[username]['total_bytes'] += total_bytes + + # References + user_stats[username]['total_refs'] += (sub.article_reference_count or 0) + + # Points and scores + score = sub.score or 0 + user_stats[username]['total_points'] += score + user_stats[username]['scores'].append(score) + + # Submission dates + if sub.submitted_at: + user_stats[username]['submission_dates'].append(sub.submitted_at) + + # Write participant summary + for username in sorted(user_stats.keys()): + stats_data = user_stats[username] + total = stats_data['total'] + + avg_bytes = round(stats_data['total_bytes'] / total, 2) if total > 0 else 0 + avg_refs = round(stats_data['total_refs'] / total, 2) if total > 0 else 0 + avg_score = round(stats_data['total_points'] / total, 2) if total > 0 else 0 + success_rate = round((stats_data['accepted'] / total) * 100, 2) if total > 0 else 0 + + highest_score = max(stats_data['scores']) if stats_data['scores'] else 0 + lowest_score = min(stats_data['scores']) if stats_data['scores'] else 0 + + # Date range + if stats_data['submission_dates']: + first_date = min(stats_data['submission_dates']).isoformat() + last_date = max(stats_data['submission_dates']).isoformat() + active_days = (max(stats_data['submission_dates']) - min(stats_data['submission_dates'])).days + 1 + else: + first_date = 'N/A' + last_date = 'N/A' + active_days = 0 + + output.write( + f"\"{username}\"," + f"\"{stats_data['email']}\"," + f"{total}," + f"{stats_data['accepted']}," + f"{stats_data['rejected']}," + f"{stats_data['pending']}," + f"{stats_data['total_bytes']}," + f"{avg_bytes}," + f"{stats_data['total_refs']}," + f"{avg_refs}," + f"{stats_data['total_points']}," + f"{avg_score}," + f"{highest_score}," + f"{lowest_score}," + f"{success_rate}," + f"{first_date}," + f"{last_date}," + f"{active_days}\n" + ) + + output.write("\n") + + # ===================================================================== + # SECTION 12: ARTICLE INSIGHTS & ANALYTICS + # ===================================================================== + if submissions_query: + output.write("-" * 150 + "\n") + output.write("ARTICLE INSIGHTS & ANALYTICS\n") + output.write("-" * 150 + "\n\n") + + # Calculate insights + total_bytes_all = sum((s.article_word_count or 0) + (s.article_expansion_bytes or 0) for s in submissions_query) + total_refs_all = sum(s.article_reference_count or 0 for s in submissions_query) + + expansion_added = sum(s.article_expansion_bytes for s in submissions_query if 
s.article_expansion_bytes and s.article_expansion_bytes > 0) + expansion_removed = sum(abs(s.article_expansion_bytes) for s in submissions_query if s.article_expansion_bytes and s.article_expansion_bytes < 0) + + output.write("Insight Category,Value\n") + output.write(f"Total Bytes Contributed,{total_bytes_all}\n") + output.write(f"Total References Added,{total_refs_all}\n") + output.write(f"Average Bytes per Article,{round(total_bytes_all / len(submissions_query), 2)}\n") + output.write(f"Average References per Article,{round(total_refs_all / len(submissions_query), 2)}\n") + output.write(f"Total Content Expansion,{expansion_added}\n") + output.write(f"Total Content Reduction,{expansion_removed}\n") + output.write(f"Net Content Change,{expansion_added - expansion_removed}\n") + + # Top articles + max_bytes_article = max(submissions_query, key=lambda s: (s.article_word_count or 0) + (s.article_expansion_bytes or 0)) + output.write(f"Largest Article,\"{max_bytes_article.article_title}\" ({(max_bytes_article.article_word_count or 0) + (max_bytes_article.article_expansion_bytes or 0)} bytes)\n") + + max_refs_article = max(submissions_query, key=lambda s: s.article_reference_count or 0) + output.write(f"Most Referenced Article,\"{max_refs_article.article_title}\" ({max_refs_article.article_reference_count or 0} refs)\n") + + max_score_article = max(submissions_query, key=lambda s: s.score or 0) + output.write(f"Highest Scoring Article,\"{max_score_article.article_title}\" ({max_score_article.score or 0} points)\n") + + output.write("\n") + + # ===================================================================== + # SECTION 13: COMPLIANCE & QUALITY METRICS + # ===================================================================== + if submissions_query: + output.write("-" * 150 + "\n") + output.write("COMPLIANCE & QUALITY METRICS\n") + output.write("-" * 150 + "\n\n") + + min_bytes = self._safe_get('min_byte_count', 0) + min_refs = self._safe_get('min_reference_count', 0) + + meets_byte_req = sum(1 for s in submissions_query if ((s.article_word_count or 0) + (s.article_expansion_bytes or 0)) >= min_bytes) + meets_ref_req = sum(1 for s in submissions_query if (s.article_reference_count or 0) >= min_refs) + meets_both = sum(1 for s in submissions_query if + ((s.article_word_count or 0) + (s.article_expansion_bytes or 0)) >= min_bytes and + (s.article_reference_count or 0) >= min_refs) + + output.write("Compliance Metric,Count,Percentage\n") + output.write(f"Meets Byte Requirement ({min_bytes}),{meets_byte_req},{round(meets_byte_req / len(submissions_query) * 100, 2)}%\n") + output.write(f"Meets Reference Requirement ({min_refs}),{meets_ref_req},{round(meets_ref_req / len(submissions_query) * 100, 2)}%\n") + output.write(f"Meets Both Requirements,{meets_both},{round(meets_both / len(submissions_query) * 100, 2)}%\n") + output.write(f"Fails Requirements,{len(submissions_query) - meets_both},{round((len(submissions_query) - meets_both) / len(submissions_query) * 100, 2)}%\n") + + output.write("\n") + + # ===================================================================== + # SECTION 14: REPORT FOOTER + # ===================================================================== + output.write("=" * 150 + "\n") + output.write("END OF COMPREHENSIVE REPORT\n") + output.write("=" * 150 + "\n") + output.write(f"Total Submissions,{len(submissions_query)}\n") + output.write(f"Total Participants,{stats['unique_participants']}\n") + output.write(f"Total Judges,{len(judges) if judges else 0}\n") + 
output.write(f"Generated,{datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S UTC')}\n") + output.write(f"Platform,WikiContest v2.0\n") + output.write("=" * 150 + "\n") + + # Save file + file_path = self._save_file(output.getvalue(), 'csv') + return file_path + + def _save_file(self, content, extension): + """Save CSV file to reports directory""" + + if os.path.exists('/data/project'): + reports_dir = '/data/project/wikicontest/reports' + else: + reports_dir = os.path.join(os.path.dirname(__file__), '../../reports') + + os.makedirs(reports_dir, exist_ok=True) + + timestamp = int(datetime.now().timestamp()) + filename = f"contest_{self.contest.id}_report_{timestamp}.{extension}" + file_path = os.path.join(reports_dir, filename) + + with open(file_path, 'w', encoding='utf-8') as f: + f.write(content) + + print(f"✅ Comprehensive Report saved: {file_path}") + + return file_path + + +class PDFReportBuilder: + """Build comprehensive PDF reports with all contest details""" + + def __init__(self, contest, metadata=None): + self.contest = contest + self.metadata = metadata or {} + + def _safe_get(self, attr, default='N/A'): + """Safely get contest attribute with fallback""" + return getattr(self.contest, attr, default) + + def generate(self): + """Generate comprehensive PDF report""" + try: + from reportlab.lib.pagesizes import A4, letter + from reportlab.platypus import ( + SimpleDocTemplate, + Table, + TableStyle, + Paragraph, + Spacer, + PageBreak, + Image, + ) + from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle + from reportlab.lib.units import inch + from reportlab.lib import colors + except ImportError: + raise ImportError("reportlab is required for PDF generation") + + # Fetch data + stats = get_submission_statistics(self.contest.id) + top_n = self.metadata.get("top_n", 20) + top_contributors = get_top_contributors(self.contest.id, limit=top_n) + timeline = get_submission_timeline(self.contest.id) + judges = get_judge_statistics(self.contest.id) + + file_path = self._get_file_path("pdf") + doc = SimpleDocTemplate( + file_path, + pagesize=letter, + title=f"{self.contest.name} - Comprehensive Contest Report", + author="WikiContest Platform", + subject=f"Contest Report - {datetime.now().strftime('%Y-%m-%d')}", + ) + + story = [] + styles = getSampleStyleSheet() + + # Custom styles + title_style = ParagraphStyle( + "CustomTitle", + parent=styles["Heading1"], + fontSize=24, + textColor=colors.HexColor("#006699"), + spaceAfter=30, + alignment=1, + fontName="Helvetica-Bold" + ) + + heading_style = ParagraphStyle( + "CustomHeading", + parent=styles["Heading2"], + fontSize=16, + textColor=colors.HexColor("#006699"), + spaceAfter=12, + spaceBefore=12, + fontName="Helvetica-Bold" + ) + + # ===================================================================== + # TITLE PAGE + # ===================================================================== + story.append(Spacer(1, 1 * inch)) + story.append(Paragraph(f"COMPREHENSIVE CONTEST REPORT", title_style)) + story.append(Paragraph(f"{self.contest.name}", title_style)) + story.append(Spacer(1, 0.5 * inch)) + + # Calculate status + status = self._safe_get('status', 'Unknown') + if status == 'Unknown': + try: + now = datetime.now() + start = datetime.fromisoformat(str(self.contest.start_date)) if self.contest.start_date else None + end = datetime.fromisoformat(str(self.contest.end_date)) if self.contest.end_date else None + if start and end: + status = "Upcoming" if now < start else "Past" if now > end else "Current" + except: + pass + + 
story.append(Paragraph(f"Status: {status}", styles["Normal"])) + story.append(Paragraph(f"Generated: {datetime.utcnow().strftime('%B %d, %Y at %H:%M UTC')}", styles["Normal"])) + story.append(Paragraph(f"Contest ID: {self.contest.id}", styles["Normal"])) + story.append(PageBreak()) + + # ===================================================================== + # CONTEST INFORMATION + # ===================================================================== + story.append(Paragraph("Contest Information", heading_style)) + + contest_info = [ + ["Field", "Value"], + ["Contest Name", self.contest.name], + ["Project", self._safe_get('project_name', 'N/A')], + ["Created By", self._safe_get('created_by', 'Unknown')], + ["Start Date", str(self._safe_get('start_date', 'N/A'))], + ["End Date", str(self._safe_get('end_date', 'N/A'))], + ] + + # Add duration if available + if self.contest.start_date and self.contest.end_date: + try: + start = datetime.fromisoformat(str(self.contest.start_date)) + end = datetime.fromisoformat(str(self.contest.end_date)) + duration = (end - start).days + contest_info.append(["Duration", f"{duration} days"]) + except: + pass + + info_table = Table(contest_info, colWidths=[2.5 * inch, 4 * inch]) + info_table.setStyle(TableStyle([ + ('BACKGROUND', (0, 0), (-1, 0), colors.HexColor("#006699")), + ('TEXTCOLOR', (0, 0), (-1, 0), colors.whitesmoke), + ('ALIGN', (0, 0), (-1, -1), 'LEFT'), + ('FONTNAME', (0, 0), (-1, 0), 'Helvetica-Bold'), + ('FONTSIZE', (0, 0), (-1, -1), 10), + ('GRID', (0, 0), (-1, -1), 0.5, colors.grey), + ('ROWBACKGROUNDS', (0, 1), (-1, -1), [colors.white, colors.lightgrey]), + ])) + story.append(info_table) + story.append(Spacer(1, 0.3 * inch)) + + # ===================================================================== + # SCORING SYSTEM + # ===================================================================== + story.append(Paragraph("Scoring System", heading_style)) + + scoring_params = self._safe_get('scoring_parameters', None) + if scoring_params and isinstance(scoring_params, dict) and scoring_params.get('enabled'): + story.append(Paragraph(f"Scoring Mode: Multi-Parameter Scoring", styles["Normal"])) + story.append(Paragraph(f"Maximum Score: {scoring_params.get('max_score', 10)}", styles["Normal"])) + story.append(Paragraph(f"Minimum Score: {scoring_params.get('min_score', 0)}", styles["Normal"])) + story.append(Spacer(1, 0.2 * inch)) + + if 'parameters' in scoring_params: + param_data = [["Parameter", "Weight (%)", "Description"]] + for param in scoring_params['parameters']: + param_data.append([ + param.get('name', 'Unknown'), + str(param.get('weight', 0)), + param.get('description', 'No description') + ]) + + param_table = Table(param_data, colWidths=[2 * inch, 1 * inch, 3.5 * inch]) + param_table.setStyle(TableStyle([ + ('BACKGROUND', (0, 0), (-1, 0), colors.HexColor("#006699")), + ('TEXTCOLOR', (0, 0), (-1, 0), colors.whitesmoke), + ('ALIGN', (0, 0), (-1, -1), 'LEFT'), + ('FONTNAME', (0, 0), (-1, 0), 'Helvetica-Bold'), + ('FONTSIZE', (0, 0), (-1, -1), 9), + ('GRID', (0, 0), (-1, -1), 0.5, colors.grey), + ])) + story.append(param_table) + else: + story.append(Paragraph(f"Scoring Mode: Simple Accept/Reject", styles["Normal"])) + story.append(Paragraph(f"Points for Accepted: {self._safe_get('marks_setting_accepted', 0)}", styles["Normal"])) + story.append(Paragraph(f"Points for Rejected: {self._safe_get('marks_setting_rejected', 0)}", styles["Normal"])) + + story.append(Spacer(1, 0.3 * inch)) + + # 
===================================================================== + # EXECUTIVE SUMMARY + # ===================================================================== + story.append(Paragraph("Executive Summary", heading_style)) + + summary_data = [ + ["Metric", "Value", "Percentage"], + ["Total Submissions", str(stats["total_submissions"]), "100%"], + ["Accepted", str(stats["accepted"]), f"{round((stats['accepted'] / stats['total_submissions'] * 100) if stats['total_submissions'] > 0 else 0, 1)}%"], + ["Rejected", str(stats["rejected"]), f"{round((stats['rejected'] / stats['total_submissions'] * 100) if stats['total_submissions'] > 0 else 0, 1)}%"], + ["Pending", str(stats["pending"]), f"{round((stats['pending'] / stats['total_submissions'] * 100) if stats['total_submissions'] > 0 else 0, 1)}%"], + ["Total Points", str(stats["total_points"]), "-"], + ["Participants", str(stats["unique_participants"]), "-"], + ] + + summary_table = Table(summary_data, colWidths=[2.5 * inch, 2 * inch, 2 * inch]) + summary_table.setStyle(TableStyle([ + ('BACKGROUND', (0, 0), (-1, 0), colors.HexColor("#006699")), + ('TEXTCOLOR', (0, 0), (-1, 0), colors.whitesmoke), + ('ALIGN', (1, 0), (-1, -1), 'CENTER'), + ('FONTNAME', (0, 0), (-1, 0), 'Helvetica-Bold'), + ('FONTSIZE', (0, 0), (-1, -1), 10), + ('GRID', (0, 0), (-1, -1), 0.5, colors.grey), + ('ROWBACKGROUNDS', (0, 1), (-1, -1), [colors.white, colors.lightgrey]), + ])) + story.append(summary_table) + story.append(Spacer(1, 0.3 * inch)) + + # Timeline Chart + if timeline and len(timeline) > 1: + chart_path = self._create_timeline_chart(timeline) + if chart_path: + story.append(Paragraph("Submission Timeline", heading_style)) + try: + story.append(Image(chart_path, width=6 * inch, height=3.5 * inch)) + story.append(Spacer(1, 0.3 * inch)) + except: + pass + + # ===================================================================== + # JUDGE PERFORMANCE + # ===================================================================== + if judges: + story.append(PageBreak()) + story.append(Paragraph("Judge Performance Analysis", heading_style)) + + judge_data = [["Judge", "Reviewed", "Accepted", "Rejected", "Accept %"]] + for judge in judges: + judge_data.append([ + judge["judge_username"], + str(judge["total_reviewed"]), + str(judge["accepted"]), + str(judge["rejected"]), + f"{judge['acceptance_rate']}%", + ]) + + judge_table = Table(judge_data, colWidths=[2 * inch, 1.2 * inch, 1.2 * inch, 1.2 * inch, 1 * inch]) + judge_table.setStyle(TableStyle([ + ('BACKGROUND', (0, 0), (-1, 0), colors.HexColor("#006699")), + ('TEXTCOLOR', (0, 0), (-1, 0), colors.whitesmoke), + ('ALIGN', (1, 0), (-1, -1), 'CENTER'), + ('FONTNAME', (0, 0), (-1, 0), 'Helvetica-Bold'), + ('FONTSIZE', (0, 0), (-1, -1), 9), + ('GRID', (0, 0), (-1, -1), 0.5, colors.grey), + ('ROWBACKGROUNDS', (0, 1), (-1, -1), [colors.white, colors.lightgrey]), + ])) + story.append(judge_table) + story.append(Spacer(1, 0.3 * inch)) + + # ===================================================================== + # TOP CONTRIBUTORS + # ===================================================================== + story.append(PageBreak()) + story.append(Paragraph(f"Top {len(top_contributors)} Contributors", heading_style)) + + contrib_data = [["Rank", "Username", "Submissions", "Accepted", "Points"]] + for c in top_contributors[:20]: # Limit to 20 for PDF + contrib_data.append([ + str(c["rank"]), + c["username"][:25], + str(c["total_submissions"]), + str(c["accepted"]), + str(c["total_points"]), + ]) + + contrib_table = Table(contrib_data, 
colWidths=[0.6 * inch, 2.5 * inch, 1.3 * inch, 1.3 * inch, 1 * inch])
+        contrib_table.setStyle(TableStyle([
+            ('BACKGROUND', (0, 0), (-1, 0), colors.HexColor("#006699")),
+            ('TEXTCOLOR', (0, 0), (-1, 0), colors.whitesmoke),
+            ('ALIGN', (1, 0), (-1, -1), 'CENTER'),
+            ('FONTNAME', (0, 0), (-1, 0), 'Helvetica-Bold'),
+            ('FONTSIZE', (0, 0), (-1, -1), 8),
+            ('GRID', (0, 0), (-1, -1), 0.5, colors.grey),
+            ('ROWBACKGROUNDS', (0, 1), (-1, -1), [colors.white, colors.lightgrey]),
+        ]))
+        story.append(contrib_table)
+
+        # =====================================================================
+        # INSIGHTS
+        # =====================================================================
+        from app.models.submission import Submission
+        submissions = Submission.query.filter_by(contest_id=self.contest.id).all()
+
+        if submissions:
+            story.append(PageBreak())
+            story.append(Paragraph("Article Insights", heading_style))
+
+            total_bytes = sum((s.article_word_count or 0) + (s.article_expansion_bytes or 0) for s in submissions)
+            total_refs = sum(s.article_reference_count or 0 for s in submissions)
+
+            insights_data = [
+                ["Metric", "Value"],
+                ["Total Bytes Contributed", f"{total_bytes:,}"],
+                ["Total References", f"{total_refs:,}"],
+                ["Avg Bytes per Article", f"{round(total_bytes / len(submissions), 1):,}"],
+                ["Avg References per Article", f"{round(total_refs / len(submissions), 1)}"],
+            ]
+
+            insights_table = Table(insights_data, colWidths=[3 * inch, 3.5 * inch])
+            insights_table.setStyle(TableStyle([
+                ('BACKGROUND', (0, 0), (-1, 0), colors.HexColor("#006699")),
+                ('TEXTCOLOR', (0, 0), (-1, 0), colors.whitesmoke),
+                ('ALIGN', (0, 0), (-1, -1), 'LEFT'),
+                ('FONTNAME', (0, 0), (-1, 0), 'Helvetica-Bold'),
+                ('FONTSIZE', (0, 0), (-1, -1), 10),
+                ('GRID', (0, 0), (-1, -1), 0.5, colors.grey),
+                ('ROWBACKGROUNDS', (0, 1), (-1, -1), [colors.white, colors.lightgrey]),
+            ]))
+            story.append(insights_table)
+
+        # Footer
+        story.append(PageBreak())
+        story.append(Spacer(1, 2 * inch))
+        story.append(Paragraph(
+            "End of Report<br/>"
+            "Generated by WikiContest Platform v2.0<br/>"
+            f"{datetime.utcnow().strftime('%B %d, %Y at %H:%M UTC')}
", + styles["Normal"] + )) + + doc.build(story) + return file_path + + def _create_timeline_chart(self, timeline_data): + """Create timeline chart""" + try: + import matplotlib + matplotlib.use("Agg") + import matplotlib.pyplot as plt + from datetime import datetime as dt + + dates = [dt.fromisoformat(d["date"]) for d in timeline_data] + totals = [d["total"] for d in timeline_data] + accepted = [d["accepted"] for d in timeline_data] + rejected = [d["rejected"] for d in timeline_data] + + plt.figure(figsize=(10, 6)) + plt.plot(dates, totals, marker="o", linestyle="-", color="#006699", label="Total", linewidth=2) + plt.plot(dates, accepted, marker="s", linestyle="--", color="#339966", label="Accepted") + plt.plot(dates, rejected, marker="^", linestyle="--", color="#990000", label="Rejected") + + plt.xlabel("Date", fontsize=12) + plt.ylabel("Submissions", fontsize=12) + plt.title("Submission Timeline", fontsize=14, fontweight="bold") + plt.legend() + plt.grid(True, alpha=0.3) + plt.xticks(rotation=45) + plt.tight_layout() + + chart_path = f"/tmp/chart_{self.contest.id}_{int(datetime.now().timestamp())}.png" + plt.savefig(chart_path, dpi=150, bbox_inches="tight") + plt.close() + + return chart_path + except Exception as e: + print(f"Chart error: {e}") + return None + + def _get_file_path(self, extension): + """Get file path""" + if os.path.exists("/data/project"): + reports_dir = "/data/project/wikicontest/reports" + else: + reports_dir = os.path.join(os.path.dirname(__file__), "../../reports") + + os.makedirs(reports_dir, exist_ok=True) + filename = f"contest_{self.contest.id}_report_{int(datetime.now().timestamp())}.{extension}" + return os.path.join(reports_dir, filename) \ No newline at end of file diff --git a/backend/app/utils/report_queries.py b/backend/app/utils/report_queries.py new file mode 100644 index 0000000..2950ee5 --- /dev/null +++ b/backend/app/utils/report_queries.py @@ -0,0 +1,169 @@ +""" +Report Query Functions for WikiContest Application +FIXED: Removed all submission_type references +""" + +from sqlalchemy import func, case, distinct +from app.database import db +from app.models.submission import Submission +from app.models.user import User + + +def get_submission_statistics(contest_id): + """Aggregate submission statistics for a contest""" + stats = db.session.query( + func.count(Submission.id).label('total'), + func.sum(case((Submission.status == 'accepted', 1), else_=0)).label('accepted'), + func.sum(case((Submission.status == 'rejected', 1), else_=0)).label('rejected'), + func.sum(case((Submission.status == 'pending', 1), else_=0)).label('pending'), + func.sum(Submission.score).label('total_points'), + func.count(distinct(Submission.user_id)).label('unique_participants') + ).filter( + Submission.contest_id == contest_id + ).first() + + return { + 'total_submissions': stats.total or 0, + 'accepted': stats.accepted or 0, + 'rejected': stats.rejected or 0, + 'pending': stats.pending or 0, + 'total_points': stats.total_points or 0, + 'unique_participants': stats.unique_participants or 0 + } + + +def get_top_contributors(contest_id, limit=None): + """Get top contributors ranked by total points""" + query = db.session.query( + Submission.user_id, + User.username, + User.email, + func.count(Submission.id).label('total_submissions'), + func.sum(case((Submission.status == 'accepted', 1), else_=0)).label('accepted'), + func.sum(case((Submission.status == 'rejected', 1), else_=0)).label('rejected'), + func.sum(case((Submission.status == 'pending', 1), 
+
+
+def get_top_contributors(contest_id, limit=None):
+    """Get top contributors ranked by total points"""
+    query = db.session.query(
+        Submission.user_id,
+        User.username,
+        User.email,
+        func.count(Submission.id).label('total_submissions'),
+        func.sum(case((Submission.status == 'accepted', 1), else_=0)).label('accepted'),
+        func.sum(case((Submission.status == 'rejected', 1), else_=0)).label('rejected'),
+        func.sum(case((Submission.status == 'pending', 1), else_=0)).label('pending'),
+        func.sum(Submission.score).label('total_points')
+    ).join(
+        User, Submission.user_id == User.id
+    ).filter(
+        Submission.contest_id == contest_id
+    ).group_by(
+        Submission.user_id, User.username, User.email
+    ).order_by(
+        func.sum(Submission.score).desc()
+    )
+
+    if limit:
+        query = query.limit(limit)
+
+    results = query.all()
+
+    return [
+        {
+            'rank': idx + 1,
+            'user_id': r.user_id,
+            'username': r.username,
+            'email': r.email,
+            'total_submissions': r.total_submissions,
+            'accepted': r.accepted or 0,
+            'rejected': r.rejected or 0,
+            'pending': r.pending or 0,
+            'total_points': r.total_points or 0
+        }
+        for idx, r in enumerate(results)
+    ]
+
+
+def get_submission_timeline(contest_id):
+    """Get submissions grouped by date"""
+    results = db.session.query(
+        func.date(Submission.submitted_at).label('date'),
+        func.count(Submission.id).label('total'),
+        func.sum(case((Submission.status == 'accepted', 1), else_=0)).label('accepted'),
+        func.sum(case((Submission.status == 'rejected', 1), else_=0)).label('rejected')
+    ).filter(
+        Submission.contest_id == contest_id
+    ).group_by(
+        func.date(Submission.submitted_at)
+    ).order_by(
+        func.date(Submission.submitted_at)
+    ).all()
+
+    return [
+        {
+            'date': r.date.isoformat(),
+            'total': r.total,
+            'accepted': r.accepted or 0,
+            'rejected': r.rejected or 0
+        }
+        for r in results
+    ]
+
+
+def get_submissions_by_type(contest_id):
+    """
+    Placeholder: the submission_type column does not exist yet,
+    so this currently returns an empty list.
+    """
+    return []
+
+
+def get_judge_statistics(contest_id):
+    """Get statistics for each judge/reviewer"""
+    results = db.session.query(
+        Submission.reviewed_by,
+        User.username,
+        func.count(Submission.id).label('total_reviewed'),
+        func.sum(case((Submission.status == 'accepted', 1), else_=0)).label('accepted'),
+        func.sum(case((Submission.status == 'rejected', 1), else_=0)).label('rejected')
+    ).join(
+        User, Submission.reviewed_by == User.id
+    ).filter(
+        Submission.contest_id == contest_id,
+        Submission.reviewed_by.isnot(None)
+    ).group_by(
+        Submission.reviewed_by, User.username
+    ).order_by(
+        func.count(Submission.id).desc()
+    ).all()
+
+    return [
+        {
+            'judge_username': r.username,
+            'total_reviewed': r.total_reviewed,
+            'accepted': r.accepted or 0,
+            'rejected': r.rejected or 0,
+            'acceptance_rate': round((r.accepted / r.total_reviewed * 100), 2) if r.total_reviewed > 0 else 0
+        }
+        for r in results
+    ]
+
+
+def get_all_submissions(contest_id):
+    """Get all submissions for a contest - FIXED: removed submission_type"""
+    submissions = db.session.query(
+        Submission,
+        User.username
+    ).join(
+        User, Submission.user_id == User.id
+    ).filter(
+        Submission.contest_id == contest_id
+    ).order_by(
+        Submission.score.desc(),
+        Submission.submitted_at.desc()
+    ).all()
+
+    return [
+        {
+            'submission_id': submission.id,
+            'username': username,
+            'article_title': submission.article_title,
+            'article_link': submission.article_link,
+            'status': submission.status,
+            'score': submission.score or 0,
+            'submitted_at': submission.submitted_at.isoformat() if submission.submitted_at else None,
+            'reviewed_at': submission.reviewed_at.isoformat() if submission.reviewed_at else None,
+        }
+        for submission, username in submissions
+    ]
\ No newline at end of file
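
Because get_all_submissions() returns flat dicts, the 'csv' report type can serialize its output with the standard library alone. A minimal sketch of that step; write_submissions_csv is a hypothetical helper name for illustration, not the patch's actual CSV writer:

    import csv

    from app.utils.report_queries import get_all_submissions

    def write_submissions_csv(contest_id, path):
        """Hypothetical helper: dump get_all_submissions() rows to a CSV file."""
        # Requires an active app context (see the usage sketch above).
        rows = get_all_submissions(contest_id)
        fieldnames = [
            "submission_id", "username", "article_title", "article_link",
            "status", "score", "submitted_at", "reviewed_at",
        ]
        with open(path, "w", newline="", encoding="utf-8") as fh:
            writer = csv.DictWriter(fh, fieldnames=fieldnames)
            writer.writeheader()
            writer.writerows(rows)
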
diff --git a/frontend/src/views/ContestView.vue b/frontend/src/views/ContestView.vue
index 0414567..eea344e 100644
--- a/frontend/src/views/ContestView.vue
+++ b/frontend/src/views/ContestView.vue
@@ -20,6 +20,18 @@
+
+
@@ -733,9 +745,6 @@
         :contest-scoring-config="contest?.scoring_parameters"
         @reviewed="handleSubmissionReviewed"
         @deleted="handleSubmissionDeleted" />
-
-
-
@@ -1281,6 +1288,92 @@
+
+
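
For completeness: the report blueprint is mounted at /api/report, but the concrete route paths live in report_routes.py, which this excerpt does not show in full. A hedged client-side sketch follows, in which the URL path and request body are assumptions to verify against the real routes (report_type 'csv'/'pdf' and top_n come from the ContestReport model's documented metadata):

    import requests

    # Hypothetical endpoint path; check report_routes.py for the real route.
    resp = requests.post(
        "http://localhost:5000/api/report/contest/1",  # contest id 1 is a sample value
        json={"report_type": "pdf", "top_n": 20},      # assumed request body
    )
    print(resp.status_code, resp.json())
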