diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000..97c079ea2 --- /dev/null +++ b/.gitignore @@ -0,0 +1,25 @@ +# Python +__pycache__/ +*.py[cod] +*$py.class +*.so +.Python + +# Virtual Environment +venv/ +env/ +ENV/ + +# Environment variables +.env + +# IDE +.vscode/ +.idea/ +*.swp +*.swo +*~ + +# OS +.DS_Store +Thumbs.db diff --git a/ARCHITECTURE.md b/ARCHITECTURE.md index 223b4737d..c28605e04 100644 --- a/ARCHITECTURE.md +++ b/ARCHITECTURE.md @@ -1,24 +1,62 @@ -## 2. `ARCHITECTURE.md` - -```markdown -# Architecture Overview - -Below, sketch (ASCII, hand-drawn JPEG/PNG pasted in, or ASCII art) the high-level components of your agent. - -## Components - -1. **User Interface** - - E.g., Streamlit, CLI, Slack bot - -2. **Agent Core** - - **Planner**: how you break down tasks - - **Executor**: LLM prompt + tool-calling logic - - **Memory**: vector store, cache, or on-disk logs - -3. **Tools / APIs** - - E.g., Google Gemini API, Tools, etc - -4. **Observability** - - Logging of each reasoning step - - Error handling / retries +# CIFR Agent System – Architecture + +Aligned with the official Agentic AI Hackathon template ([odsc2015/agentic-hackathon-template](https://github.com/odsc2015/agentic-hackathon-template)). + +## High-Level Overview +- Goal: Reduce intercompany friction via multi-agent collaboration. +- Gemini usage: `google.genai` for multimodal analysis, planning, and interventions. +- Delivery targets: CLI + Flask demo; cloud-ready with GCP services. + +## Component Diagram (ASCII) +``` +User Request (UI/CLI) + │ + ▼ +Planner (Gemini plan) + │ plan + ▼ +Executor ──────────────────────────────────────────────┐ + │ dispatch │ + ▼ │ +CommunicationAgent ──► KnowledgeAgent (context store) │ + │ │ │ + ▼ │ │ + FrictionDetectionAgent ◄──┘ │ + │ │ + ▼ │ + InterventionAgent → Response / Guidance to user │ +``` + +## Modules +- `src/planner.py`: Uses Gemini text to propose 3–6 JSON steps; heuristic fallback. 
+- `src/executor.py`: Iterates plan, calls agents, records trace. +- `src/memory.py`: Lightweight in-memory event log for demo traces. +- `cifr_agent_system/communication_agent.py`: Multimodal analysis (Gemini Vision/Text + Language API fallback). +- `cifr_agent_system/friction_detection_agent.py`: Misalignment detection (Gemini + heuristic keywords). +- `cifr_agent_system/intervention_agent.py`: Generates clarifications/action-items/mediation suggestions. +- `cifr_agent_system/knowledge_agent.py`: In-memory knowledge base (store/retrieve/search). +- `cifr_agent_system/config.py`: Env loading, optional insecure SSL for sandboxes, model IDs. + +## Data Flow +1) User goal → Planner → structured steps. +2) Executor loops steps: + - `analyze_messages`: CommunicationAgent performs multimodal analysis; stores context. + - `detect_friction`: FrictionDetectionAgent reasons over stored context. + - `generate_interventions`: InterventionAgent drafts guidance based on friction. +3) KnowledgeAgent stores analyses + interventions. +4) Executor returns results + trace to UI/CLI. + +## Environment & Secrets +- `.env` (gitignored): `GCP_PROJECT_ID`, `GCP_LOCATION`, `GOOGLE_API_KEY` (or per-agent keys), `GEMINI_PRO_MODEL_ID`, `GEMINI_PRO_VISION_MODEL_ID`. +- `key.json` should not be committed; rotate/remove if ever added. +- `ALLOW_INSECURE_SSL=1` only for sandbox environments. + +## Observability (current) +- Structured logging via `logging`; memory trace for demo. +- Flask API prints warnings for quota/heuristic fallbacks. + +## Extension Ideas +- Persist memory to Firestore/Spanner and add retrieval to planner prompts. +- Add tool router for calendars/search and external actions. +- Integrate OpenTelemetry tracing; promote heuristics to typed fallbacks with scores. 
diff --git a/DEMO.md b/DEMO.md index 4e0b5eeb6..c09d926ff 100644 --- a/DEMO.md +++ b/DEMO.md @@ -1,23 +1,21 @@ -# Demo Video +# CIFR Agent System – Demo -Please record a 3–5 minute walkthrough showing: +Provide a public 3–5 minute video link here (YouTube unlisted, Drive public, Loom, etc.). Submissions without a valid link will not be reviewed. -- The problem you solve -- End-to-end agent behavior on a representative example -- Highlighted “agentic” steps (planning, tool calls, memory use) +## Video Link +- TODO: Paste final link here after recording. ---- +## Timestamped Highlights +- 00:00–00:30 Intro & setup +- 00:30–01:30 User input → Planning (Planner + Executor) +- 01:30–02:30 Tool calls & memory (CommunicationAgent, KnowledgeAgent, FrictionDetectionAgent) +- 02:30–03:30 Final output & edge cases (InterventionAgent + fallback flows) -📺 **Provide a Hosted Public Video Link (YouTube unlisted / Loom / MP4):** MUST BE ON A HOS -https://your.video.link.here +## How to Reproduce +1) `python -m venv .venv && source .venv/bin/activate` +2) `pip install -r cifr_agent_system/requirements.txt` +3) Create `.env` with `GCP_PROJECT_ID`, `GOOGLE_API_KEY`, optional `GEMINI_PRO_MODEL_ID`, `GEMINI_PRO_VISION_MODEL_ID`. +4) Run `python -m cifr_agent_system.main` for the CLI demo. +5) (Optional) Run web dashboard via option 2 in the menu. -PLEASE DO NOT UPLOAD RAW VIDOE FILES. These submissions will not be reviewed. -### Timestamps - -- **00:00–00:30** — Introduction & setup -- **00:30–01:30** — User input → Planning step -- **01:30–02:30** — Tool calls & memory retrieval -- **02:30–03:30** — Final output & edge-case handling - -- Vidoes longer than 5 minutes may not be reviewd. 
diff --git a/DEMO_GUIDE.md b/DEMO_GUIDE.md new file mode 100644 index 000000000..4047e710c --- /dev/null +++ b/DEMO_GUIDE.md @@ -0,0 +1,206 @@ +# 🚀 CIFR Agent System - Demo Guide + +## Overview +The **CIFR (Collaborative Intelligence & Friction Reduction) Agent System** is an intelligent orchestration platform that coordinates multiple specialized AI agents to monitor, analyze, and improve team collaboration. The system processes collaboration messages through a sophisticated pipeline where each agent plays a critical role in understanding context, detecting issues, and providing actionable insights. + +## 🎯 CIFR Orchestration System + +The CIFR system orchestrates four specialized AI agents in a coordinated workflow: + +**Pipeline Flow:** +``` +Input → Communication Agent → Knowledge Agent → Friction Detection Agent → Intervention Agent → Output +``` + +### How Orchestration Works: +1. **Input Processing:** Messages (text + optional images) enter the system +2. **Communication Analysis:** Communication Agent analyzes content using Gemini AI +3. **Context Storage:** Knowledge Agent stores analysis for future reference +4. **Friction Detection:** Friction Detection Agent identifies issues and patterns +5. **Intervention Generation:** Intervention Agent provides actionable solutions +6. **Output Delivery:** Comprehensive analysis and recommendations returned + +## 🤖 The Four AI Agents + +### 1. 💬 Communication Agent +**Role:** Primary message analyzer using Gemini AI +- Extracts sentiment scores and emotional tone +- Identifies key entities (people, projects, deadlines, etc.) +- Performs multimodal analysis (text + images) +- Provides deep context understanding + +**Technology:** Google Gemini AI (gemini-2.0-flash, gemini-2.5-flash) + +### 2. 
📚 Knowledge Agent +**Role:** Central memory hub and context manager +- Stores all communication analyses in knowledge base +- Enables pattern recognition across conversations +- Maintains historical context for better understanding +- Supports cross-message analysis and trend detection + +**Key Feature:** Acts as the "memory" that allows agents to understand patterns over time + +### 3. ⚠️ Friction Detection Agent +**Role:** Conflict and misalignment identifier +- Detects friction points using AI reasoning +- Assesses severity levels (0.0 to 1.0) +- Identifies escalation patterns +- Recognizes recurring issues + +**Technology:** Google Gemini AI for intelligent reasoning + +### 4. 💡 Intervention Agent +**Role:** Solution provider and action generator +- Generates context-aware intervention suggestions +- Proposes clarification messages +- Creates actionable action items +- Suggests mediation strategies + +**Technology:** Google Gemini AI for intelligent recommendation generation + +## 🎯 Demo Examples + +### 1. 😤 Frustrated Delay Scenario +**Message:** "I am really frustrated with the constant delays on this feature. We've been waiting for 3 weeks now and the deadline is approaching fast. This is becoming a serious issue for our team and we need to accelerate immediately!" + +**What to Look For:** +- **Communication Agent:** Negative sentiment score (around -0.6), detects keywords like "frustrated", "delays", "issue" +- **Knowledge Agent:** Stores analysis in knowledge base for future pattern recognition +- **Friction Detection:** Should detect friction with high severity (0.6-0.8) +- **Intervention:** Suggests clarification or immediate action items + +**Demo Value:** Shows how the system identifies emotional friction and urgency, with Knowledge Agent maintaining context for future analysis. + +--- + +### 2. ⏰ Timeline Slippage Issue +**Message:** "The project timeline is slipping again. 
We were supposed to deliver Phase 2 by Friday, but now it looks like it will be delayed by at least another week. This is the third time we've had to push back deadlines. We need to have a serious discussion about resource allocation and priorities." + +**What to Look For:** +- **Communication Agent:** Negative sentiment, entities like "timeline", "deadline", "delays" +- **Knowledge Agent:** Stores recurring pattern data for trend analysis +- **Friction Detection:** Detects recurring issues and escalation patterns +- **Intervention:** Proposes action items for resource review and priority alignment + +**Demo Value:** Demonstrates pattern recognition and escalation detection, with Knowledge Agent tracking recurring issues. + +--- + +### 3. ✅ Positive Resolution +**Message:** "Great work on the latest sprint! The team really pulled together and we delivered everything on time. The new features are working perfectly and the client is very happy with the progress. Let's keep this momentum going!" + +**What to Look For:** +- **Communication Agent:** Positive sentiment score (around 0.7), positive keywords +- **Knowledge Agent:** Stores positive interaction for team health tracking +- **Friction Detection:** No friction detected +- **Intervention:** No intervention needed (confirms positive state) + +**Demo Value:** Shows the system correctly identifies positive communication and avoids false positives, with Knowledge Agent maintaining positive interaction history. + +--- + +### 4. ⚔️ Team Conflict +**Message:** "I completely disagree with the approach we're taking. The current design doesn't align with our original requirements and I think we're going in the wrong direction. We need to stop and reconsider before we waste more time and resources on this." 
+ +**What to Look For:** +- **Communication Agent:** Negative sentiment, conflict indicators like "disagree", "wrong direction" +- **Knowledge Agent:** Stores conflict context for future reference and pattern tracking +- **Friction Detection:** High severity conflict detection +- **Intervention:** Suggests mediation or joint discussion to resolve differences + +**Demo Value:** Highlights conflict detection and mediation intervention capabilities, with Knowledge Agent maintaining conflict history. + +--- + +### 5. 📊 Data Analysis Request +**Message:** "This graph shows a significant drop in user engagement over the past month. We went from 85% active users to just 62%. The data suggests there might be an issue with the latest update. Can someone analyze this chart and help us understand what's happening?" + +**What to Look For:** +- **Communication Agent:** Multimodal analysis (if image uploaded), detects data-related entities +- **Knowledge Agent:** Stores data analysis context for trend tracking +- **Friction Detection:** Moderate concern detection +- **Intervention:** Suggests investigation or data review action items + +**Demo Value:** Demonstrates multimodal capabilities and data-driven friction detection, with Knowledge Agent maintaining data analysis history. + +--- + +## 🎨 Features Showcased + +### 1. **CIFR Orchestration System** +- **Coordinated Workflow:** Four specialized agents working in harmony +- **Pipeline Processing:** Sequential analysis from input to actionable output +- **Context Preservation:** Knowledge Agent maintains conversation history +- **Intelligent Routing:** Each agent processes and passes context to the next + +### 2. 
**Four AI Agents Working in Tandem** +- **Communication Agent:** Uses Gemini AI to analyze sentiment, extract entities, and understand context +- **Knowledge Agent:** Central memory hub storing and retrieving context for pattern recognition +- **Friction Detection Agent:** Identifies misalignments, conflicts, and friction points +- **Intervention Agent:** Provides actionable, intelligent suggestions + +### 3. **Gemini AI Integration** +- Real-time AI analysis of text content +- Multimodal support for images/charts +- Natural language understanding +- Context-aware responses +- Per-agent API key configuration for scalability + +### 4. **Knowledge Base & Context Management** +- Persistent storage of all communications +- Pattern recognition across conversations +- Historical context for better understanding +- Trend detection and escalation tracking + +### 5. **Intelligent Friction Detection** +- Sentiment analysis +- Pattern recognition +- Severity assessment +- Contextual understanding +- Recurring issue identification + +### 6. **Actionable Interventions** +- Clarification suggestions +- Action item proposals +- Mediation strategies +- Context-specific recommendations + +## 📝 How to Use the Demo + +1. **Visit:** http://127.0.0.1:9000 +2. **Click Demo Buttons:** Use the pre-built examples to see different scenarios +3. **Or Enter Custom Message:** Type your own collaboration message +4. **Upload Images (Optional):** Add charts, graphs, or diagrams for multimodal analysis +5. **View Results:** See all four agents' analysis in real-time + +## 🎯 Best Practices for Demo + +1. **Start with "Frustrated Delay"** - Most dramatic example showing full pipeline +2. **Show "Positive Resolution"** - Demonstrates accuracy (no false positives) +3. **Try "Team Conflict"** - Highlights mediation capabilities +4. **Use Custom Messages** - Show real-time analysis of audience input +5. 
**Upload Images** - Demonstrate multimodal capabilities if time permits + +## 🔍 What Makes This Demo Compelling + +- **Real-World Scenarios:** Examples based on actual collaboration challenges +- **AI-Powered Intelligence:** Shows advanced Gemini AI capabilities +- **Complete Pipeline:** Demonstrates end-to-end agent workflow +- **Actionable Insights:** Provides real value, not just analysis +- **Professional UI:** Modern, intuitive interface + +## 💡 Key Talking Points + +1. **CIFR Orchestration System:** Intelligent coordination of four specialized agents in a seamless pipeline +2. **Multi-Agent Architecture:** Four specialized AI agents (Communication, Knowledge, Friction Detection, Intervention) working together +3. **Knowledge Base Intelligence:** Central memory hub enabling pattern recognition and historical context +4. **Gemini AI Integration:** State-of-the-art language understanding with per-agent API key configuration +5. **Proactive Intervention:** Not just detection, but actionable solutions tailored to context +6. **Multimodal Analysis:** Handles text, images, and data visualizations +7. **Real-Time Processing:** Fast, responsive analysis through coordinated agent workflow +8. **Context-Aware:** Knowledge Agent maintains conversation history for better understanding over time + +--- + +**Ready to demo?** Visit http://127.0.0.1:9000 and click any demo button to get started! 🚀 + diff --git a/EXPLANATION.md b/EXPLANATION.md index 564f4a172..3b6796c8c 100644 --- a/EXPLANATION.md +++ b/EXPLANATION.md @@ -1,35 +1,41 @@ -# Technical Explanation +# CIFR Agent System – Explanation + +This document follows the Agentic AI Hackathon template requirements. + +## Planning Style +- Planner uses Gemini text model (`google.genai`) to turn user goals into 3–6 sub-tasks. +- Output format: JSON list with `id`, `action`, `input`, `notes`, `expected_output`. +- Fallback heuristics ensure planning works even if Gemini is unavailable. 
+ +## Execution Flow +1) Planner creates steps. +2) Executor iterates steps and routes to agents: + - `analyze_messages` → CommunicationAgent (Gemini Vision/Text + Language API). + - `detect_friction` → FrictionDetectionAgent (Gemini reasoning). + - `generate_interventions` → InterventionAgent (Gemini suggestions). +3) Results and reasoning traces are stored in KnowledgeAgent and Memory store. + +## Memory Usage +- `KnowledgeAgent` maintains contextual store for analyses and recommendations. +- `src/memory.py` provides a lightweight append-only log for executions. +- Future: persist to datastore and add retrieval-augmented prompts. + +## Tool Integration +- Gemini API via `google.genai` for: + - Multimodal analysis (CommunicationAgent). + - Friction reasoning (FrictionDetectionAgent). + - Intervention drafting (InterventionAgent). + - Planning (Planner). +- GCP language services for sentiment/entities as a fallback/augmenter. + +## Limitations +- Requires valid `GOOGLE_API_KEY`; Config raises if missing. +- Memory is in-process only; no persistence yet. +- Planning/exec parsing assumes well-formed Gemini JSON; guarded with fallbacks. +- Demo uses synthetic messages; real integrations (Slack/Gmail/Drive) are stubs. + +## Known Risks +- `key.json` is present locally; remove from git history and rely on `.env`. +- Network/API failures degrade to heuristic flows; add retries/backoff for prod. -## 1. Agent Workflow - -Describe step-by-step how your agent processes an input: -1. Receive user input -2. (Optional) Retrieve relevant memory -3. Plan sub-tasks (e.g., using ReAct / BabyAGI pattern) -4. Call tools or APIs as needed -5. Summarize and return final output - -## 2. Key Modules - -- **Planner** (`planner.py`): … -- **Executor** (`executor.py`): … -- **Memory Store** (`memory.py`): … - -## 3. Tool Integration - -List each external tool or API and how you call it: -- **Search API**: function `search(query)` -- **Calculator**: LLM function calling - -## 4. 
Observability & Testing - -Explain your logging and how judges can trace decisions: -- Logs saved in `logs/` directory -- `TEST.sh` exercises main path - -## 5. Known Limitations - -Be honest about edge cases or performance bottlenecks: -- Long-running API calls -- Handling of ambiguous user inputs diff --git a/README.md b/README.md index 1bc06dbb8..fdd224ef4 100644 --- a/README.md +++ b/README.md @@ -1,42 +1,62 @@ -# Agentic AI App Hackathon Template - -Welcome! This repository is your starting point for the **Agentic AI App Hackathon**. It includes: - -- A consistent folder structure -- An environment spec (`environment.yml` or `Dockerfile`) -- Documentation placeholders to explain your design and demo - -## 📋 Submission Checklist - -- [ ] All code in `src/` runs without errors -- [ ] `ARCHITECTURE.md` contains a clear diagram sketch and explanation -- [ ] `EXPLANATION.md` covers planning, tool use, memory, and limitations -- [ ] `DEMO.md` links to a 3–5 min video with timestamped highlights - - -## 🚀 Getting Started - -1. **Clone / Fork** this template. Very Important. Fork Name MUST be the same name as the teamn name - - -## 📂 Folder Layout - -![Folder Layout Diagram](images/folder-githb.png) - - - -## 🏅 Judging Criteria - -- **Technical Excellence ** - This criterion evaluates the robustness, functionality, and overall quality of the technical implementation. Judges will assess the code's efficiency, the absence of critical bugs, and the successful execution of the project's core features. - -- **Solution Architecture & Documentation ** - This focuses on the clarity, maintainability, and thoughtful design of the project's architecture. This includes assessing the organization and readability of the codebase, as well as the comprehensiveness and conciseness of documentation (e.g., GitHub README, inline comments) that enables others to understand and potentially reproduce or extend the solution. 
- -- **Innovative Gemini Integration ** - This criterion specifically assesses how effectively and creatively the Google Gemini API has been incorporated into the solution. Judges will look for novel applications, efficient use of Gemini's capabilities, and the impact it has on the project's functionality or user experience. You are welcome to use additional Google products. - -- **Societal Impact & Novelty ** - This evaluates the project's potential to address a meaningful problem, contribute positively to society, or offer a genuinely innovative and unique solution. Judges will consider the originality of the idea, its potential real‑world applicability, and its ability to solve a challenge in a new or impactful way. - +# CIFR Agent System (Agentic AI Hackathon) + +This repo aligns with the [Agentic AI App Hackathon template](https://github.com/odsc2015/agentic-hackathon-template) and implements a multi-agent workflow for communication analysis, friction detection, and intervention suggestions using Google Gemini. + +## Contents +- `src/`: hackathon-required planner/executor/memory modules. +- `cifr_agent_system/`: domain agents (communication, friction detection, intervention, knowledge, utils, config). +- `frontend/`: lightweight Flask UI for demo. +- Docs: `ARCHITECTURE.md`, `EXPLANATION.md`, `DEMO.md`, `API_KEY_SETUP.md`. + +## Prerequisites +- Python 3.10+ recommended (works on 3.9 with shims, but prefer 3.10+). +- Google Gemini API key from Google AI Studio. +- GCP Project ID if using Cloud Language fallback. 
+ +## Setup +```bash +python -m venv .venv && source .venv/bin/activate +pip install -r cifr_agent_system/requirements.txt +``` + +Create a `.env` (not committed) with: +``` +GCP_PROJECT_ID= +GCP_LOCATION=us-central1 +GOOGLE_API_KEY= +# Optional per-agent overrides +GOOGLE_API_KEY_CA= +GOOGLE_API_KEY_FA= +GOOGLE_API_KEY_IA= +GEMINI_PRO_MODEL_ID=gemini-2.0-flash +GEMINI_PRO_VISION_MODEL_ID=gemini-2.5-flash +``` + +## Running +- CLI demo: `python -m cifr_agent_system.main` +- Flask UI: `python app.py` then open `http://localhost:5000` +- Planner/Executor programmatic use: +```python +from src.executor import Executor +from src.memory import MemoryStore +# instantiate agents from cifr_agent_system.* +``` + +## Hackathon Checklist (per template) +- Fork named after team/participant. +- Agent built under `src/` (planner/executor/memory present). +- Gemini API integrated (`google.genai` in planner + agents). +- Docs filled: `README.md`, `ARCHITECTURE.md`, `EXPLANATION.md`, `DEMO.md` (video link pending). +- Video demo to be recorded and linked in `DEMO.md`. + +## Security & Secrets +- Do **not** commit `.env` or `key.json`. They are gitignored. Rotate any previously committed keys. +- Set `ALLOW_INSECURE_SSL=1` only in constrained sandboxes; keep unset for real deployments. + +## Troubleshooting +- If Flask complains about `.env` permissions, set `FLASK_SKIP_DOTENV=1` (already defaulted in `app.py`). +- If Gemini quota is hit, agents fall back to heuristic logic and log warnings. + +## License +Apache-2.0 (template baseline). diff --git a/app.py b/app.py new file mode 100644 index 000000000..531867d52 --- /dev/null +++ b/app.py @@ -0,0 +1,259 @@ +import logging +import os +from datetime import datetime +from flask import Flask, request, jsonify, render_template +from dotenv import load_dotenv +import base64 +import json + +# Flask CLI tries to auto-load .env and can crash if the file isn't readable. +# Prevent that and rely on our explicit load_dotenv below. 
+os.environ.setdefault("FLASK_SKIP_DOTENV", "1") + +# Ensure environment variables are loaded for Config to access them. +# If the .env file is not readable (e.g., permissions or sandbox filters), +# fall back to existing environment variables. +try: + load_dotenv(os.path.join(os.path.dirname(__file__), ".env")) +except PermissionError: + print("Warning: .env not readable; relying on existing environment variables.") + +# Import agents and Config from your cifr_agent_system package +from cifr_agent_system.config import Config +from cifr_agent_system.communication_agent import CommunicationAgent +from cifr_agent_system.knowledge_agent import KnowledgeAgent +from cifr_agent_system.friction_detection_agent import FrictionDetectionAgent +from cifr_agent_system.intervention_agent import InterventionAgent +from cifr_agent_system.utils import generate_unique_id + + +logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s] %(message)s") +logger = logging.getLogger(__name__) + + +def serialize_google_cloud_object(obj): + """Recursively converts Google Cloud client library objects to JSON serializable types.""" + # Handle dicts first (before checking __dict__) + if isinstance(obj, dict): + return {k: serialize_google_cloud_object(v) for k, v in obj.items()} + if isinstance(obj, (list, tuple)): + # Recursively process lists/tuples + return [serialize_google_cloud_object(elem) for elem in obj] + elif isinstance(obj, datetime): + return obj.isoformat() + elif hasattr(obj, '__dict__'): + # Handle Google Cloud protobuf objects + if hasattr(obj, '_pb'): + return serialize_google_cloud_object(obj._pb) + # Recursively process dictionary-like objects + return {k: serialize_google_cloud_object(v) for k, v in obj.__dict__.items() if not k.startswith('_')} + # Handle specific Google Cloud client objects like Sentiment.score, Sentiment.magnitude, and Entities + elif hasattr(obj, 'score') and hasattr(obj, 'magnitude'): + return {"score": obj.score, "magnitude": 
obj.magnitude} + elif hasattr(obj, 'entities') and isinstance(obj.entities, (list, tuple)): + return [serialize_google_cloud_object(entity) for entity in obj.entities] + elif hasattr(obj, 'name') and hasattr(obj, 'type_') and hasattr(obj, 'salience'): + return {"name": obj.name, "type": str(obj.type_), "salience": obj.salience} + elif hasattr(obj, 'name') and hasattr(obj, 'confidence'): + return {"name": obj.name, "confidence": obj.confidence} + # Handle generic protobuf messages by converting to a dictionary + elif hasattr(obj, 'DESCRIPTOR') and hasattr(obj, 'ListFields'): + return {field.name: serialize_google_cloud_object(getattr(obj, field.name)) for field, _ in obj.ListFields()} + # Base cases for primitive types + elif isinstance(obj, (int, float, str, bool, type(None))): + return obj + # For any other object type, try to stringify or represent as dict + else: + try: + # Try to convert to dict if it has to_dict or similar method + if hasattr(obj, 'to_dict'): + return obj.to_dict() + # Or just convert to string representation for unknown complex objects + return str(obj) + except Exception: + return f"" + +app = Flask(__name__, + static_folder='./frontend/static', + template_folder='./frontend/templates') + +# Initialize agents globally or per-request if state management is complex +# For simplicity, we'll initialize them globally here. 
+logger.info("🤖 CIFR Agent System - Initializing Agents") +logger.info("📋 Project ID: %s", Config.GCP_PROJECT_ID) +logger.info("📍 Location: %s", Config.GCP_LOCATION) +logger.info( + "🔑 API Key Configuration: default=%s, CA=%s, FA=%s, IA=%s", + "set" if Config.GOOGLE_API_KEY else "missing", + "dedicated" if Config.GOOGLE_API_KEY_CA else "default", + "dedicated" if Config.GOOGLE_API_KEY_FA else "default", + "dedicated" if Config.GOOGLE_API_KEY_IA else "default", +) + +knowledge_agent = KnowledgeAgent(project_id=Config.GCP_PROJECT_ID, location=Config.GCP_LOCATION) +communication_agent = CommunicationAgent(project_id=Config.GCP_PROJECT_ID, knowledge_agent=knowledge_agent, location=Config.GCP_LOCATION) +friction_detection_agent = FrictionDetectionAgent(project_id=Config.GCP_PROJECT_ID, knowledge_agent=knowledge_agent, location=Config.GCP_LOCATION) +intervention_agent = InterventionAgent(project_id=Config.GCP_PROJECT_ID, knowledge_agent=knowledge_agent, friction_detection_agent=friction_detection_agent, location=Config.GCP_LOCATION) + +@app.route('/') +def index(): + return render_template('index.html') + +@app.route('/api/process_message', methods=['POST']) +def process_message_api(): + data = request.form + text_content = data.get('text_content', '') + image_file = request.files.get('image_file') + + image_bytes = None + if image_file: + image_bytes = image_file.read() + + message_id = generate_unique_id("web_message") + timestamp = datetime.now().isoformat() + + sample_message = { + "message_id": message_id, + "text_content": text_content, + "image_bytes": image_bytes, + "timestamp": timestamp, + "sender": "Web User" + } + + logger.info("[API] Processing message ID: %s", message_id) + + results = { + "original_message": sample_message, + "communication_analysis": None, + "knowledge_update_status": None, + "friction_detection": None, + "intervention_suggestion": None, + "error": None, + "warnings": [] + } + + try: + # 1. 
Communication Agent processing + comm_agent_results = communication_agent.process_collaboration_message(sample_message) + logger.debug("Communication agent results type: %s", type(comm_agent_results)) + # Ensure proper JSON serialization - always return a dict, never a string + comm_serialized = serialize_google_cloud_object(comm_agent_results) + logger.debug("After serialization, type: %s, is dict: %s", type(comm_serialized), isinstance(comm_serialized, dict)) + # Ensure it's a dict, not a string + if isinstance(comm_serialized, str): + logger.warning("Communication analysis was serialized as string! Attempting to parse...") + try: + comm_serialized = json.loads(comm_serialized) + except: + logger.warning("JSON parse failed, trying Python literal eval") + import ast + try: + comm_serialized = ast.literal_eval(comm_serialized) + except Exception as e: + logger.error("Failed to parse communication analysis: %s", e) + comm_serialized = {"error": "Failed to parse communication analysis", "raw": comm_serialized[:200]} + if not isinstance(comm_serialized, dict): + logger.error("Communication analysis is not a dict after processing! Type: %s", type(comm_serialized)) + comm_serialized = {"error": "Invalid response format", "type": str(type(comm_serialized))} + results["communication_analysis"] = comm_serialized + results["knowledge_update_status"] = "Context stored under 'communication_analysis_{}'".format(message_id) + + # Check for quota errors and API source in communication analysis + comm_str = json.dumps(comm_serialized) if isinstance(comm_serialized, dict) else str(comm_agent_results) + if "429" in comm_str or "RESOURCE_EXHAUSTED" in comm_str or "quota" in comm_str.lower(): + results["warnings"].append("Communication Agent: Gemini API quota exceeded. 
Using fallback service.") + # Check if using fallback service + analysis = comm_serialized.get("analysis", {}) if isinstance(comm_serialized, dict) else {} + if isinstance(analysis, dict) and analysis.get("api_source") == "fallback": + results["warnings"].append("Communication Agent: Using OpenAI-compatible fallback service (Gemini quota exhausted).") + + # 2. Friction Detection + friction_results = friction_detection_agent.detect_communication_friction(f"communication_analysis_{message_id}") + logger.debug("Friction detection results type: %s", type(friction_results)) + # Ensure proper JSON serialization - always return a dict, never a string + friction_serialized = serialize_google_cloud_object(friction_results) + logger.debug("After serialization, type: %s, is dict: %s", type(friction_serialized), isinstance(friction_serialized, dict)) + # Ensure it's a dict, not a string + if isinstance(friction_serialized, str): + logger.warning("Friction detection was serialized as string! Attempting to parse...") + try: + friction_serialized = json.loads(friction_serialized) + except: + logger.warning("JSON parse failed, trying Python literal eval") + import ast + try: + friction_serialized = ast.literal_eval(friction_serialized) + except Exception as e: + logger.error("Failed to parse friction detection: %s", e) + friction_serialized = {"error": "Failed to parse friction detection", "raw": friction_serialized[:200]} + if not isinstance(friction_serialized, dict): + logger.error("Friction detection is not a dict after processing! 
Type: %s", type(friction_serialized)) + friction_serialized = {"error": "Invalid response format", "type": str(type(friction_serialized))} + results["friction_detection"] = friction_serialized + + # Check for quota errors and API source in friction detection + friction_str = json.dumps(friction_serialized) if isinstance(friction_serialized, dict) else str(friction_results) + if "429" in friction_str or "RESOURCE_EXHAUSTED" in friction_str or "quota" in friction_str.lower(): + results["warnings"].append("Friction Detection Agent: Gemini API quota exceeded. Using fallback service.") + # Check if using fallback service + if isinstance(friction_serialized, dict) and friction_serialized.get("api_source") == "fallback": + results["warnings"].append("Friction Detection Agent: Using OpenAI-compatible fallback service (Gemini quota exhausted).") + + # 3. Intervention Suggestion + intervention_suggestion = intervention_agent.suggest_intervention(f"communication_analysis_{message_id}") + logger.debug("Intervention suggestion results type: %s", type(intervention_suggestion)) + # Ensure proper JSON serialization - always return a dict, never a string + intervention_serialized = serialize_google_cloud_object(intervention_suggestion) + logger.debug("After serialization, type: %s, is dict: %s", type(intervention_serialized), isinstance(intervention_serialized, dict)) + # Ensure it's a dict, not a string + if isinstance(intervention_serialized, str): + logger.warning("Intervention suggestion was serialized as string! 
Attempting to parse...") + try: + intervention_serialized = json.loads(intervention_serialized) + except: + logger.warning("JSON parse failed, trying Python literal eval") + import ast + try: + intervention_serialized = ast.literal_eval(intervention_serialized) + except Exception as e: + logger.error("Failed to parse intervention suggestion: %s", e) + intervention_serialized = {"error": "Failed to parse intervention suggestion", "raw": intervention_serialized[:200]} + if not isinstance(intervention_serialized, dict): + logger.error("Intervention suggestion is not a dict after processing! Type: %s", type(intervention_serialized)) + intervention_serialized = {"error": "Invalid response format", "type": str(type(intervention_serialized))} + results["intervention_suggestion"] = intervention_serialized + + # Check for quota errors in intervention + intervention_str = json.dumps(intervention_serialized) if isinstance(intervention_serialized, dict) else str(intervention_suggestion) + if "429" in intervention_str or "RESOURCE_EXHAUSTED" in intervention_str or "quota" in intervention_str.lower(): + results["warnings"].append("Intervention Agent: Gemini API quota exceeded. 
Using fallback service.") + + except Exception as e: + results["error"] = str(e) + logger.exception("[API Error] %s", e) + + # Ensure all responses are JSON-serializable + try: + # Test serialization + json.dumps(results) + except (TypeError, ValueError) as e: + logger.warning("Response contains non-serializable objects, attempting to fix: %s", e) + # Convert any remaining non-serializable objects to strings + def make_serializable(obj): + if isinstance(obj, dict): + return {k: make_serializable(v) for k, v in obj.items()} + elif isinstance(obj, (list, tuple)): + return [make_serializable(item) for item in obj] + elif isinstance(obj, (str, int, float, bool, type(None))): + return obj + else: + return str(obj) + results = make_serializable(results) + + return jsonify(results) + +if __name__ == '__main__': + # It's recommended to run Flask in development mode for easier debugging. + # For production, use a production-ready WSGI server like Gunicorn or uWSGI. + app.run(debug=True, host='0.0.0.0', port=5000, load_dotenv=False) + diff --git a/cifr_agent_system/config.py b/cifr_agent_system/config.py new file mode 100644 index 000000000..666c565c8 --- /dev/null +++ b/cifr_agent_system/config.py @@ -0,0 +1,67 @@ +import os +import ssl +from dotenv import load_dotenv + +# Allow opting into insecure SSL only when explicitly requested (for sandboxes). +ALLOW_INSECURE_SSL = os.getenv("ALLOW_INSECURE_SSL", "0") == "1" + +if ALLOW_INSECURE_SSL: + def _no_verify_context(*args, **kwargs): + ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) + ctx.check_hostname = False + ctx.verify_mode = ssl.CERT_NONE + return ctx + + ssl._create_default_https_context = _no_verify_context + ssl.create_default_context = _no_verify_context + ssl._create_unverified_context = _no_verify_context + +# Ensure SSL_CERT_FILE points to a readable cert bundle (to avoid permission errors +# when the default system cert store is blocked in sandboxed environments). 
+try: + import certifi + cert_path = certifi.where() + os.environ["SSL_CERT_FILE"] = cert_path + os.environ["REQUESTS_CA_BUNDLE"] = cert_path + os.environ["GRPC_DEFAULT_SSL_ROOTS_FILE_PATH"] = cert_path +except Exception: + pass + +# Load environment variables from .env file; if unreadable (permissions/sandbox), fall back to existing env. +try: + load_dotenv() +except PermissionError: + print("Warning: cifr_agent_system/.env not readable; relying on existing environment variables.") + +class Config: + # GCP Project Settings + GCP_PROJECT_ID = os.getenv("GCP_PROJECT_ID") + GCP_LOCATION = os.getenv("GCP_LOCATION", "us-central1") # Default to us-central1 + GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY") # Legacy single key / default + GOOGLE_API_KEY_CA = os.getenv("GOOGLE_API_KEY_CA") # Communication Agent + GOOGLE_API_KEY_FA = os.getenv("GOOGLE_API_KEY_FA") # Friction Detection Agent + GOOGLE_API_KEY_IA = os.getenv("GOOGLE_API_KEY_IA") # Intervention Agent + + # Vertex AI / Gemini Settings + VERTEX_AI_ENDPOINT = f"{GCP_LOCATION}-aiplatform.googleapis.com" + GEMINI_PRO_VISION_MODEL_ID = os.getenv("GEMINI_PRO_VISION_MODEL_ID", "gemini-2.5-flash") + GEMINI_PRO_MODEL_ID = os.getenv("GEMINI_PRO_MODEL_ID", "gemini-2.0-flash") + + # HTTP options for google.genai; disable SSL verification in constrained environments. 
+ GENAI_HTTP_OPTIONS = {"verify": False} + + # OpenAI-Compatible Fallback Service (Secondary - for demo/backup only) + # NOTE: Hackathon judges won't have access to this service + # This is only used as fallback when free tier Google Gemini API hits quota limits + OPENAI_FALLBACK_BASE_URL = os.getenv("OPENAI_FALLBACK_BASE_URL") # e.g., https://caas-gocode-prod.caas-prod.prod.onkatana.net/v1 + OPENAI_FALLBACK_API_KEY = os.getenv("OPENAI_FALLBACK_API_KEY") # OpenAI-compatible API key + OPENAI_FALLBACK_MODEL = os.getenv("OPENAI_FALLBACK_MODEL", "gemini-2.0-flash-001") # Model name on fallback service + ENABLE_OPENAI_FALLBACK = os.getenv("ENABLE_OPENAI_FALLBACK", "0") == "1" # Set to "1" to enable fallback + + # Ensure required environment variables are set + if not GCP_PROJECT_ID: + raise ValueError("GCP_PROJECT_ID environment variable not set.") + if not (GOOGLE_API_KEY or GOOGLE_API_KEY_CA or GOOGLE_API_KEY_FA or GOOGLE_API_KEY_IA): + raise ValueError("No Gemini API key found. Set GOOGLE_API_KEY or per-agent keys GOOGLE_API_KEY_CA/FA/IA. Get your API key from https://aistudio.google.com/app/apikey") + + # Add other configuration variables as needed (e.g., database settings) diff --git a/cifr_agent_system/project_idea.txt b/cifr_agent_system/project_idea.txt new file mode 100644 index 000000000..d3eb465f3 --- /dev/null +++ b/cifr_agent_system/project_idea.txt @@ -0,0 +1,51 @@ +Project Idea: Collaborative Intelligence & Friction Reduction (CIFR) Agent System +1. Concept: +The CIFR Agent System acts as an intelligent intermediary and facilitator for intercompany projects. It uses a network of specialized AI agents to actively monitor communication channels (emails, chat, documents), identify potential friction points (e.g., conflicting statements, stalled decisions, knowledge gaps), and proactively intervene with intelligent suggestions, clarifications, or even by initiating automated actions. 
The goal is to ensure smoother workflows, clearer understanding, and accelerated progress in complex multi-party collaborations. +2. Multi-Agent System: +Communication & Sentiment Analysis Agent: +Role: Monitors intercompany communication channels. +Functionality: Ingests and analyzes communication data (e.g., shared documents, email threads, chat logs from platforms like Google Chat, Slack). Uses Gemini's advanced NLP capabilities to understand context, extract key decisions, identify action items, and perform sentiment analysis. It flags communication breakdowns, misunderstandings, or rising tensions based on linguistic cues. +Multimodal Aspect: Could analyze images or diagrams shared in documents to extract context (e.g., flowcharts indicating processes, architectural diagrams). Gemini's ability to interpret image content in conjunction with text would be crucial here. +Knowledge & Context Integration Agent: +Role: Builds and maintains a unified understanding of the project, shared knowledge, and historical context. +Functionality: Aggregates information from all communication channels and shared repositories (e.g., Google Drive, Confluence, internal knowledge bases). Uses Gemini's reasoning capabilities to identify implicit dependencies, potential overlaps, or conflicting information across different sources. It can also "learn" about each company's specific terminology, processes, and priorities. +Reasoning/Intellisearch: Employs intellisearch to find relevant past discussions, decisions, or documents when a new query arises or a friction point is detected. It reasons about which pieces of information are most pertinent to resolve a given issue. +Expectation Alignment & Conflict Detection Agent: +Role: Identifies misaligned expectations and potential conflicts before they escalate. +Functionality: By comparing statements, action items, and goals from different collaborating parties, this agent uses Gemini's reasoning to detect discrepancies. 
For instance, if Company A states a deadline that conflicts with Company B's projected resource availability, this agent flags it. It can also identify subtle conflicts of interest or resource contention. +Agentic Protocols: Could trigger a "Mediation Sub-Agent" or suggest a structured discussion to resolve the conflict, providing all necessary context. +Decision & Action Facilitation Agent: +Role: Accelerates decision-making and ensures accountability for action items. +Functionality: Tracks decisions made, assigns action items, and monitors progress. If a decision is stalled, or an action item is overdue, this agent uses Gemini to gently nudge relevant parties, provide a summary of the outstanding issue, or suggest potential next steps to unblock the process. +Multimodal Aspect: Could generate simple visual summaries of project status, decision trees, or dependency graphs to aid clarity. +Intervention & Suggestion Agent: +Role: Provides proactive assistance and suggestions to reduce friction. +Functionality: Based on the insights from other agents, this agent provides context-aware suggestions. Examples: +"It seems there's a misunderstanding regarding the user authentication module. Here's a summary of the last discussion and the agreed-upon API contract." (Referencing information from the Knowledge Agent). +"The deadline for feature X from Company B seems tight given their current workload. Would you like me to suggest a revised timeline or explore resource sharing options?" (Based on Expectation Alignment Agent's findings). +"Team A is blocked waiting for the data schema from Team B. Here's the relevant documentation and a direct link to their contact." +Reasoning/Automated Reasoning: This agent heavily relies on the reasoning capabilities to understand the friction point, identify the best intervention strategy, and formulate a helpful, context-rich response. +3. 
Google Cloud, Data Science, & Engineering Team Perspective: +GCP Services: +Communication Ingestion: Cloud Pub/Sub (for real-time chat/email updates), Cloud Storage (for shared documents, historical data). +NLP & Multimodal AI: Vertex AI (for Gemini integration, custom NLP models, fine-tuning for domain-specific terminology), Cloud Natural Language API (for sentiment analysis, entity extraction). +Knowledge Graph/Database: Neo4j on GCP Marketplace, or a custom solution on Cloud Spanner/Firestore for storing interconnected knowledge and relationships between entities. +Orchestration: Cloud Composer (Apache Airflow) for managing complex, event-driven agent workflows. +Deployment: Cloud Run (for serverless microservices for each agent), Google Kubernetes Engine (for more complex, stateful agents). +Security & Compliance: Cloud DLP (for sensitive information detection), Identity and Access Management (IAM) for secure cross-company data access control. +Data Science Focus: Advanced NLP, sentiment analysis, topic modeling, knowledge graph construction, anomaly detection (for communication patterns), predictive modeling (for potential friction points), and active learning for agent improvement. +Engineering Teams: +Microservices Architecture: Encourages a clean, modular design with independent agents. +API-First Approach: Agents communicate via well-defined APIs. +Observability: Comprehensive logging, tracing, and monitoring (Cloud Monitoring, Cloud Logging, Cloud Trace) to understand agent behavior and system health. +Scalability: Leverages GCP's auto-scaling capabilities for various services. +Interoperability: Design for easy integration with existing communication and document management tools used by collaborating companies. +4. Multimodal and Reasoning: +Multimodal: Gemini's ability to interpret not just text but also diagrams, screenshots of UI, or even short video clips within shared documents is vital for a holistic understanding of project context. 
For instance, if an engineering team shares a screenshot of an error, Gemini could analyze the image and the surrounding text to understand the problem. +Reasoning/Automated Reasoning: The core of this system is the agents' ability to reason. They need to: +Understand nuances in human communication. +Synthesize information from disparate sources. +Identify causal links between actions and outcomes. +Anticipate potential problems and suggest preventive measures. +Formulate intelligent, context-aware interventions to reduce friction. +This CIFR Agent System addresses a real-world business challenge, provides ample opportunity to showcase advanced AI capabilities (especially with Gemini's multimodal and reasoning power), and offers a compelling narrative for a hackathon. diff --git a/cifr_agent_system/requirements.txt b/cifr_agent_system/requirements.txt new file mode 100644 index 000000000..75683a146 --- /dev/null +++ b/cifr_agent_system/requirements.txt @@ -0,0 +1,12 @@ +google-cloud-aiplatform +google-cloud-dlp +google-cloud-pubsub +google-cloud-storage +google-cloud-language +google-genai # Added for direct Gemini API access +openai # For OpenAI-compatible fallback service (optional) +cryptography +pillow +python-dotenv # For local environment variable management +Flask[async] +Flask[async] # For the web UI diff --git a/frontend/Gemini.png b/frontend/Gemini.png new file mode 100644 index 000000000..dbbf48d60 Binary files /dev/null and b/frontend/Gemini.png differ diff --git a/frontend/ODSC-AI.jpeg b/frontend/ODSC-AI.jpeg new file mode 100644 index 000000000..36a96dcdf Binary files /dev/null and b/frontend/ODSC-AI.jpeg differ diff --git a/frontend/static/Gemini.png b/frontend/static/Gemini.png new file mode 100644 index 000000000..dbbf48d60 Binary files /dev/null and b/frontend/static/Gemini.png differ diff --git a/frontend/static/ODSC-AI.jpeg b/frontend/static/ODSC-AI.jpeg new file mode 100644 index 000000000..36a96dcdf Binary files /dev/null and 
b/frontend/static/ODSC-AI.jpeg differ diff --git a/frontend/static/css/style.css b/frontend/static/css/style.css new file mode 100644 index 000000000..66d1727c6 --- /dev/null +++ b/frontend/static/css/style.css @@ -0,0 +1,551 @@ +/* frontend/static/css/style.css */ +body { + font-family: 'Google Sans', 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif; + line-height: 1.6; + color: #333; + background: linear-gradient(135deg, #f5f7fa 0%, #c3cfe2 100%); + margin: 0; + padding: 20px; +} + +.container { + max-width: 1200px; + margin: 30px auto; + background: #fff; + padding: 30px; + border-radius: 12px; + box-shadow: 0 8px 24px rgba(0, 0, 0, 0.12); +} + +/* Header Section with Logos */ +.header-section { + text-align: center; + margin-bottom: 40px; + padding-bottom: 30px; + border-bottom: 2px solid #e8eaed; +} + +.logo-container { + display: flex; + justify-content: center; + align-items: center; + gap: 30px; + margin-bottom: 20px; + flex-wrap: wrap; +} + +.logo { + height: 60px; + width: auto; + object-fit: contain; + transition: transform 0.3s ease; +} + +.logo:hover { + transform: scale(1.05); +} + +.gemini-logo { + max-height: 60px; +} + +.odsc-logo { + max-height: 60px; + border-radius: 8px; +} + +h1 { + color: #202124; + text-align: center; + margin-bottom: 15px; + font-size: 2.5rem; + font-weight: 400; +} + +h2 { + color: #202124; + text-align: center; + margin-bottom: 20px; + font-size: 1.75rem; + font-weight: 400; +} + +h3 { + color: #202124; + margin-bottom: 15px; + font-weight: 500; +} + +p { + margin-bottom: 10px; +} + +.input-section { + background: linear-gradient(135deg, #e8f0fe 0%, #f1f3f4 100%); + padding: 25px; + border-radius: 12px; + margin-bottom: 30px; + border: 1px solid #dadce0; + box-shadow: 0 2px 8px rgba(0, 0, 0, 0.05); +} + +textarea { + width: calc(100% - 20px); + padding: 10px; + margin-bottom: 15px; + border: 1px solid #ccc; + border-radius: 5px; + font-size: 1rem; + box-sizing: border-box; +} + +input[type="file"] { + margin-bottom: 15px; + 
display: block; + width: 100%; +} + +button { + display: block; + width: 100%; + padding: 12px 20px; + background-color: #4285F4; + color: white; + border: none; + border-radius: 8px; + cursor: pointer; + font-size: 1.1rem; + font-weight: 500; + transition: all 0.3s ease; + box-shadow: 0 2px 4px rgba(66, 133, 244, 0.2); +} + +button:hover { + background-color: #3367d6; + box-shadow: 0 4px 8px rgba(66, 133, 244, 0.3); + transform: translateY(-1px); +} + +#loading { + text-align: center; + padding: 10px; + font-style: italic; + color: #555; +} + +.results-section { + margin-top: 30px; + border-top: 1px solid #eee; + padding-top: 20px; +} + +.result-card img { + max-width: 100%; + height: auto; + border-radius: 5px; + margin-top: 10px; + border: 1px solid #ddd; +} + + +.result-card { + background-color: #f9f9f9; + border: 1px solid #e1e1e1; + border-radius: 8px; + padding: 20px; + margin-bottom: 20px; + box-shadow: 0 2px 8px rgba(0, 0, 0, 0.05); +} + +.result-card h3 { + color: #34495e; + margin-top: 0; + margin-bottom: 15px; + text-align: left; + border-bottom: 1px solid #eee; + padding-bottom: 10px; +} + +.result-card p strong { + color: #555; +} + +#entities_list { + list-style-type: disc; + margin-left: 20px; + padding: 0; +} + +#entities_list li { + margin-bottom: 5px; + color: #666; +} + +pre { + background-color: #eee; + padding: 10px; + border-radius: 5px; + overflow-x: auto; + white-space: pre-wrap; + word-wrap: break-word; +} + +.error { + color: #d9534f; + background-color: #f2dede; + border: 1px solid #ebccd1; + padding: 10px; + border-radius: 5px; + margin-bottom: 20px; +} + +.hidden { + display: none; +} + +/* Orchestration Section Styles */ +.orchestration-section { + background: linear-gradient(135deg, #f093fb 0%, #f5576c 100%); + color: white; + padding: 30px; + border-radius: 10px; + margin-bottom: 30px; + box-shadow: 0 4px 15px rgba(0, 0, 0, 0.1); +} + +.orchestration-section h2 { + color: white; + margin-bottom: 15px; + text-align: center; +} + 
+.orchestration-description { + color: rgba(255, 255, 255, 0.95); + text-align: center; + line-height: 1.8; + margin-bottom: 25px; + font-size: 1.05rem; +} + +.orchestration-description strong { + color: white; + font-weight: 600; +} + +.orchestration-flow { + display: flex; + align-items: center; + justify-content: center; + flex-wrap: wrap; + gap: 10px; + margin-top: 20px; + padding: 20px; + background: rgba(255, 255, 255, 0.15); + backdrop-filter: blur(10px); + border-radius: 8px; +} + +.flow-step { + background: rgba(255, 255, 255, 0.25); + padding: 12px 20px; + border-radius: 8px; + font-weight: 600; + font-size: 0.95rem; + border: 2px solid rgba(255, 255, 255, 0.3); + white-space: nowrap; +} + +.flow-arrow { + font-size: 1.5rem; + font-weight: bold; + color: white; +} + +/* Agent Overview Styles - Google Colors */ +.agents-overview { + background: #ffffff; + padding: 40px 30px; + border-radius: 12px; + margin-bottom: 30px; + box-shadow: 0 4px 12px rgba(0, 0, 0, 0.08); + border: 1px solid #e8eaed; +} + +.agents-overview h2 { + color: #202124; + margin-bottom: 30px; + font-size: 2rem; + font-weight: 500; +} + +.agent-cards { + display: grid; + grid-template-columns: repeat(4, 1fr); + gap: 20px; + margin-top: 20px; +} + +.agent-card { + background: #ffffff; + padding: 25px 20px; + border-radius: 12px; + border: 2px solid #e8eaed; + transition: all 0.3s ease; + box-shadow: 0 2px 8px rgba(0, 0, 0, 0.05); + text-align: center; +} + +.agent-card:hover { + transform: translateY(-4px); + box-shadow: 0 8px 16px rgba(0, 0, 0, 0.12); +} + +.agent-icon { + font-size: 3rem; + margin-bottom: 15px; + display: block; +} + +.agent-card h3 { + color: #202124; + margin-top: 0; + margin-bottom: 15px; + text-align: center; + font-size: 1.2rem; + font-weight: 500; + border-bottom: none; + padding-bottom: 0; +} + +.agent-card p { + color: #5f6368; + margin: 0; + font-size: 0.95rem; + line-height: 1.6; + text-align: left; +} + +/* Google Brand Colors for Agent Cards */ +.agent-blue { 
+ border-top: 4px solid #4285F4; +} + +.agent-blue:hover { + border-color: #4285F4; + box-shadow: 0 8px 16px rgba(66, 133, 244, 0.2); +} + +.agent-red { + border-top: 4px solid #EA4335; +} + +.agent-red:hover { + border-color: #EA4335; + box-shadow: 0 8px 16px rgba(234, 67, 53, 0.2); +} + +.agent-yellow { + border-top: 4px solid #FBBC04; +} + +.agent-yellow:hover { + border-color: #FBBC04; + box-shadow: 0 8px 16px rgba(251, 188, 4, 0.2); +} + +.agent-green { + border-top: 4px solid #34A853; +} + +.agent-green:hover { + border-color: #34A853; + box-shadow: 0 8px 16px rgba(52, 168, 83, 0.2); +} + +.subtitle { + text-align: center; + color: #5f6368; + font-size: 1.1rem; + margin-bottom: 30px; + font-weight: 400; +} + +/* Demo Section Styles */ +.demo-section { + background-color: #f8f9fa; + padding: 25px; + border-radius: 10px; + margin-bottom: 30px; + border: 2px solid #e9ecef; +} + +.demo-section h2 { + color: #2c3e50; + margin-bottom: 10px; +} + +.demo-description { + color: #666; + margin-bottom: 20px; + text-align: center; +} + +.demo-buttons { + display: grid; + grid-template-columns: repeat(auto-fit, minmax(200px, 1fr)); + gap: 15px; +} + +.demo-btn { + background: #4285F4; + color: white; + border: none; + padding: 15px 20px; + border-radius: 8px; + cursor: pointer; + font-size: 0.95rem; + font-weight: 500; + transition: all 0.3s ease; + box-shadow: 0 2px 4px rgba(66, 133, 244, 0.2); +} + +.demo-btn:hover { + transform: translateY(-2px); + box-shadow: 0 4px 8px rgba(66, 133, 244, 0.3); + background: #3367d6; +} + +.demo-btn:active { + transform: translateY(0); +} + +.file-label { + display: block; + margin-top: 5px; + margin-bottom: 15px; + color: #666; + font-size: 0.9rem; +} + +/* Loading Spinner - Google Blue */ +.spinner { + border: 4px solid #e8f0fe; + border-top: 4px solid #4285F4; + border-radius: 50%; + width: 40px; + height: 40px; + animation: spin 1s linear infinite; + margin: 0 auto 10px; +} + +@keyframes spin { + 0% { transform: rotate(0deg); } + 
100% { transform: rotate(360deg); } +} + +#loading { + text-align: center; + padding: 20px; + background-color: #e8f0fe; + border-radius: 8px; + margin-top: 15px; +} + +#loading p { + margin-top: 10px; + color: #555; + font-style: italic; +} + +/* Enhanced Result Cards - Google Colors */ +.result-card { + background: linear-gradient(to bottom, #ffffff 0%, #f8f9fa 100%); + border-left: 4px solid #4285F4; +} + +#knowledge_update_card { + border-left-color: #EA4335; + background: linear-gradient(to bottom, #fef7f7 0%, #f8f9fa 100%); +} + +.knowledge-description { + margin-top: 10px; + padding: 10px; + background-color: #f0f0f0; + border-radius: 5px; + font-style: italic; + color: #666; + font-size: 0.9rem; +} + +.result-card h3 { + color: #202124; + font-weight: 500; +} + +/* Info Box Styles */ +.info-box { + background-color: #e7f3ff; + border: 1px solid #b3d9ff; + border-radius: 8px; + padding: 15px; + margin-top: 20px; + font-size: 0.9rem; +} + +.info-box h4 { + color: #0066cc; + margin-top: 0; + margin-bottom: 10px; + text-align: left; +} + +.info-box ul { + margin: 10px 0; + padding-left: 20px; +} + +.info-box li { + margin-bottom: 5px; + color: #555; +} + +.info-box small { + color: #666; + font-style: italic; +} + +/* Responsive Design */ +@media (max-width: 1024px) { + .agent-cards { + grid-template-columns: repeat(2, 1fr); + } +} + +@media (max-width: 768px) { + .agent-cards { + grid-template-columns: 1fr; + } + + .demo-buttons { + grid-template-columns: 1fr; + } + + .logo-container { + flex-direction: column; + gap: 15px; + } + + .logo { + height: 50px; + } + + h1 { + font-size: 2rem; + } + + h2 { + font-size: 1.5rem; + } +} + diff --git a/frontend/static/js/script.js b/frontend/static/js/script.js new file mode 100644 index 000000000..6c8756d8e --- /dev/null +++ b/frontend/static/js/script.js @@ -0,0 +1,312 @@ +// frontend/static/js/script.js +document.addEventListener('DOMContentLoaded', () => { + const messageForm = document.getElementById('messageForm'); 
+ const loadingIndicator = document.getElementById('loading'); + const errorMessage = document.getElementById('error_message'); + const textContent = document.getElementById('text_content'); + + // Demo examples data + const demoExamples = { + frustrated: { + text: "I am really frustrated with the constant delays on this feature. We've been waiting for 3 weeks now and the deadline is approaching fast. This is becoming a serious issue for our team and we need to accelerate immediately!", + description: "Shows how the system detects high frustration and suggests clarification interventions." + }, + timeline: { + text: "The project timeline is slipping again. We were supposed to deliver Phase 2 by Friday, but now it looks like it will be delayed by at least another week. This is the third time we've had to push back deadlines. We need to have a serious discussion about resource allocation and priorities.", + description: "Demonstrates timeline friction detection and action item proposals." + }, + positive: { + text: "Great work on the latest sprint! The team really pulled together and we delivered everything on time. The new features are working perfectly and the client is very happy with the progress. Let's keep this momentum going!", + description: "Shows how the system recognizes positive communication and confirms no friction." + }, + conflict: { + text: "I completely disagree with the approach we're taking. The current design doesn't align with our original requirements and I think we're going in the wrong direction. We need to stop and reconsider before we waste more time and resources on this.", + description: "Highlights conflict detection and mediation intervention suggestions." + }, + multimodal: { + text: "This graph shows a significant drop in user engagement over the past month. We went from 85% active users to just 62%. The data suggests there might be an issue with the latest update. 
Can someone analyze this chart and help us understand what's happening?", + description: "Tests multimodal analysis capabilities with data visualization context." + } + }; + + // Setup demo buttons + document.querySelectorAll('.demo-btn').forEach(btn => { + btn.addEventListener('click', () => { + const exampleKey = btn.getAttribute('data-example'); + const example = demoExamples[exampleKey]; + if (example) { + textContent.value = example.text; + // Scroll to form + messageForm.scrollIntoView({ behavior: 'smooth', block: 'start' }); + // Highlight the textarea briefly + textContent.focus(); + textContent.style.border = '2px solid #667eea'; + setTimeout(() => { + textContent.style.border = '1px solid #ccc'; + }, 2000); + } + }); + }); + + // Result cards + const originalMessageCard = document.getElementById('original_message_card'); + const communicationAnalysisCard = document.getElementById('communication_analysis_card'); + const knowledgeUpdateCard = document.getElementById('knowledge_update_card'); + const frictionDetectionCard = document.getElementById('friction_detection_card'); + const interventionSuggestionCard = document.getElementById('intervention_suggestion_card'); + + messageForm.addEventListener('submit', async (event) => { + event.preventDefault(); // Prevent default form submission + + // Hide previous results and errors + hideAllCards(); + errorMessage.classList.add('hidden'); + errorMessage.textContent = ''; + loadingIndicator.classList.remove('hidden'); + + const formData = new FormData(messageForm); + + // Display submitted message immediately + document.getElementById('original_text').textContent = formData.get('text_content'); + const originalImage = document.getElementById('original_image'); + const imageFile = formData.get('image_file'); + + if (imageFile && imageFile.size > 0) { + const reader = new FileReader(); + reader.onload = (e) => { + originalImage.src = e.target.result; + originalImage.style.display = 'block'; + }; + 
reader.readAsDataURL(imageFile); + document.getElementById('original_image_container').classList.remove('hidden'); + } else { + originalImage.src = ''; + originalImage.style.display = 'none'; + document.getElementById('original_image_container').classList.add('hidden'); + } + originalMessageCard.classList.remove('hidden'); // Ensure original message card is visible + + try { + // Update loading message + const loadingText = loadingIndicator.querySelector('p'); + if (loadingText) { + loadingText.textContent = '🤖 Processing through AI agents... Analyzing with Gemini AI...'; + } + + const response = await fetch('/api/process_message', { + method: 'POST', + body: formData, + }); + + if (!response.ok) { + throw new Error(`HTTP error! status: ${response.status}`); + } + + const result = await response.json(); + loadingIndicator.classList.add('hidden'); + console.log("API Response:", result); + + if (result.error) { + errorMessage.textContent = `Error: ${result.error}`; + errorMessage.classList.remove('hidden'); + return; + } + + // Warnings removed - don't display API quota/fallback information to users + // (Warnings are still logged server-side for debugging) + + displayResults(result); + + // Smooth scroll to results + setTimeout(() => { + document.querySelector('.results-section').scrollIntoView({ + behavior: 'smooth', + block: 'start' + }); + }, 100); + + } catch (error) { + loadingIndicator.classList.add('hidden'); + errorMessage.textContent = `An unexpected error occurred: ${error.message}`; + errorMessage.classList.remove('hidden'); + console.error("Fetch error:", error); + } + }); + + function hideAllCards() { + // originalMessageCard.classList.add('hidden'); // Keep original message card visible + communicationAnalysisCard.classList.add('hidden'); + knowledgeUpdateCard.classList.add('hidden'); + frictionDetectionCard.classList.add('hidden'); + interventionSuggestionCard.classList.add('hidden'); + } + + // checkForAPIErrors function removed - API quota/fallback 
warnings are not shown to users + + function displayResults(result) { + console.log("Full API result:", result); + + // Parse string responses if needed + let commAnalysis = result.communication_analysis; + let frictionDetection = result.friction_detection; + let interventionSuggestion = result.intervention_suggestion; + + // Try to parse if they're strings (Python dict format) + if (typeof commAnalysis === 'string') { + try { + // Try to parse as JSON first + commAnalysis = JSON.parse(commAnalysis); + } catch (e1) { + try { + // If that fails, try replacing single quotes (Python dict format) + commAnalysis = JSON.parse(commAnalysis.replace(/'/g, '"').replace(/None/g, 'null').replace(/True/g, 'true').replace(/False/g, 'false')); + } catch (e2) { + console.warn("Could not parse communication_analysis:", e2, "Raw:", commAnalysis); + } + } + } + if (typeof frictionDetection === 'string') { + try { + frictionDetection = JSON.parse(frictionDetection); + } catch (e1) { + try { + frictionDetection = JSON.parse(frictionDetection.replace(/'/g, '"').replace(/None/g, 'null').replace(/True/g, 'true').replace(/False/g, 'false')); + } catch (e2) { + console.warn("Could not parse friction_detection:", e2, "Raw:", frictionDetection); + } + } + } + if (typeof interventionSuggestion === 'string') { + try { + interventionSuggestion = JSON.parse(interventionSuggestion); + } catch (e1) { + try { + interventionSuggestion = JSON.parse(interventionSuggestion.replace(/'/g, '"').replace(/None/g, 'null').replace(/True/g, 'true').replace(/False/g, 'false')); + } catch (e2) { + console.warn("Could not parse intervention_suggestion:", e2, "Raw:", interventionSuggestion); + } + } + } + + console.log("Parsed commAnalysis:", commAnalysis); + console.log("Parsed frictionDetection:", frictionDetection); + console.log("Parsed interventionSuggestion:", interventionSuggestion); + + // Display Communication Agent Analysis + if (commAnalysis) { + communicationAnalysisCard.classList.remove('hidden'); + // 
commAnalysis structure: {analysis: {...}, friction: {...}} + const analysis = commAnalysis.analysis || commAnalysis; + console.log("Extracted analysis:", analysis); + + // Handle nested nlp_analysis structure + const nlpAnalysis = analysis?.nlp_analysis || analysis; + console.log("NLP Analysis:", nlpAnalysis); + + if (nlpAnalysis && nlpAnalysis.sentiment) { + const sentiment = nlpAnalysis.sentiment; + const score = (typeof sentiment === 'object' && sentiment.score !== undefined) ? sentiment.score : + (typeof sentiment === 'number' ? sentiment : 0.0); + const magnitude = (typeof sentiment === 'object' && sentiment.magnitude !== undefined) ? sentiment.magnitude : + (typeof sentiment === 'object' ? Math.abs(score) : 'N/A'); + + document.getElementById('sentiment_score').textContent = typeof score === 'number' ? score.toFixed(2) : (score || 'N/A'); + document.getElementById('sentiment_score').style.color = score < -0.2 ? '#d9534f' : score > 0.2 ? '#5cb85c' : '#f0ad4e'; + document.getElementById('sentiment_magnitude').textContent = typeof magnitude === 'number' ? magnitude.toFixed(2) : (magnitude || 'N/A'); + } else { + document.getElementById('sentiment_score').textContent = 'N/A'; + document.getElementById('sentiment_magnitude').textContent = 'N/A'; + } + + const entitiesList = document.getElementById('entities_list'); + entitiesList.innerHTML = ''; + const entities = nlpAnalysis?.entities || analysis?.entities || []; + if (Array.isArray(entities) && entities.length > 0) { + entities.forEach(entity => { + const listItem = document.createElement('li'); + const entityName = entity.name || 'Unknown'; + const entityType = entity.type_ || entity.type || 'Unknown'; + const salience = entity.salience !== undefined ? entity.salience.toFixed(2) : 'N/A'; + listItem.innerHTML = `${entityName} (${entityType}) - Salience: ${salience}`; + entitiesList.appendChild(listItem); + }); + } else { + entitiesList.innerHTML = '
  • No entities detected
  • '; + } + + const geminiResponse = document.getElementById('gemini_response'); + const geminiText = analysis?.gemini_response_text || analysis?.gemini_response || "No response available"; + geminiResponse.textContent = geminiText; + + // Style response box (no API source indicators shown to users) + geminiResponse.style.backgroundColor = '#eee'; + geminiResponse.style.border = 'none'; + } + + // Display Knowledge Base Update Status + if (result.knowledge_update_status) { + knowledgeUpdateCard.classList.remove('hidden'); + document.getElementById('knowledge_status').textContent = result.knowledge_update_status; + } + + // Display Friction Detection Results + if (frictionDetection) { + frictionDetectionCard.classList.remove('hidden'); + // Handle both boolean and string values + const frictionDetected = frictionDetection.friction_detected === true || + frictionDetection.friction_detected === 'True' || + (typeof frictionDetection.friction_detected === 'string' && frictionDetection.friction_detected.toLowerCase() === 'true'); + + const frictionDetectedEl = document.getElementById('friction_detected'); + frictionDetectedEl.textContent = frictionDetected ? '⚠️ Yes' : '✅ No'; + frictionDetectedEl.style.color = frictionDetected ? '#d9534f' : '#5cb85c'; + frictionDetectedEl.style.fontWeight = 'bold'; + + let frictionReason = frictionDetection.reason || 'No friction detected'; + // Truncate very long reasons for display + if (frictionReason.length > 500) { + frictionReason = frictionReason.substring(0, 500) + '... (truncated)'; + } + document.getElementById('friction_reason').textContent = frictionReason; + + // No API source indicators shown to users + + const severity = frictionDetection.severity; + const severityEl = document.getElementById('friction_severity'); + if (severity !== undefined && severity !== null && severity !== 'N/A') { + const severityNum = typeof severity === 'number' ? 
severity : parseFloat(severity); + if (!isNaN(severityNum)) { + severityEl.textContent = severityNum.toFixed(2) + ' / 1.0'; + severityEl.style.color = severityNum > 0.7 ? '#d9534f' : severityNum > 0.4 ? '#f0ad4e' : '#5cb85c'; + } else { + severityEl.textContent = 'N/A'; + } + } else { + severityEl.textContent = 'N/A'; + } + } + + // Display Intervention Suggestion + if (interventionSuggestion) { + interventionSuggestionCard.classList.remove('hidden'); + // Handle both boolean and string values + const interventionSuggested = interventionSuggestion.intervention_suggested === true || + interventionSuggestion.intervention_suggested === 'True' || + (typeof interventionSuggestion.intervention_suggested === 'string' && interventionSuggestion.intervention_suggested.toLowerCase() === 'true'); + + const interventionSuggestedEl = document.getElementById('intervention_suggested'); + interventionSuggestedEl.textContent = interventionSuggested ? '💡 Yes' : '✅ No intervention needed'; + interventionSuggestedEl.style.color = interventionSuggested ? '#667eea' : '#5cb85c'; + interventionSuggestedEl.style.fontWeight = 'bold'; + + let suggestionText = interventionSuggestion.suggestion || 'No specific intervention needed at this time.'; + // Truncate very long suggestions for display + if (suggestionText.length > 1000) { + suggestionText = suggestionText.substring(0, 1000) + '... (truncated)'; + } + document.getElementById('intervention_text').textContent = suggestionText; + document.getElementById('intervention_text').style.fontStyle = interventionSuggested ? 'normal' : 'italic'; + } + } +}); + diff --git a/frontend/templates/index.html b/frontend/templates/index.html new file mode 100644 index 000000000..8f102af74 --- /dev/null +++ b/frontend/templates/index.html @@ -0,0 +1,144 @@ + + + + + + CIFR Agent System + + + +
    + +
    +
    + + +
    +

    🤖 CIFR Agent System

    +

    AI-Powered Collaboration Intelligence & Friction Reduction

    +
    + + +
    +

    🎯 CIFR Orchestration System

    +

    + The CIFR (Collaborative Intelligence & Friction Reduction) Agent System is an intelligent orchestration platform + that coordinates multiple specialized AI agents to monitor, analyze, and improve team collaboration. The system processes + collaboration messages through a sophisticated pipeline where each agent plays a critical role in understanding context, + detecting issues, and providing actionable insights. +

    +
    +
    📥 Input
    +
    +
    💬 Communication
    +
    +
    📚 Knowledge
    +
    +
    ⚠️ Friction
    +
    +
    💡 Intervention
    +
    +
    📤 Output
    +
    +
    + + +
    +

    Meet Your AI Agents

    +
    +
    +
    💬
    +

    Communication Agent

    +

    Analyzes messages using Gemini AI to extract sentiment, entities, and context from text and images. Provides deep understanding of communication patterns and emotional tone.

    +
    +
    +
    📚
    +

    Knowledge Agent

    +

    Acts as the central memory hub, storing and retrieving context from all communications. Maintains a knowledge base that enables agents to understand patterns and historical context.

    +
    +
    +
    ⚠️
    +

    Friction Detection Agent

    +

    Identifies misalignments, conflicts, and potential friction points in team communications. Uses AI reasoning to detect subtle patterns and escalation risks.

    +
    +
    +
    💡
    +

    Intervention Agent

    +

    Provides intelligent, actionable suggestions to resolve friction and improve collaboration. Generates context-aware recommendations for clarification, action items, or mediation.

    +
    +
    +
    + + +
    +

    🚀 Try Demo Examples

    +

    Click any example below to see the AI agents in action:

    +
    + + + + + +
    +
    + +
    +

    📝 Custom Message Analysis

    +
    + + + + +
    + +
    + +
    +

    Analysis Results

    + + +
    +

    Submitted Message

    +

    Text:

    + +
    + + + + + + + + +
    +
    + + + diff --git a/images/folder-githb.png b/images/folder-githb.png deleted file mode 100644 index cd7d8d5d5..000000000 Binary files a/images/folder-githb.png and /dev/null differ diff --git a/src/__init__.py b/src/__init__.py new file mode 100644 index 000000000..3ea5deb8e --- /dev/null +++ b/src/__init__.py @@ -0,0 +1,3 @@ +# Core hackathon modules package + + diff --git a/src/__pycache__/__init__.cpython-311.pyc b/src/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 000000000..5b86ab11b Binary files /dev/null and b/src/__pycache__/__init__.cpython-311.pyc differ diff --git a/src/__pycache__/executor.cpython-311.pyc b/src/__pycache__/executor.cpython-311.pyc new file mode 100644 index 000000000..2ba04dc52 Binary files /dev/null and b/src/__pycache__/executor.cpython-311.pyc differ diff --git a/src/__pycache__/memory.cpython-311.pyc b/src/__pycache__/memory.cpython-311.pyc new file mode 100644 index 000000000..657044c55 Binary files /dev/null and b/src/__pycache__/memory.cpython-311.pyc differ diff --git a/src/__pycache__/planner.cpython-311.pyc b/src/__pycache__/planner.cpython-311.pyc new file mode 100644 index 000000000..15bc4c828 Binary files /dev/null and b/src/__pycache__/planner.cpython-311.pyc differ diff --git a/src/executor.py b/src/executor.py new file mode 100644 index 000000000..a5c9fa2db --- /dev/null +++ b/src/executor.py @@ -0,0 +1,76 @@ +import logging +from typing import Any, Dict, List, Optional +from src.memory import MemoryStore +from src import planner + +logger = logging.getLogger(__name__) + + +class Executor: + """ + Coordinates planning and tool/agent calls. + Expects injected agents to keep dependencies explicit for the hackathon template. 
+ """ + + def __init__( + self, + communication_agent: Any, + friction_detection_agent: Any, + intervention_agent: Any, + knowledge_agent: Any, + memory_store: Optional[MemoryStore] = None, + ): + self.communication_agent = communication_agent + self.friction_detection_agent = friction_detection_agent + self.intervention_agent = intervention_agent + self.knowledge_agent = knowledge_agent + self.memory = memory_store or MemoryStore() + + def execute_plan( + self, + goal: str, + messages: List[Dict[str, Any]], + context: Optional[Dict[str, Any]] = None, + ) -> Dict[str, Any]: + plan_result = planner.plan(goal, context) + self.memory.log("plan_created", {"goal": goal, "plan": plan_result}) + + results: List[Dict[str, Any]] = [] + + for step in plan_result["steps"]: + action = step.get("action") + + if action == "analyze_messages": + for message in messages: + analysis = self.communication_agent.process_collaboration_message(message) + self.memory.log("analysis", {"message": message, "analysis": analysis}) + results.append({"step": step["id"], "type": "analysis", "result": analysis}) + + elif action == "detect_friction": + for message in messages: + context_key = f"communication_analysis_{message.get('message_id', 'demo_msg')}" + stored = self.knowledge_agent.retrieve_context(context_key) or {"message": message} + friction = self.friction_detection_agent.detect_misalignment(stored) + self.memory.log("friction_detection", {"message": message, "friction": friction}) + results.append({"step": step["id"], "type": "friction", "result": friction}) + + elif action == "generate_interventions": + for message in messages: + context_key = f"communication_analysis_{message.get('message_id', 'demo_msg')}" + stored = self.knowledge_agent.retrieve_context(context_key) or {"message": message} + friction = stored.get("friction", {}) + intervention = self.intervention_agent.suggest_clarification( + {"message": stored.get("message", {}), "reason": friction.get("reason", "")} + ) + 
self.memory.log("intervention", {"message": message, "intervention": intervention}) + results.append({"step": step["id"], "type": "intervention", "result": intervention}) + + else: + # Unknown action; log and continue + logger.warning("Skipped unknown action: %s", action) + self.memory.log("skipped_step", {"step": step}) + results.append({"step": step.get("id"), "type": "skipped", "reason": "unknown action"}) + + return {"plan": plan_result, "results": results, "trace": self.memory.latest()} + + diff --git a/src/memory.py b/src/memory.py new file mode 100644 index 000000000..3dde7bf22 --- /dev/null +++ b/src/memory.py @@ -0,0 +1,28 @@ +from datetime import datetime +from typing import Any, Dict, List, Optional + + +class MemoryStore: + """Lightweight in-memory log for executions and agent traces.""" + + def __init__(self): + self.events: List[Dict[str, Any]] = [] + + def log(self, event_type: str, payload: Dict[str, Any]) -> Dict[str, Any]: + entry = { + "event": event_type, + "payload": payload, + "timestamp": datetime.utcnow().isoformat() + "Z", + } + self.events.append(entry) + return entry + + def get_events(self, event_type: Optional[str] = None) -> List[Dict[str, Any]]: + if event_type is None: + return list(self.events) + return [e for e in self.events if e["event"] == event_type] + + def latest(self, n: int = 20) -> List[Dict[str, Any]]: + return self.events[-n:] + + diff --git a/src/planner.py b/src/planner.py new file mode 100644 index 000000000..525e5344f --- /dev/null +++ b/src/planner.py @@ -0,0 +1,76 @@ +import json +import os +from typing import Any, Dict, List, Optional + +try: + import google.genai as genai +except ImportError: + genai = None + + +def _make_client() -> Optional[Any]: + """Create a Gemini client if api key and library are available.""" + api_key = os.getenv("GOOGLE_API_KEY") + if not api_key or genai is None: + return None + try: + return genai.Client(api_key=api_key) + except Exception: + return None + + +def 
_parse_candidate(raw_text: str) -> List[Dict[str, Any]]: + """Parse Gemini JSON output into a list of steps.""" + try: + data = json.loads(raw_text) + if isinstance(data, dict) and "steps" in data and isinstance(data["steps"], list): + return data["steps"] + if isinstance(data, list): + return data + except Exception: + pass + return [] + + +def plan(goal: str, context: Optional[Dict[str, Any]] = None) -> Dict[str, Any]: + """ + Produce a task plan for the goal. + Returns {source, steps, raw_response, error}. + """ + context = context or {} + steps: List[Dict[str, Any]] = [] + raw_response = None + error = None + + client = _make_client() + if goal and client: + prompt = ( + "You are a planner. Create 3-6 JSON steps to satisfy the goal. " + "Each step must have: id, action, input, notes, expected_output. " + f"Goal: {goal}\nContext: {json.dumps(context)[:1500]}" + ) + try: + response = client.models.generate_content( + model=os.getenv("GEMINI_PRO_MODEL_ID", "gemini-2.0-flash"), + contents=[{"parts": [{"text": prompt}]}], + ) + if response.candidates and response.candidates[0].content.parts: + raw_response = response.candidates[0].content.parts[0].text + steps = _parse_candidate(raw_response) + except Exception as exc: # pragma: no cover - network/Gemini issues + error = str(exc) + + if not steps: + # Fallback heuristic plan + steps = [ + {"id": "1", "action": "analyze_messages", "input": "ingest and analyze messages", "notes": "use CommunicationAgent", "expected_output": "message analyses"}, + {"id": "2", "action": "detect_friction", "input": "use analyses", "notes": "call FrictionDetectionAgent", "expected_output": "friction report"}, + {"id": "3", "action": "generate_interventions", "input": "friction report", "notes": "call InterventionAgent", "expected_output": "recommended actions"}, + ] + source = "heuristic" + else: + source = "gemini" + + return {"source": source, "steps": steps, "raw_response": raw_response, "error": error} + + diff --git 
a/test_api_keys.py b/test_api_keys.py new file mode 100644 index 000000000..1140aaf54 --- /dev/null +++ b/test_api_keys.py @@ -0,0 +1,144 @@ +#!/usr/bin/env python3 +"""Test script to check which API keys have exhausted their quota""" + +import os +import sys +import google.genai as genai +from dotenv import load_dotenv + +# Load environment variables +try: + load_dotenv() +except: + pass + +# Import config +sys.path.insert(0, os.path.dirname(__file__)) +from cifr_agent_system.config import Config + +def test_api_key(key_name, api_key, model="gemini-2.0-flash"): + """Test a single API key""" + if not api_key: + return {"status": "not_set", "key_name": key_name} + + try: + client = genai.Client(api_key=api_key) + response = client.models.generate_content( + model=model, + contents=[{"parts": [{"text": "Hello, this is a test."}]}] + ) + + if response.candidates: + return { + "status": "working", + "key_name": key_name, + "key_preview": api_key[:20] + "...", + "response": response.candidates[0].content.parts[0].text[:50] + "..." 
+ } + else: + return {"status": "no_response", "key_name": key_name} + + except Exception as e: + error_str = str(e) + if "429" in error_str or "RESOURCE_EXHAUSTED" in error_str or "quota" in error_str.lower(): + return { + "status": "quota_exhausted", + "key_name": key_name, + "key_preview": api_key[:20] + "...", + "error": "Quota exceeded - 429 RESOURCE_EXHAUSTED" + } + else: + return { + "status": "error", + "key_name": key_name, + "key_preview": api_key[:20] + "...", + "error": str(e)[:100] + } + +def main(): + print("="*70) + print("🔍 CIFR Agent System - API Key Quota Diagnostic") + print("="*70) + print() + + # Test all keys + keys_to_test = [ + ("Default Key (GOOGLE_API_KEY)", Config.GOOGLE_API_KEY), + ("Communication Agent (GOOGLE_API_KEY_CA)", Config.GOOGLE_API_KEY_CA), + ("Friction Detection Agent (GOOGLE_API_KEY_FA)", Config.GOOGLE_API_KEY_FA), + ("Intervention Agent (GOOGLE_API_KEY_IA)", Config.GOOGLE_API_KEY_IA), + ] + + results = [] + for key_name, api_key in keys_to_test: + print(f"Testing {key_name}...", end=" ", flush=True) + result = test_api_key(key_name, api_key) + results.append(result) + + if result["status"] == "working": + print("✅ WORKING") + elif result["status"] == "quota_exhausted": + print("❌ QUOTA EXHAUSTED") + elif result["status"] == "not_set": + print("⚠️ NOT SET") + elif result["status"] == "error": + print(f"❌ ERROR: {result.get('error', 'Unknown error')[:50]}") + else: + print(f"⚠️ {result['status'].upper()}") + + print() + print("="*70) + print("📊 Summary Report") + print("="*70) + print() + + working_keys = [r for r in results if r["status"] == "working"] + exhausted_keys = [r for r in results if r["status"] == "quota_exhausted"] + not_set_keys = [r for r in results if r["status"] == "not_set"] + error_keys = [r for r in results if r["status"] == "error"] + + if working_keys: + print("✅ WORKING KEYS:") + for r in working_keys: + print(f" • {r['key_name']}: {r.get('key_preview', 'N/A')}") + print() + + if exhausted_keys: + 
print("❌ QUOTA EXHAUSTED KEYS:") + for r in exhausted_keys: + print(f" • {r['key_name']}: {r.get('key_preview', 'N/A')}") + print(f" → {r.get('error', 'Quota limit reached')}") + print() + + if not_set_keys: + print("⚠️ NOT CONFIGURED:") + for r in not_set_keys: + print(f" • {r['key_name']}") + print() + + if error_keys: + print("❌ ERROR KEYS:") + for r in error_keys: + print(f" • {r['key_name']}: {r.get('error', 'Unknown error')}") + print() + + print("="*70) + print("💡 Recommendations:") + print("="*70) + + if exhausted_keys: + print("1. Replace exhausted keys with new API keys from Google AI Studio") + print("2. Wait for quota reset (usually resets every minute/hour)") + print("3. Enable billing for higher quotas") + + if len(working_keys) < 3: + print(f"4. Configure {3 - len(working_keys)} more working keys for full per-agent setup") + + print() + print("🔗 Get new API keys: https://aistudio.google.com/app/apikey") + print("📊 Check usage: https://ai.dev/usage?tab=rate-limit") + print() + +if __name__ == "__main__": + main() +