-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathwidget_api.py
More file actions
1594 lines (1367 loc) · 57.3 KB
/
widget_api.py
File metadata and controls
1594 lines (1367 loc) · 57.3 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
"""
PDBOT Widget API Server v3.3.4
==============================
A lightweight Flask API that bridges the React widget to the PDBOT RAG pipeline.
Optimized RAG: precision chunking, 100-word answers, dynamic value retrieval.
Features:
- Contextual memory (session-based chat history)
- RAG-powered responses from Manual for Development Projects 2024
- Multi-class query classification (greeting, ambiguous, off-scope, red-line, abusive)
- Suggested follow-up questions (ChatGPT-style)
- Clarification prompts for vague queries
- Source and passage tracking
- Feedback collection
- Admin status endpoint with Groq controls
- Statistics dashboard endpoint
- Production WSGI server (waitress)
- Localtunnel for mobile access
Endpoints:
POST /chat - Send a query and get a response
POST /feedback/answer - Submit answer feedback
POST /feedback/session - Submit session feedback
POST /memory/clear - Clear session memory
GET /health - Health check
GET /admin/status - Backend status for admin panel
GET /admin/statistics - Detailed usage statistics
GET /admin/groq-status - Groq API status
POST /admin/groq-toggle - Toggle force Groq mode
@author M. Hassan Arif Afridi
@version 3.3.4
"""
import json
import os
import re
import socket
import sys
from datetime import datetime
from typing import Dict, List

from flask import Flask, request, jsonify, session
from flask_cors import CORS
# Add src to path for imports
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'src'))
# Admin password for secure features
ADMIN_PASSWORD = 'nufc'
# Import PDBOT modules
from rag_langchain import search_sentences
from models.local_model import LocalModel
from utils.text_utils import find_exact_locations
# Import classifier and templates for off-scope/red-line detection
from core.multi_classifier import MultiClassifier
from core.templates import get_guardrail_response
from core.comparisons import get_comparison_response
# Groq API support (optional)
try:
from groq import Groq
GROQ_AVAILABLE = True
except ImportError:
GROQ_AVAILABLE = False
print("[Widget API] Groq not installed - Groq mode disabled")
# PDF path for exact mode
PDF_PATH = os.path.join(os.path.dirname(__file__), 'data', 'uploads', 'Manual-for-Development-Project-2024.pdf')
# Cached per-page texts. None means "not attempted yet"; a failed attempt
# caches [] so the (slow) load is not retried on every request.
RAW_PAGES_CACHE = None


def load_pdf_pages():
    """Load and cache the Manual's page texts for exact-mode search.

    Returns:
        list[str]: one extracted text string per PDF page; empty when the
        PDF is missing or PyMuPDF is not installed.
    """
    global RAW_PAGES_CACHE
    if RAW_PAGES_CACHE is not None:
        return RAW_PAGES_CACHE
    pages = []
    try:
        import fitz  # PyMuPDF
        if os.path.exists(PDF_PATH):
            doc = fitz.open(PDF_PATH)
            try:
                # Fix: ensure the document handle is closed even if text
                # extraction raises partway through the page loop.
                for i in range(len(doc)):
                    pages.append(doc.load_page(i).get_text("text") or "")
            finally:
                doc.close()
            print(f"[Widget API] Loaded {len(pages)} PDF pages for exact mode")
    except Exception as e:
        # Best-effort: exact mode degrades to RAG passages when pages == [].
        print(f"[Widget API] Could not load PDF: {e}")
    RAW_PAGES_CACHE = pages
    return pages
app = Flask(__name__)
# NOTE(review): hard-coded session secret — load from environment/config in
# production so the key stays out of version control.
app.secret_key = 'pcbot-secure-key-2026-nufc'  # Required for sessions
CORS(app, supports_credentials=True)  # Enable CORS with credentials for sessions
# Serve the landing page at root (used by the Cloudflare tunnel as well)
@app.route('/')
def serve_landing():
    """Serve landing page with all options"""
    try:
        with open('public/html/landing.html', 'r', encoding='utf-8') as page:
            html = page.read()
    except FileNotFoundError:
        # Page missing on disk — still answer 200 so health probes pass.
        return jsonify({"error": "Landing page not found", "status": "ok", "api": "/chat"}), 200
    return html, 200, {'Content-Type': 'text/html'}
@app.route('/mobile.html')
def serve_mobile():
    """Serve mobile-friendly chat page"""
    try:
        with open('public/html/mobile.html', 'r', encoding='utf-8') as page:
            html = page.read()
    except FileNotFoundError:
        # Page missing on disk — still answer 200 so health probes pass.
        return jsonify({"error": "Mobile page not found", "status": "ok", "api": "/chat"}), 200
    return html, 200, {'Content-Type': 'text/html'}
# Serve the standalone widget page
@app.route('/widget-standalone.html')
def serve_widget_standalone():
    """Serve the standalone shareable widget page"""
    try:
        with open('public/html/widget-standalone.html', 'r', encoding='utf-8') as page:
            html = page.read()
    except FileNotFoundError:
        return jsonify({"error": "Widget standalone page not found"}), 404
    return html, 200, {'Content-Type': 'text/html'}
# Serve the full Widget UI from the API server (password protected)
@app.route('/widget')
def serve_widget():
    """Serve the full React widget UI (dev mode - requires admin access).

    Unauthenticated visitors get an inline self-contained login page whose
    JS POSTs the admin code to /admin/authenticate (with credentials so the
    Flask session cookie is set) and reloads on success. Once the session
    carries 'admin_authenticated', the built widget page is served instead.
    """
    # Check if user is authenticated
    if not session.get('admin_authenticated'):
        # Return password protection page (inline HTML/CSS/JS, no assets needed)
        return '''
    <!DOCTYPE html>
    <html>
    <head>
        <title>PCBot Development Widget - Admin Access</title>
        <style>
            body {
                font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
                display: flex;
                justify-content: center;
                align-items: center;
                height: 100vh;
                margin: 0;
                background: linear-gradient(135deg, #1a472a 0%, #2d5f3f 100%);
            }
            .login-box {
                background: white;
                padding: 40px;
                border-radius: 12px;
                box-shadow: 0 4px 20px rgba(0,0,0,0.2);
                text-align: center;
                max-width: 400px;
            }
            h1 { color: #1a472a; margin-bottom: 10px; }
            .subtitle { color: #666; margin-bottom: 30px; }
            input {
                width: 100%;
                padding: 12px;
                margin: 10px 0;
                border: 2px solid #ddd;
                border-radius: 6px;
                font-size: 16px;
                box-sizing: border-box;
            }
            input:focus { border-color: #1a472a; outline: none; }
            button {
                width: 100%;
                padding: 12px;
                background: #1a472a;
                color: white;
                border: none;
                border-radius: 6px;
                font-size: 16px;
                cursor: pointer;
                margin-top: 10px;
            }
            button:hover { background: #2d5f3f; }
            .error { color: #d32f2f; margin-top: 10px; display: none; }
            .back-link { margin-top: 20px; }
            .back-link a { color: #1a472a; text-decoration: none; }
            .back-link a:hover { text-decoration: underline; }
        </style>
    </head>
    <body>
        <div class="login-box">
            <h1>🔒 Admin Access Required</h1>
            <p class="subtitle">PCBot Development Widget</p>
            <form id="loginForm">
                <input type="password" id="password" placeholder="Enter admin code" required>
                <button type="submit">Access Widget</button>
            </form>
            <p class="error" id="error">❌ Invalid code. Please try again.</p>
            <div class="back-link">
                <a href="/">← Back to Landing Page</a>
            </div>
        </div>
        <script>
            document.getElementById('loginForm').addEventListener('submit', async (e) => {
                e.preventDefault();
                const password = document.getElementById('password').value;
                const response = await fetch('/admin/authenticate', {
                    method: 'POST',
                    headers: { 'Content-Type': 'application/json' },
                    credentials: 'include',
                    body: JSON.stringify({ password })
                });
                const result = await response.json();
                if (result.success) {
                    window.location.reload();
                } else {
                    document.getElementById('error').style.display = 'block';
                    document.getElementById('password').value = '';
                }
            });
        </script>
    </body>
    </html>
    ''', 200, {'Content-Type': 'text/html'}
    try:
        # Serve widget-dev.html which loads the built widget
        with open('public/html/widget-dev.html', 'r', encoding='utf-8') as f:
            return f.read(), 200, {'Content-Type': 'text/html'}
    except FileNotFoundError:
        return "Widget dev page not found. Make sure widget-dev.html exists.", 404
# Serve widget static assets (JS, CSS)
@app.route('/src/<path:filename>')
def serve_widget_src(filename):
    """Serve widget source files for dev mode"""
    from flask import send_from_directory
    src_dir = os.path.join(os.path.dirname(__file__), 'frontend-widget', 'src')
    return send_from_directory(src_dir, filename)
@app.route('/dist/<path:filename>')
def serve_widget_dist(filename):
    """Serve widget dist files"""
    from flask import send_from_directory
    dist_dir = os.path.join(os.path.dirname(__file__), 'frontend-widget', 'dist')
    return send_from_directory(dist_dir, filename)
@app.route('/assets/<path:filename>')
def serve_widget_assets(filename):
    """Serve widget assets (logos, images)"""
    from flask import send_from_directory
    base_dir = os.path.dirname(__file__)
    # Prefer public/assets; fall back to the widget source assets.
    primary = os.path.join(base_dir, 'public', 'assets')
    if os.path.exists(os.path.join(primary, filename)):
        return send_from_directory(primary, filename)
    fallback = os.path.join(base_dir, 'frontend-widget', 'src', 'assets')
    return send_from_directory(fallback, filename)
@app.route('/@<path:rest>')
def serve_vite_deps(rest):
    """Handle Vite dev dependencies.

    Vite's dev server requests modules under /@... paths; this API server
    cannot resolve those, so answer 404 rather than erroring.
    """
    return "Dev mode not supported via API", 404
# Initialize model and classifier — all lazily created on first use by the
# get_model / get_groq_client / get_classifier helpers below.
model = None
groq_client = None
classifier = None
# Session memory store (in-memory, per-session chat history; lost on restart)
# Format: { session_id: [ { "role": "user/bot", "content": "...", "timestamp": "..." }, ... ] }
session_memory: Dict[str, List[Dict]] = {}
# Maximum messages to keep in memory per session (oldest are dropped)
MAX_MEMORY_MESSAGES = 20
def get_classifier():
    """Return the process-wide MultiClassifier, creating it on first use."""
    global classifier
    if classifier is not None:
        return classifier
    classifier = MultiClassifier()
    return classifier
def get_model():
    """Return the process-wide LocalModel, creating it on first use."""
    global model
    if model is not None:
        return model
    model = LocalModel()
    return model
def get_groq_client():
    """Lazily build the shared Groq client.

    Returns None when the groq package is missing or GROQ_API_KEY is unset
    (a warning is printed in the latter case on every call).
    """
    global groq_client
    if groq_client is not None or not GROQ_AVAILABLE:
        return groq_client
    api_key = os.environ.get('GROQ_API_KEY')
    if api_key:
        groq_client = Groq(api_key=api_key)
    else:
        print("[Widget API] Warning: GROQ_API_KEY not set")
    return groq_client
def _sanitize_groq_answer(answer: str, page: int) -> str:
    """Normalize a raw Groq completion into the widget's answer format.

    Strips any model-emitted citation and leading filler phrases, caps the
    text at 100 words (preferring a sentence boundary), and appends a clean
    Manual citation (with page number when one is known).
    """
    # Remove existing citations (a clean one is appended below).
    answer = re.sub(r"\n*Source:.*$", "", answer, flags=re.IGNORECASE | re.MULTILINE)
    # Remove filler phrases.
    fillers = [
        r"^(?:According to the (?:provided )?(?:context|manual|text),?\s*)",
        r"^(?:Based on the (?:provided )?(?:context|manual|text),?\s*)",
    ]
    for filler in fillers:
        answer = re.sub(filler, "", answer, flags=re.IGNORECASE)
    # v3.3.2: Allow up to 100 words to avoid cutting mid-sentence/number.
    words = answer.split()
    if len(words) > 100:
        answer = " ".join(words[:100])
        # Try to end at a sentence boundary when one falls in the back half.
        last_period = answer.rfind(".")
        if last_period > len(answer) * 0.5:
            answer = answer[:last_period + 1]
        elif not answer.rstrip().endswith(('.', '!', '?')):
            answer = answer.rstrip(".!?,;") + "."
    answer = answer.strip()
    # Add clean citation.
    doc_name = "Manual for Development Projects 2024"
    if page and page > 0:
        return f"{answer}\n\nSource: {doc_name}, p.{page}"
    return f"{answer}\n\nSource: {doc_name}"


def generate_groq_response(query: str, context: str, page: int = 0) -> str:
    """
    v3.3.0: Generate response using Groq API with strict formatting.
    Same guardrails as local model - 45-70 words, direct answer first.

    Args:
        query: The user's question.
        context: Retrieved context (truncated to 2500 chars in the prompt).
        page: Page number for the citation line; 0/unknown omits it.

    Returns:
        A sanitized answer string, or a "⚠️ ..." warning string when the
        Groq client is unavailable or the API call fails.
    """
    client = get_groq_client()
    if not client:
        return "⚠️ Groq API not available. Please set GROQ_API_KEY environment variable."
    try:
        # v3.3.0: Strict system prompt matching local model
        system_prompt = """You are PCBot, the official Planning Commission assistant for the Manual for Development Projects 2024.
Your answers must ALWAYS follow these rules:
1. Length: 45-70 words maximum.
2. Use ONLY the retrieved context. No outside knowledge.
3. Give the direct answer FIRST, no background theory.
4. No warnings, no disclaimers, no template markers.
5. If numbers exist in the context, extract them completely.
6. If answer truly not found, say: "Not found in the Manual."
Always end with one line:
Source: Manual for Development Projects 2024, p.<page>"""
        # v3.3.0: Strict user prompt
        user_prompt = f"""Context from the Manual:
{context[:2500]}
Question: {query}
Answer in 45-70 words. Extract numbers if present. Direct answer first:"""
        response = client.chat.completions.create(
            model="llama-3.1-8b-instant",
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_prompt}
            ],
            max_tokens=200,  # Reduced from 500 to prevent over-explanation
            temperature=0.2  # Lower temp for more focused answers
        )
        answer = response.choices[0].message.content or ""
        # v3.3.2: Apply sanitization - allow up to 100 words for complete answers
        return _sanitize_groq_answer(answer, page)
    except Exception as e:
        print(f"[Groq API] Error: {e}")
        return f"⚠️ Groq API error: {str(e)}"
def get_session_history(session_id: str) -> List[Dict]:
    """Return the chat history list for a session, creating it if absent."""
    return session_memory.setdefault(session_id, [])
def add_to_session_history(session_id: str, role: str, content: str):
    """Append one message to a session's history, trimming to the cap."""
    entry = {
        "role": role,
        "content": content,
        "timestamp": datetime.now().isoformat()
    }
    history = get_session_history(session_id)
    history.append(entry)
    # Drop the oldest messages beyond the cap to prevent memory overflow.
    if len(history) > MAX_MEMORY_MESSAGES:
        session_memory[session_id] = history[-MAX_MEMORY_MESSAGES:]
def build_context_with_memory(session_id: str, current_query: str) -> str:
    """Render recent chat history as a prompt prefix for contextual answers.

    Returns '' when the session has no history; otherwise the last three
    exchanges (up to 6 messages), each clipped to 200 characters.
    """
    history = get_session_history(session_id)
    if not history:
        return ""
    rendered = []
    for msg in history[-6:]:
        speaker = "User" if msg["role"] == "user" else "Assistant"
        rendered.append(f"{speaker}: {msg['content'][:200]}")
    return "Previous conversation:\n" + "\n".join(rendered)
def clear_session_memory(session_id: str):
    """Forget all stored history for a session (no-op if unknown)."""
    session_memory.pop(session_id, None)
# =====================================================
# SUGGESTED FOLLOW-UP QUESTIONS (ChatGPT-style)
# =====================================================
# v2.5.0-patch1: Comprehensive topic-based question suggestions
# Maps a topic/classification key to a pool of candidate follow-up
# questions; get_suggested_questions() samples 3 from the matched pool
# mixed with "general".
FOLLOW_UP_QUESTIONS = {
    "greeting": [
        "What is PC-I?",
        "What are the DDWP approval limits?",
        "How does project approval work?",
        "What is ECNEC?",
    ],
    "ambiguous": [
        "What is the purpose of PC-I?",
        "What is the approval hierarchy for projects?",
        "What are the different project phases?",
        "How is project cost estimated?",
    ],
    # PC-I related
    "pc-i": [
        "What are the components of PC-I?",
        "How to prepare a PC-I document?",
        "What is the approval process for PC-I?",
        "What attachments are required for PC-I?",
        "What is the difference between PC-I and PC-II?",
    ],
    # PC-II related
    "pc-ii": [
        "When is PC-II required?",
        "What is the purpose of feasibility studies in PC-II?",
        "What cost limits apply to PC-II?",
        "How to submit PC-II for approval?",
    ],
    # PC-III related
    "pc-iii": [
        "What is PC-III used for?",
        "How often should PC-III be submitted?",
        "What information is included in PC-III?",
        "Who reviews PC-III reports?",
    ],
    # PC-IV related
    "pc-iv": [
        "What is PC-IV?",
        "When is PC-IV prepared?",
        "What is project completion report?",
        "What metrics are in PC-IV?",
    ],
    # PC-V related
    "pc-v": [
        "What is PC-V evaluation?",
        "When is PC-V conducted?",
        "What is post-completion evaluation?",
        "How is project impact measured?",
    ],
    # Approval bodies
    "ddwp": [
        "What is the DDWP approval limit?",
        "Who chairs the DDWP meeting?",
        "What projects go to DDWP?",
        "How is DDWP different from CDWP?",
    ],
    "cdwp": [
        "What is the CDWP approval threshold?",
        "Who are the members of CDWP?",
        "What projects require CDWP approval?",
        "How to submit projects to CDWP?",
    ],
    "ecnec": [
        "What is the ECNEC approval limit?",
        "Who chairs ECNEC meetings?",
        "What projects go to ECNEC?",
        "What is the ECNEC approval process?",
    ],
    # Numeric/financial queries
    "numeric_query": [
        "What are the threshold limits for CDWP?",
        "What is the ECNEC approval limit?",
        "What is the maximum DDWP approval limit?",
        "How is project cost calculated?",
    ],
    # Definition queries
    "definition_query": [
        "What are the types of PC proformas?",
        "What is the difference between PC-I and PC-II?",
        "What is PSDP?",
        "How is a project defined in the Manual?",
    ],
    # Comparison queries
    "comparison_query": [
        "What is the difference between DDWP and CDWP?",
        "How does PC-I differ from PC-II?",
        "What is the difference between federal and provincial projects?",
        "How is ADP different from PSDP?",
    ],
    # Procedure queries
    "procedure_query": [
        "What are the stages of project approval?",
        "What documents are required for PC-I?",
        "How does project revision work?",
        "What is the project cycle?",
    ],
    # Compliance queries
    "compliance_query": [
        "What are the audit requirements?",
        "How is project transparency ensured?",
        "What records must be maintained?",
        "What are the PC-I format requirements?",
    ],
    # Monitoring queries
    "monitoring_evaluation": [
        "What are the project monitoring KPIs?",
        "How is project progress tracked?",
        "What is the role of M&E Division?",
        "How often are projects reviewed?",
    ],
    # Budget/PSDP
    "budget": [
        "What is PSDP?",
        "How are funds allocated to projects?",
        "What is the budget release process?",
        "How is project cost overrun handled?",
    ],
    # General (also mixed into every other pool for variety)
    "general": [
        "What is the role of Planning Commission?",
        "What is PSDP?",
        "How are federal projects approved?",
        "What is project monitoring?",
        "What is the project approval hierarchy?",
    ],
}
def get_suggested_questions(query_class: str, query: str = "") -> List[str]:
    """
    Generate suggested follow-up questions based on query type.

    Args:
        query_class: Classification result
        query: Original query for context

    Returns:
        List of up to 3 randomly-sampled suggestions, excluding any
        suggestion whose text already appears inside the query.
    """
    import random
    q_lower = query.lower()
    # Topic detection. Fix: the specific proforma names are substrings of
    # each other ("pc-iii" contains "pc-ii" and "pc-i"; "pc-iv" contains
    # "pc-i"), so they must be tested longest-first — the old pc-i-first
    # ordering routed every PC-II/PC-III/PC-IV query to the PC-I pool.
    topic_rules = [
        (("pc-iii", "pc3", "pc 3"), "pc-iii"),
        (("pc-iv", "pc4", "pc 4"), "pc-iv"),
        (("pc-v", "pc5", "pc 5"), "pc-v"),
        (("pc-ii", "pc2", "pc 2"), "pc-ii"),
        (("pc-i", "pc1", "pc 1"), "pc-i"),
        (("ddwp",), "ddwp"),
        (("cdwp",), "cdwp"),
        (("ecnec", "nec"), "ecnec"),
        (("psdp", "budget", "fund"), "budget"),
        (("monitor", "evaluation", "m&e"), "monitoring_evaluation"),
        (("differ", "compare", "vs"), "comparison_query"),
    ]
    pool: List[str] = []
    for keywords, pool_key in topic_rules:
        if any(k in q_lower for k in keywords):
            pool = FOLLOW_UP_QUESTIONS.get(pool_key, [])
            break
    else:
        # No topic keyword matched: fall back to the classification pool.
        if query_class in FOLLOW_UP_QUESTIONS:
            pool = FOLLOW_UP_QUESTIONS[query_class]
        else:
            pool = FOLLOW_UP_QUESTIONS.get("general", [])
    # Add some variety by mixing with general questions
    general = FOLLOW_UP_QUESTIONS.get("general", [])
    combined = list(set(pool + general))
    # Return 3 random questions, avoiding ones already contained in the query
    suggestions = [q for q in combined if q.lower() not in query.lower()]
    random.shuffle(suggestions)
    return suggestions[:3]
def generate_contextual_followups(query: str, answer: str, query_class: str) -> List[str]:
    """
    Build up to three follow-up questions tailored to the answer text.

    Args:
        query: Original user query
        answer: Bot's response
        query_class: Classification result

    Returns:
        List of 3 contextual follow-up questions
    """
    q_lower = query.lower()
    a_lower = answer.lower()
    followups: List[str] = []
    # A suggestion fires when its keyword appears in the answer but was not
    # already part of the user's question.
    keyword_prompts = [
        ("pc-i", "What are the mandatory sections of PC-I?"),
        ("pc-ii", "When is PC-II required?"),
        ("pc-iii", "What is PC-III used for?"),
        ("pc-iv", "What is the purpose of PC-IV?"),
        ("pc-v", "When is PC-V evaluation conducted?"),
        ("ddwp", "What is the DDWP approval threshold?"),
        ("cdwp", "What projects go to CDWP?"),
        ("ecnec", "What is the ECNEC approval limit?"),
    ]
    for keyword, prompt in keyword_prompts:
        if keyword in a_lower and keyword not in q_lower:
            followups.append(prompt)
    # Financial/process topics (multiple trigger words, one guard word).
    if ("approval" in a_lower or "approved" in a_lower) and "approval" not in q_lower:
        followups.append("What is the project approval hierarchy?")
    if ("cost" in a_lower or "budget" in a_lower) and "cost" not in q_lower:
        followups.append("How is project cost estimated?")
    for keyword, prompt in [
        ("monitoring", "What are the project monitoring KPIs?"),
        ("psdp", "How are PSDP funds allocated?"),
        ("revision", "What is the project revision process?"),
    ]:
        if keyword in a_lower and keyword not in q_lower:
            followups.append(prompt)
    # For comparison-style queries, suggest related comparisons.
    if query_class in ["comparison_query", "numeric_query", "definition_query"]:
        if "ddwp" in q_lower or "cdwp" in q_lower:
            followups.append("What is the difference between CDWP and ECNEC?")
        if "pc-i" in q_lower or "pc-ii" in q_lower:
            followups.append("What are the different PC proformas?")
    # Top up to three slots with topic-based suggestions.
    if len(followups) < 3:
        for extra in get_suggested_questions(query_class, query):
            if extra not in followups:
                followups.append(extra)
                if len(followups) >= 3:
                    break
    return followups[:3]
# v2.5.0-patch1: Long answer handling
MAX_ANSWER_WORDS = 250  # If answer exceeds this, suggest manual reference


def handle_long_answer(answer: str, sources: List[Dict], query: str) -> str:
    """
    Check if answer is too long and add page reference suggestion.

    Args:
        answer: The generated answer
        sources: List of source dictionaries with page info
        query: Original user query (unused; kept for interface stability)

    Returns:
        The answer unchanged when short enough, otherwise with a page
        reference note appended.
    """
    if len(answer.split()) <= MAX_ANSWER_WORDS:
        return answer
    # Get unique page numbers from sources (skip falsy/missing pages)
    pages = {str(s.get('page', '?')) for s in sources if s.get('page')}

    # Fix: sort numerically when the page label is a number so "9" precedes
    # "10" (a plain string sort would produce "10, 9"); non-numeric labels
    # (e.g. "Various") sort after the numbers.
    def _page_key(p: str):
        return (0, int(p)) if p.isdigit() else (1, p)

    pages_str = ", ".join(sorted(pages, key=_page_key)) if pages else "the relevant sections"
    # Add a note about detailed information
    truncation_note = f"\n\n📖 **Note:** This is a summary. For detailed information, please refer to **pages {pages_str}** in the Manual for Development Projects 2024."
    return answer + truncation_note
@app.route('/chat', methods=['POST'])
def chat():
    """
    Handle chat requests from the widget with contextual memory.

    Pipeline: classify the query (guardrail classes short-circuit), try
    pre-built comparison templates, then fall back to RAG retrieval plus
    either the local model or the Groq API. Exact mode skips generation
    and returns raw passage locations instead.

    Request:
        {
            "query": "What is PC-I?",
            "session_id": "uuid",
            "clear_memory": false,  // Optional: clear session memory
            "exact_mode": false,    // Optional: return raw passages
            "use_groq": false       // Optional: use Groq API
        }
    Response:
        {
            "answer": "...",
            "sources": [...],
            "passages": [...],
            "mode": "local|exact|groq"
        }
    """
    try:
        data = request.get_json()
        query = data.get('query', '').strip()
        session_id = data.get('session_id', 'widget-session')
        clear_memory = data.get('clear_memory', False)
        exact_mode = data.get('exact_mode', False)
        use_groq = data.get('use_groq', False)
        # Handle memory clear request (answers immediately, no generation)
        if clear_memory:
            clear_session_memory(session_id)
            return jsonify({
                'answer': 'Chat memory cleared.',
                'sources': [],
                'passages': []
            })
        if not query:
            return jsonify({
                'answer': 'Please enter a question.',
                'sources': [],
                'passages': []
            }), 400
        print(f"[Widget API] Query: {query[:50]}... (Session: {session_id[:8]}...)")
        # =====================================================
        # STEP 1: CLASSIFY QUERY (before RAG)
        # =====================================================
        query_classifier = get_classifier()
        classification = query_classifier.classify(query)
        query_class = classification.query_class
        print(f"[Widget API] Classification: {query_class}/{classification.subcategory}")
        # Handle guardrail classes (greeting, ambiguous, off-scope, red-line, abusive)
        # with a canned response; only greeting/ambiguous get suggestions.
        if query_class in ["greeting", "ambiguous", "off_scope", "red_line", "abusive"]:
            guardrail_response = get_guardrail_response(query_class, classification.subcategory or "", query)
            add_to_session_history(session_id, "user", query)
            add_to_session_history(session_id, "bot", guardrail_response)
            return jsonify({
                'answer': guardrail_response,
                'sources': [],
                'passages': [],
                'mode': 'guardrail',
                'classification': query_class,
                'suggested_questions': get_suggested_questions(query_class) if query_class in ["greeting", "ambiguous"] else []
            })
        # =====================================================
        # STEP 2: CHECK FOR COMPARISON QUERY (use pre-built templates)
        # =====================================================
        if query_class in ["comparison_query", "numeric_query", "definition_query"]:
            comparison_response = get_comparison_response(query)
            if comparison_response:
                add_to_session_history(session_id, "user", query)
                add_to_session_history(session_id, "bot", comparison_response)
                followups = get_suggested_questions(query_class, query)
                return jsonify({
                    'answer': comparison_response,
                    'sources': [{'title': 'Manual for Development Projects 2024', 'page': 'Various', 'relevance': 100}],
                    'passages': [],
                    'mode': 'comparison_template',
                    'classification': query_class,
                    'suggested_questions': followups
                })
            # If no template match, fall through to RAG
        # Get conversation context from memory (built BEFORE adding the new
        # user message so the current query is not duplicated in it)
        conversation_context = build_context_with_memory(session_id, query)
        # Add user message to memory
        add_to_session_history(session_id, "user", query)
        # Get RAG results
        rag_results = search_sentences(query, top_k=3)
        if not rag_results:
            no_result_answer = "I couldn't find relevant information in the manual for your question. Please try rephrasing or ask about Planning & Development topics."
            add_to_session_history(session_id, "bot", no_result_answer)
            return jsonify({
                'answer': no_result_answer,
                'sources': [],
                'passages': []
            })
        # Build context from RAG results
        context_parts = []
        sources = []
        passages = []  # Store full passage details
        for i, result in enumerate(rag_results[:3]):
            # Result dicts may use either key variant depending on retriever.
            text = result.get('text', result.get('content', ''))
            page = result.get('page', result.get('metadata', {}).get('page', 'N/A'))
            score = result.get('score', result.get('relevance', 0))
            # NOTE(review): 'source' is computed but never used below.
            source = f"Manual for Development Projects 2024, p.{page}"
            context_parts.append(f"[{i+1}] {text}")
            sources.append({
                'title': 'Manual for Development Projects 2024',
                'page': page,
                'relevance': round(score * 100) if score else 0
            })
            passages.append({
                'text': text,
                'page': page,
                'relevance': round(score * 100) if score else 0
            })
        rag_context = "\n\n".join(context_parts)
        # EXACT MODE: Find exact locations like Streamlit version
        if exact_mode:
            pdf_pages = load_pdf_pages()
            exact_locations = find_exact_locations(query, pdf_pages, max_results=5)
            if exact_locations:
                exact_answer = "✅ **Answer:**\n\n"
                for loc in exact_locations[:3]:
                    page = loc.get('page', '?')
                    para = loc.get('paragraph', '?')
                    line = loc.get('line', '?')
                    sentence = loc.get('sentence', '')
                    exact_answer += f"**Pg {page}, Para {para}, Line {line}:** \"{sentence}\"\n\n"
                exact_answer += "📘 **Source:**\n"
                for loc in exact_locations[:3]:
                    exact_answer += f"Page {loc.get('page', '?')} – Paragraph {loc.get('paragraph', '?')} – Line {loc.get('line', '?')}\n"
                exact_sources = [{
                    'title': 'Manual for Development Projects 2024',
                    'page': loc.get('page', '?'),
                    'paragraph': loc.get('paragraph', '?'),
                    'line': loc.get('line', '?')
                } for loc in exact_locations[:3]]
                exact_passages = [{
                    'text': loc.get('sentence', ''),
                    'page': loc.get('page', '?'),
                    'paragraph': loc.get('paragraph', '?'),
                    'line': loc.get('line', '?')
                } for loc in exact_locations[:3]]
            else:
                # Fall back to the RAG passages when nothing matched exactly.
                exact_answer = "📖 **No exact match found. Here are related passages:**\n\n"
                for i, p in enumerate(passages, 1):
                    exact_answer += f"**[{i}] Page {p['page']}** (Relevance: {p['relevance']}%)\n"
                    exact_answer += f"{p['text']}\n\n"
                exact_sources = sources
                exact_passages = passages
            add_to_session_history(session_id, "bot", exact_answer)
            print(f"[Widget API] Exact Mode response")
            return jsonify({
                'answer': exact_answer,
                'sources': exact_sources,
                'passages': exact_passages,
                'mode': 'exact'
            })
        # Combine conversation context with RAG context for better understanding
        full_context = rag_context
        if conversation_context:
            full_context = f"{conversation_context}\n\n---\n\nRelevant information from Manual:\n{rag_context}"
        # Extract page from first source for citation
        first_page = sources[0].get('page', 0) if sources else 0
        # GROQ MODE: Use Groq API for responses
        # v3.3.0: Check global force Groq mode OR request-level use_groq
        # NOTE(review): FORCE_GROQ_MODE is not defined in this part of the
        # file — presumably a module-level flag set further down (see the
        # /admin/groq-toggle endpoint in the module docstring); confirm.
        if use_groq or FORCE_GROQ_MODE:
            answer = generate_groq_response(query, full_context, page=first_page)
            response_mode = 'groq'
        else:
            # Generate answer using local model
            # Use higher max_new_tokens to avoid truncation
            llm = get_model()
            answer = llm.generate_response(query, full_context, max_new_tokens=200, page=first_page)
            response_mode = 'local'
        # Clean up answer
        if answer:
            answer = answer.strip()
            # Remove any prefix like "Answer:" if present
            for prefix in ["Answer:", "✅ Answer:", "Response:"]:
                if answer.startswith(prefix):
                    answer = answer[len(prefix):].strip()
        final_answer = answer or "I couldn't generate a response. Please try again."
        # v2.5.0-patch1: Handle long answers by adding page reference
        final_answer = handle_long_answer(final_answer, sources, query)
        # Add bot response to memory
        add_to_session_history(session_id, "bot", final_answer)
        # Generate contextual follow-up questions
        suggested_questions = generate_contextual_followups(query, final_answer, query_class)
        print(f"[Widget API] Response generated ({len(final_answer)} chars, mode: {response_mode})")
        return jsonify({
            'answer': final_answer,
            'sources': sources,
            'passages': passages,
            'mode': response_mode,
            'suggested_questions': suggested_questions
        })
    except Exception as e:
        # Catch-all so the widget always receives JSON, even on failure.
        print(f"[Widget API] Error: {e}")
        import traceback
        traceback.print_exc()
        return jsonify({
            'answer': f'Sorry, an error occurred: {str(e)}',
            'sources': [],
            'passages': []
        }), 500
@app.route('/feedback/answer', methods=['POST'])
def answer_feedback():
    """
    Save feedback for a specific answer as a timestamped JSON file under
    feedback/widget_answers/.

    Request:
        {
            "messageId": "...",
            "query": "...",
            "answer": "...",
            "type": "like" | "dislike",
            "reasonId": "...",
            "sessionId": "...",
            "timestamp": "..."
        }
    """
    try:
        data = request.get_json()
        # Create feedback directory if needed
        feedback_dir = os.path.join(os.path.dirname(__file__), 'feedback', 'widget_answers')
        os.makedirs(feedback_dir, exist_ok=True)
        # One file per feedback event, named by save time and like/dislike type
        timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
        filename = f"answer_{timestamp}_{data.get('type', 'unknown')}.json"
        filepath = os.path.join(feedback_dir, filename)
        with open(filepath, 'w', encoding='utf-8') as f:
            json.dump(data, f, indent=2, ensure_ascii=False)
        # Fix: log the actual saved filename (the placeholder was missing
        # from the f-string, so the log line carried no useful information).
        print(f"[Widget API] Answer feedback saved: {filename}")
        return jsonify({'success': True})
    except Exception as e:
        print(f"[Widget API] Feedback error: {e}")
        return jsonify({'success': False, 'error': str(e)}), 500
@app.route('/feedback/session', methods=['POST'])
def session_feedback():
"""
Save session feedback (rating, review).
Request:
{
"rating": 1-3,
"username": "...",
"review": "...",
"sessionId": "...",