- {transcript.map((entry, index) => (
+ {transcript.filter(entry => entry.speaker === "Candidate").map((entry, index) => (
Date: Thu, 13 Mar 2025 08:02:53 +0530
Subject: [PATCH 4/4] Wire speech recognition transcript to backend question generation, with mock-question fallback and recognition cleanup
---
src/pages/InterviewRoom.jsx | 312 +++++++++++++++++++++++++-----------
1 file changed, 219 insertions(+), 93 deletions(-)
diff --git a/src/pages/InterviewRoom.jsx b/src/pages/InterviewRoom.jsx
index f5f2ed5..494cd7f 100644
--- a/src/pages/InterviewRoom.jsx
+++ b/src/pages/InterviewRoom.jsx
@@ -33,6 +33,8 @@ const InterviewRoom = () => {
const questionTimerRef = useRef(null)
const fullScreenRef = useRef(null)
const transcriptIntervalRef = useRef(null)
+ const recognitionRef = useRef(null)
+ const lastTranscriptRef = useRef([])
// New states for coding challenge
const [showCodingModal, setShowCodingModal] = useState(false)
@@ -83,13 +85,8 @@ const InterviewRoom = () => {
],
})
- // Generate initial questions
- generateQuestions()
-
- // Set up question refresh timer
- questionTimerRef.current = setInterval(() => {
- generateQuestions()
- }, 20000)
+ // Initial API call to get questions
+ sendTranscript()
return () => {
if (questionTimerRef.current) {
@@ -99,6 +96,14 @@ const InterviewRoom = () => {
if (document.fullscreenElement && userRole === "candidate") {
document.exitFullscreen()
}
+ // Stop speech recognition if it's running
+ if (recognitionRef.current) {
+ recognitionRef.current.stop()
+ }
+ // Clear transcript interval
+ if (transcriptIntervalRef.current) {
+ clearInterval(transcriptIntervalRef.current)
+ }
}
}, [])
@@ -192,15 +197,17 @@ const InterviewRoom = () => {
}, [userRole, isFullScreen])
const generateQuestions = () => {
- // In a real app, these would be generated by an LLM based on the resume and transcript
- const newQuestions = [
- "Can you describe a challenging project you worked on at Google?",
- "How do you approach debugging complex issues in a microservices architecture?",
- "What's your experience with state management in React applications?",
- "How do you stay updated with the latest technologies in your field?",
- "Can you explain a situation where you had to make a difficult technical decision?",
+ // This function would typically fetch questions from an API
+ // For now, we'll use mock data
+ const mockQuestions = [
+ "Tell me about a challenging project you worked on recently.",
+ "How do you handle conflicts in a team?",
+ "What's your experience with React hooks?",
+ "Explain the concept of closures in JavaScript.",
+ "How would you optimize a slow-loading website?",
]
- setQuestions(newQuestions)
+
+ setQuestions(mockQuestions)
}
const handlePinQuestion = (question) => {
@@ -217,7 +224,7 @@ const InterviewRoom = () => {
}
const handleRefreshQuestions = () => {
- generateQuestions()
+ sendTranscript()
}
const handleSkillClick = (skill) => {
@@ -405,58 +412,161 @@ const InterviewRoom = () => {
const sendTranscript = async () => {
try {
+ // Get candidate transcript entries
+ const candidateTranscripts = transcript
+ .filter((entry) => entry.speaker === "Candidate")
+ .map((entry) => entry.text)
+
+ // Store current transcript for comparison
+ lastTranscriptRef.current = [...candidateTranscripts]
+
+ console.log("Sending transcript to backend:", candidateTranscripts)
+
const response = await fetch("http://127.0.0.1:5000/generate_question", {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({
- chunks: transcript.filter(entry => entry.speaker === "Candidate").map(entry => entry.text),
- selected_skills: [],
+ chunks: candidateTranscripts,
+ selected_skills: selectedSkill ? [selectedSkill.name] : [],
}),
})
if (!response.ok) {
const errorText = await response.text()
- throw new Error(errorText)
+ throw new Error(`API error: ${errorText}`)
}
const result = await response.json()
- console.log("Transcript sent successfully:", result)
+ console.log("API response:", result)
+
+ if (result.questions && Array.isArray(result.questions)) {
+ // Filter out empty strings and undefined values
+ const filteredQuestions = result.questions.filter((q) => q && q.trim() !== "")
+ console.log("Setting questions:", filteredQuestions)
+
+ if (filteredQuestions.length > 0) {
+ setQuestions(filteredQuestions)
+ } else {
+ console.warn("No valid questions received from API")
+ // Fallback to mock questions if API returns empty
+ generateQuestions()
+ }
+ } else {
+ console.error("Invalid questions format received:", result)
+ // Fallback to mock questions
+ generateQuestions()
+ }
} catch (error) {
console.error("Error sending transcript:", error)
+ // Fallback to mock questions on error
+ generateQuestions()
}
}
const startListening = () => {
const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition
+ if (!SpeechRecognition) {
+ console.error("Speech recognition not supported in this browser")
+ return
+ }
+
const recognition = new SpeechRecognition()
recognition.continuous = true
recognition.interimResults = true
recognition.lang = "en-US"
recognition.onresult = (event) => {
+ let interimTranscript = ""
let finalTranscript = ""
+
for (let i = event.resultIndex; i < event.results.length; i++) {
- finalTranscript += event.results[i][0].transcript
+ const transcript = event.results[i][0].transcript
+ if (event.results[i].isFinal) {
+ finalTranscript += transcript
+ } else {
+ interimTranscript += transcript
+ }
+ }
+
+ // Only update state if we have a final transcript
+ if (finalTranscript) {
+ console.log("Final speech recognized:", finalTranscript)
+ setTranscript((prevTranscript) => [...prevTranscript, { speaker: "Candidate", text: finalTranscript }])
+
+ // Trigger question generation if we have new transcript content
+ const newTranscriptEntry = { speaker: "Candidate", text: finalTranscript }
+ const updatedTranscript = [...transcript, newTranscriptEntry]
+
+ // Check if we have enough new content to send to backend
+ if (updatedTranscript.length > lastTranscriptRef.current.length + 2) {
+ sendTranscript()
+ }
}
- setTranscript((prevTranscript) => [...prevTranscript, { speaker: "Candidate", text: finalTranscript }])
}
recognition.onerror = (event) => {
console.error("Speech recognition error:", event.error)
+ // Restart recognition on error after a short delay
+ if (isRecording) {
+ setTimeout(() => {
+ if (isRecording && !recognitionRef.current) {
+ startListening()
+ }
+ }, 1000)
+ }
}
- recognition.start()
- transcriptIntervalRef.current = setInterval(sendTranscript, 15000)
+ recognition.onend = () => {
+ console.log("Speech recognition ended")
+ // Restart recognition if we're still recording
+ if (isRecording) {
+ recognition.start()
+ }
+ }
+
+ // Store the recognition instance in the ref
+ recognitionRef.current = recognition
+
+ // Start recognition
+ try {
+ recognition.start()
+ console.log("Speech recognition started")
+ } catch (error) {
+ console.error("Error starting speech recognition:", error)
+ }
+
+ // Set up interval to send transcript to backend
+ if (transcriptIntervalRef.current) {
+ clearInterval(transcriptIntervalRef.current)
+ }
+ transcriptIntervalRef.current = setInterval(sendTranscript, 30000)
}
const stopListening = () => {
+ if (recognitionRef.current) {
+ try {
+ recognitionRef.current.stop()
+ console.log("Speech recognition stopped")
+ } catch (error) {
+ console.error("Error stopping speech recognition:", error)
+ }
+ recognitionRef.current = null
+ }
+
if (transcriptIntervalRef.current) {
clearInterval(transcriptIntervalRef.current)
+ transcriptIntervalRef.current = null
}
+
+ // Send transcript one last time when stopping
+ sendTranscript()
}
+ // Filter out empty questions
+ const displayQuestions = questions.filter((q) => q && q.trim() !== "")
+
return (
{/* Role Confirmation Modal */}
@@ -597,6 +707,7 @@ const InterviewRoom = () => {
- {questions.map((question, index) => (
-
setHoveredQuestion(question)}
- onMouseLeave={() => setHoveredQuestion(null)}
- >
-
{question}
-
- {/* Star rating for interviewer */}
-
-
-
-
{pinnedQuestions.length > 0 && (
@@ -755,23 +873,30 @@ const InterviewRoom = () => {
Live Transcript
- {transcript.filter(entry => entry.speaker === "Candidate").map((entry, index) => (
-
+ {transcript.length > 0 ? (
+ transcript.map((entry, index) => (
-
{entry.speaker}
-
{entry.text}
+
+
{entry.speaker}
+
{entry.text}
+
+ ))
+ ) : (
+
+
No transcript available yet.
+
Start recording to capture the conversation.
- ))}
+ )}
{isRecording && (
@@ -1152,3 +1277,4 @@ const InterviewRoom = () => {
}
export default InterviewRoom
+