diff --git a/src/pages/InterviewRoom.jsx b/src/pages/InterviewRoom.jsx
index 1dad972..494cd7f 100644
--- a/src/pages/InterviewRoom.jsx
+++ b/src/pages/InterviewRoom.jsx
@@ -32,6 +32,9 @@ const InterviewRoom = () => {
const videoRef = useRef(null)
const questionTimerRef = useRef(null)
const fullScreenRef = useRef(null)
+ const transcriptIntervalRef = useRef(null)
+ const recognitionRef = useRef(null)
+ const lastTranscriptRef = useRef([])
// New states for coding challenge
const [showCodingModal, setShowCodingModal] = useState(false)
@@ -82,21 +85,8 @@ const InterviewRoom = () => {
],
})
- // Mock transcript data
- const mockTranscript = [
- { speaker: "Interviewer", text: "Hello! Thanks for joining us today. How are you doing?" },
- { speaker: "Candidate", text: "I'm doing well, thank you for having me. I'm excited to be here." },
- { speaker: "Interviewer", text: "Great! Let's start by discussing your experience with React." },
- ]
- setTranscript(mockTranscript)
-
- // Generate initial questions
- generateQuestions()
-
- // Set up question refresh timer
- questionTimerRef.current = setInterval(() => {
- generateQuestions()
- }, 20000)
+ // Initial API call to get questions
+ sendTranscript()
return () => {
if (questionTimerRef.current) {
@@ -106,6 +96,14 @@ const InterviewRoom = () => {
if (document.fullscreenElement && userRole === "candidate") {
document.exitFullscreen()
}
+ // Stop speech recognition if it's running
+ if (recognitionRef.current) {
+ recognitionRef.current.stop()
+ }
+ // Clear transcript interval
+ if (transcriptIntervalRef.current) {
+ clearInterval(transcriptIntervalRef.current)
+ }
}
}, [])
@@ -199,15 +197,17 @@ const InterviewRoom = () => {
}, [userRole, isFullScreen])
const generateQuestions = () => {
  // Fallback question source: used when the question-generation API is
  // unavailable or returns no usable data. In a real deployment these
  // would come from an LLM conditioned on the resume and live transcript.
  const fallbackQuestions = [
    "Tell me about a challenging project you worked on recently.",
    "How do you handle conflicts in a team?",
    "What's your experience with React hooks?",
    "Explain the concept of closures in JavaScript.",
    "How would you optimize a slow-loading website?",
  ]
  setQuestions(fallbackQuestions)
}
const handlePinQuestion = (question) => {
@@ -224,7 +224,7 @@ const InterviewRoom = () => {
}
const handleRefreshQuestions = () => {
  // Manual refresh: re-query the backend with the latest transcript
  // rather than regenerating the local mock questions.
  sendTranscript()
}
const handleSkillClick = (skill) => {
@@ -234,12 +234,12 @@ const InterviewRoom = () => {
const handleStartRecording = () => {
  // Flag recording in the UI, then begin browser speech recognition.
  setIsRecording(true)
  startListening()
}
const handleStopRecording = () => {
  // Clear the recording flag, then tear down speech recognition.
  setIsRecording(false)
  stopListening()
}
const handleEndInterview = () => {
@@ -304,6 +304,7 @@ const InterviewRoom = () => {
console.error(`Error attempting to enable full-screen mode: ${err.message}`)
})
setIsFullScreen(true)
+ handleStartRecording() // Automatically start recording when the candidate joins the meeting
}
}
@@ -409,6 +410,163 @@ const InterviewRoom = () => {
return
}
const sendTranscript = async () => {
  // Send the candidate's speech chunks to the question-generation service
  // and replace the suggested questions with the response. Falls back to
  // the local mock questions on any failure so the panel is never empty.
  // NOTE(review): reads `transcript` from the enclosing render's closure;
  // when invoked from the long-lived 30s interval this value may be
  // stale — confirm against how the interval is (re)installed.
  try {
    const candidateTranscripts = transcript
      .filter((entry) => entry.speaker === "Candidate")
      .map((entry) => entry.text)

    // Remember what was sent so later speech events can detect new content.
    lastTranscriptRef.current = [...candidateTranscripts]

    console.log("Sending transcript to backend:", candidateTranscripts)

    const response = await fetch("http://127.0.0.1:5000/generate_question", {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
      },
      body: JSON.stringify({
        chunks: candidateTranscripts,
        selected_skills: selectedSkill ? [selectedSkill.name] : [],
      }),
    })

    if (!response.ok) {
      const errorText = await response.text()
      throw new Error(`API error: ${errorText}`)
    }

    const result = await response.json()
    console.log("API response:", result)

    if (result.questions && Array.isArray(result.questions)) {
      // Guard against non-string entries as well as empty strings so a
      // malformed payload cannot throw on `.trim()`.
      const filteredQuestions = result.questions.filter(
        (q) => typeof q === "string" && q.trim() !== "",
      )
      console.log("Setting questions:", filteredQuestions)

      if (filteredQuestions.length > 0) {
        setQuestions(filteredQuestions)
      } else {
        console.warn("No valid questions received from API")
        // Fallback to mock questions if the API returns an empty list.
        generateQuestions()
      }
    } else {
      console.error("Invalid questions format received:", result)
      // Fallback to mock questions on a malformed payload.
      generateQuestions()
    }
  } catch (error) {
    console.error("Error sending transcript:", error)
    // Fallback to mock questions on network/API error.
    generateQuestions()
  }
}
+
const startListening = () => {
  // Begin continuous browser speech recognition for the candidate and
  // schedule periodic transcript uploads to the question API.
  const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition
  if (!SpeechRecognition) {
    console.error("Speech recognition not supported in this browser")
    return
  }

  const recognition = new SpeechRecognition()
  recognition.continuous = true
  recognition.interimResults = true
  recognition.lang = "en-US"

  recognition.onresult = (event) => {
    // Accumulate only finalized results; interim results are ignored.
    let finalTranscript = ""
    for (let i = event.resultIndex; i < event.results.length; i++) {
      if (event.results[i].isFinal) {
        finalTranscript += event.results[i][0].transcript
      }
    }
    if (!finalTranscript) return

    console.log("Final speech recognized:", finalTranscript)

    // Use the functional updater so the decision below is based on the
    // CURRENT transcript, not a stale closure over `transcript`. The
    // previous code also compared the full transcript length against the
    // candidate-only `lastTranscriptRef` — compare like with like here.
    setTranscript((prevTranscript) => {
      const next = [...prevTranscript, { speaker: "Candidate", text: finalTranscript }]
      const candidateCount = next.filter((e) => e.speaker === "Candidate").length
      if (candidateCount > lastTranscriptRef.current.length + 2) {
        // Enough new speech accumulated — refresh suggested questions.
        // Deferred so the state updater itself stays side-effect free.
        setTimeout(sendTranscript, 0)
      }
      return next
    })
  }

  recognition.onerror = (event) => {
    console.error("Speech recognition error:", event.error)
    // `onend` fires after an error and owns the restart logic; restarting
    // here as well would double-start the recognizer.
  }

  recognition.onend = () => {
    console.log("Speech recognition ended")
    // Auto-restart only while this instance is still the active one.
    // stopListening() clears the ref, which cleanly breaks the loop.
    // (Checking the ref instead of `isRecording` avoids the stale-closure
    // value captured before the state update re-rendered.)
    if (recognitionRef.current === recognition) {
      try {
        recognition.start()
      } catch (error) {
        console.error("Error restarting speech recognition:", error)
      }
    }
  }

  // Publish the active instance before starting so handlers can see it.
  recognitionRef.current = recognition

  try {
    recognition.start()
    console.log("Speech recognition started")
  } catch (error) {
    console.error("Error starting speech recognition:", error)
  }

  // Periodically push the transcript to the backend (replace any timer
  // left over from a previous listening session).
  if (transcriptIntervalRef.current) {
    clearInterval(transcriptIntervalRef.current)
  }
  transcriptIntervalRef.current = setInterval(sendTranscript, 30000)
}
+
const stopListening = () => {
  // Tear down speech recognition and the periodic upload timer, then
  // push the final transcript to the backend once.
  const recognition = recognitionRef.current
  if (recognition) {
    // Clear the ref BEFORE stopping: `onend` fires asynchronously after
    // stop(), and handlers use the ref to decide whether this instance
    // is still active and may restart itself.
    recognitionRef.current = null
    try {
      recognition.stop()
      console.log("Speech recognition stopped")
    } catch (error) {
      console.error("Error stopping speech recognition:", error)
    }
  }

  if (transcriptIntervalRef.current) {
    clearInterval(transcriptIntervalRef.current)
    transcriptIntervalRef.current = null
  }

  // Fire-and-forget: sendTranscript catches its own errors internally.
  void sendTranscript()
}
+
+ // Filter out empty questions
+ const displayQuestions = questions.filter((q) => q && q.trim() !== "")
+
return (
{/* Role Confirmation Modal */}
@@ -549,6 +707,7 @@ const InterviewRoom = () => {
- {questions.map((question, index) => (
-
setHoveredQuestion(question)}
- onMouseLeave={() => setHoveredQuestion(null)}
- >
-
{question}
-
- {/* Star rating for interviewer */}
-
-
-
-
{pinnedQuestions.length > 0 && (
@@ -707,23 +873,30 @@ const InterviewRoom = () => {
Live Transcript
- {transcript.map((entry, index) => (
-
+ {transcript.length > 0 ? (
+ transcript.map((entry, index) => (
-
{entry.speaker}
-
{entry.text}
+
+
{entry.speaker}
+
{entry.text}
+
+ ))
+ ) : (
+
+
No transcript available yet.
+
Start recording to capture the conversation.
- ))}
+ )}
{isRecording && (