
Commit

Merge pull request #22 from EntrevistadorInteligente/feature/76241646
Adds the base video call flow
JamiltonQuintero authored Aug 30, 2024
2 parents 848b673 + 7858005 commit b4ca4f8
Showing 15 changed files with 870 additions and 0 deletions.
1 change: 1 addition & 0 deletions .gitignore
@@ -38,3 +38,4 @@ next-env.d.ts
.idea
package-lock.json
/*.yaml
/*.env
2 changes: 2 additions & 0 deletions package.json
@@ -9,7 +9,9 @@
"lint": "next lint"
},
"dependencies": {
"@heygen/streaming-avatar": "^1.0.16",
"@radix-ui/react-avatar": "^1.1.0",
"@radix-ui/react-dialog": "^1.1.1",
"@radix-ui/react-scroll-area": "^1.1.0",
"@radix-ui/react-select": "^2.1.1",
"@radix-ui/react-separator": "^1.1.0",
180 changes: 180 additions & 0 deletions src/app/(routes)/interview/video-call/dialog.tsx
@@ -0,0 +1,180 @@
import React, { useState, useEffect } from 'react';
import { Dialog, DialogContent, DialogHeader, DialogTitle } from "@/components/ui/dialog"
import { Button } from "@/components/ui/button"
import { Select, SelectContent, SelectItem, SelectTrigger, SelectValue } from "@/components/ui/select"

interface DeviceInfo {
deviceId: string;
label: string;
}

interface OptionsDialogProps {
isOpen: boolean;
onOpenChange: (open: boolean) => void;
selectedAvatar: any;
onStartInterview: (cameraId: string, microphoneId: string) => void;
}

export const OptionsDialog: React.FC<OptionsDialogProps> = ({
isOpen,
onOpenChange,
selectedAvatar,
onStartInterview
}) => {
const [cameraDevices, setCameraDevices] = useState<DeviceInfo[]>([]);
const [microphoneDevices, setMicrophoneDevices] = useState<DeviceInfo[]>([]);
const [selectedCamera, setSelectedCamera] = useState<string>('');
const [selectedMicrophone, setSelectedMicrophone] = useState<string>('');
const [isCameraReady, setIsCameraReady] = useState(false);
const [cameraError, setCameraError] = useState<string | null>(null);

useEffect(() => {
if (isOpen) {
requestPermissions();
}
}, [isOpen]);

const requestPermissions = async () => {
try {
await navigator.mediaDevices.getUserMedia({ video: true, audio: true });
await getDevices();
} catch (err) {
console.error('Error requesting permissions:', err);
setCameraError('Failed to get camera and microphone permissions. Please allow access and try again.');
}
};

const getDevices = async () => {
try {
const devices = await navigator.mediaDevices.enumerateDevices();
const cameras = devices.filter(device => device.kind === 'videoinput');
const microphones = devices.filter(device => device.kind === 'audioinput');
setCameraDevices(cameras.map(camera => ({ deviceId: camera.deviceId, label: camera.label })));
setMicrophoneDevices(microphones.map(mic => ({ deviceId: mic.deviceId, label: mic.label })));
if (cameras.length > 0) setSelectedCamera(cameras[0].deviceId);
if (microphones.length > 0) setSelectedMicrophone(microphones[0].deviceId);
} catch (err) {
console.error('Error getting devices:', err);
setCameraError('Failed to get camera and microphone devices. Please check your permissions.');
}
};

const initializeCamera = async () => {
if (!selectedCamera) {
console.error('No camera selected');
setCameraError('No camera selected. Please choose a camera from the list.');
return;
}

try {
const stream = await navigator.mediaDevices.getUserMedia({
video: { deviceId: { exact: selectedCamera } },
audio: false
});

// We're just testing if we can get the stream, then stopping it immediately
stream.getTracks().forEach(track => track.stop());
setIsCameraReady(true);
setCameraError(null);
} catch (err) {
console.error('Error initializing camera:', err);
let errorMessage = 'Failed to initialize camera. ';
if (err instanceof DOMException) {
switch (err.name) {
case 'NotFoundError':
errorMessage += 'Camera not found. Please ensure your camera is connected and not in use by another application.';
break;
case 'NotAllowedError':
errorMessage += 'Camera access denied. Please grant permission to use the camera.';
break;
case 'NotReadableError':
errorMessage += 'Could not start video source. Please try closing other applications that might be using the camera.';
break;
default:
errorMessage += 'Please check your camera permissions and try again.';
}
}
setCameraError(errorMessage);
setIsCameraReady(false);
}
};

useEffect(() => {
if (selectedCamera) {
initializeCamera();
}
}, [selectedCamera]);

const handleStartInterview = () => {
onStartInterview(selectedCamera, selectedMicrophone);
};

return (
<Dialog open={isOpen} onOpenChange={onOpenChange}>
<DialogContent>
<DialogHeader>
<DialogTitle>Start Interview with {selectedAvatar?.avatar_name}?</DialogTitle>
</DialogHeader>
<p>Are you ready to begin your AI-powered interview experience?</p>
{selectedAvatar && (
<video
src={selectedAvatar.preview_video_url}
className="w-full rounded-lg my-4"
controls
autoPlay
loop
muted
>
Your browser does not support the video tag.
</video>
)}
<div className="space-y-4">
{cameraDevices.length > 0 && (
<div>
<label htmlFor="camera-select" className="block text-sm font-medium text-gray-700">Camera</label>
<Select onValueChange={setSelectedCamera} value={selectedCamera}>
<SelectTrigger id="camera-select">
<SelectValue placeholder="Select a camera" />
</SelectTrigger>
<SelectContent>
{cameraDevices.map((device) => (
<SelectItem key={device.deviceId} value={device.deviceId}>
{device.label || `Camera ${device.deviceId.substr(0, 5)}`}
</SelectItem>
))}
</SelectContent>
</Select>
</div>
)}
{microphoneDevices.length > 0 && (
<div>
<label htmlFor="microphone-select" className="block text-sm font-medium text-gray-700">Microphone</label>
<Select onValueChange={setSelectedMicrophone} value={selectedMicrophone}>
<SelectTrigger id="microphone-select">
<SelectValue placeholder="Select a microphone" />
</SelectTrigger>
<SelectContent>
{microphoneDevices.map((device) => (
<SelectItem key={device.deviceId} value={device.deviceId}>
{device.label || `Microphone ${device.deviceId.substr(0, 5)}`}
</SelectItem>
))}
</SelectContent>
</Select>
</div>
)}
<Button
onClick={handleStartInterview}
className="w-full bg-black text-white hover:bg-gray-800"
disabled={!isCameraReady || !!cameraError}
>
{isCameraReady ? 'Begin Interview' : 'Initializing Camera...'}
</Button>
{cameraError && (
<p className="text-red-500 text-sm">{cameraError}</p>
)}
</div>
</DialogContent>
</Dialog>
);
};
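The dialog's device handling depends on a browser behavior worth noting: `enumerateDevices()` only returns human-readable device labels after the user has granted media permissions, which is why `requestPermissions` calls `getUserMedia` before enumerating. A minimal standalone sketch of that preflight pattern, using only standard browser APIs (no project code assumed) and stopping the temporary tracks once the labels have been read:

```ts
// Sketch of the permission-then-enumerate preflight used by OptionsDialog.
// Device labels are empty strings until getUserMedia has been granted at least once.
async function listMediaDevices(): Promise<{
  cameras: MediaDeviceInfo[];
  microphones: MediaDeviceInfo[];
}> {
  // Request both tracks so camera and microphone labels become visible.
  const stream = await navigator.mediaDevices.getUserMedia({ video: true, audio: true });
  try {
    const devices = await navigator.mediaDevices.enumerateDevices();
    return {
      cameras: devices.filter((d) => d.kind === 'videoinput'),
      microphones: devices.filter((d) => d.kind === 'audioinput'),
    };
  } finally {
    // Release the temporary tracks; the dialog re-opens the chosen camera later.
    stream.getTracks().forEach((track) => track.stop());
  }
}
```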
50 changes: 50 additions & 0 deletions src/app/(routes)/interview/video-call/interviewerListProps .tsx
@@ -0,0 +1,50 @@
import React from 'react';
import { Card, CardContent } from "@/components/ui/card"
import { Button } from "@/components/ui/button"
import { Avatar, AvatarImage, AvatarFallback } from "@/components/ui/avatar"
import { ScrollArea } from "@/components/ui/scroll-area"

interface Avatar {
avatar_id: string;
avatar_name: string;
gender: string;
preview_image_url: string;
preview_video_url: string;
}

interface InterviewerListProps {
avatars: Avatar[];
onSelectAvatar: (avatar: Avatar) => void;
}

export const InterviewerList: React.FC<InterviewerListProps> = ({ avatars, onSelectAvatar }) => {
return (
<Card className="flex-grow overflow-hidden">
<CardContent className="p-6 h-full">
<ScrollArea className="h-full">
<div className="grid grid-cols-1 sm:grid-cols-2 md:grid-cols-3 lg:grid-cols-4 gap-4">
{avatars.map((avatar) => (
<Card key={avatar.avatar_id} className="flex flex-col">
<CardContent className="p-4 flex-grow flex flex-col justify-between">
<div>
<Avatar className="w-32 h-32 mx-auto mb-4">
<AvatarImage src={avatar.preview_image_url} alt={avatar.avatar_name} />
<AvatarFallback>{avatar.avatar_name[0]}</AvatarFallback>
</Avatar>
<h2 className="text-xl font-semibold text-center mb-4">{avatar.avatar_name}</h2>
</div>
<Button
className="w-full"
onClick={() => onSelectAvatar(avatar)}
>
Select
</Button>
</CardContent>
</Card>
))}
</div>
</ScrollArea>
</CardContent>
</Card>
);
};
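For quick reference, this change consumes the component in exactly one place (page.tsx, shown next): the fetched avatar list goes in, and the selection callback opens the confirmation dialog. Both `avatars` and `handleSelectAvatar` below come straight from page.tsx.

```tsx
<InterviewerList avatars={avatars} onSelectAvatar={handleSelectAvatar} />
```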
147 changes: 147 additions & 0 deletions src/app/(routes)/interview/video-call/page.tsx
@@ -0,0 +1,147 @@
'use client'

import React, { useState, useEffect } from 'react'
import { createVideoChatManagerService } from '@/modules/interview_manager/application/service/videoChatManagerService'
import { createVideoChatManagerRepositoryAdapter } from '@/modules/interview_manager/infrastructure/adapter/videoChatManagerRepositoryAdapter'
import { Skeleton } from "@/components/ui/skeleton"
import { Button } from "@/components/ui/button"
import { OptionsDialog } from './dialog'
import { VideoCall } from './videoCall'
import { InterviewerList } from './interviewerListProps '

const api_token = "MDdlYzkyNjljY2M2NDQyZjg1ZTAwYjQxMDQ2OWZkMGYtMTcyMjM5NzAxMA=="

interface Avatar {
avatar_id: string;
avatar_name: string;
gender: string;
preview_image_url: string;
preview_video_url: string;
}

export default function Page() {
const [avatars, setAvatars] = useState<Avatar[]>([])
const [selectedAvatar, setSelectedAvatar] = useState<Avatar | null>(null)
const [isInterviewStarted, setIsInterviewStarted] = useState(false)
const [isLoading, setIsLoading] = useState(true)
const [error, setError] = useState<string | null>(null)
const [streamingToken, setStreamingToken] = useState<string | null>(null)
const [isConfirmDialogOpen, setIsConfirmDialogOpen] = useState(false)
const [selectedCameraId, setSelectedCameraId] = useState<string>('')
const [selectedMicrophoneId, setSelectedMicrophoneId] = useState<string>('')

const videoChatRepositoryPort = createVideoChatManagerRepositoryAdapter();
const videoChatService = createVideoChatManagerService(videoChatRepositoryPort);

useEffect(() => {
const fetchAvatars = async () => {
if (!api_token) {
throw new Error("API token is not defined")
}
try {
const response = await fetch('https://api.heygen.com/v2/avatars', {
headers: {
'x-api-key': api_token,
},
})
if (!response.ok) throw new Error('Failed to fetch avatars')
const data = await response.json()
const filteredAvatars = data.data.avatars
.filter((avatar: Avatar) =>
!avatar.avatar_name.includes('(Left)') && !avatar.avatar_name.includes('(Right)')
)
.map((avatar: Avatar) => ({
...avatar,
avatar_name: avatar.avatar_name.replace(' (Front)', '')
}))
setAvatars(filteredAvatars)
} catch (err) {
setError('Failed to load avatars. Please try again later.')
} finally {
setIsLoading(false)
}
}

fetchAvatars()
}, [])

const getStreamingToken = async () => {
try {
const response = await videoChatService.getToken()
if (!response) throw new Error('Failed to get streaming token')
setStreamingToken(response.token)
} catch (err) {
setError('Failed to get streaming token. Please try again.')
}
}

const handleSelectAvatar = (avatar: Avatar) => {
setSelectedAvatar(avatar)
getStreamingToken()
setIsConfirmDialogOpen(true)
}

const handleStartInterview = (cameraId: string, microphoneId: string) => {
setSelectedCameraId(cameraId)
setSelectedMicrophoneId(microphoneId)
setIsInterviewStarted(true)
setIsConfirmDialogOpen(false)
}

const handleEndInterview = () => {
setIsInterviewStarted(false)
setSelectedAvatar(null)
setStreamingToken(null)
setSelectedCameraId('')
setSelectedMicrophoneId('')
}

if (isLoading) {
return (
<div className="container mx-auto p-4 h-screen flex flex-col items-center justify-center">
<Skeleton className="w-[300px] h-[200px] rounded-lg" />
<Skeleton className="w-[250px] h-[20px] mt-4" />
<Skeleton className="w-[200px] h-[40px] mt-4" />
</div>
)
}

if (error) {
return (
<div className="container mx-auto p-4 h-screen flex flex-col items-center justify-center">
<p className="text-red-500 text-xl">{error}</p>
<Button onClick={() => window.location.reload()} className="mt-4">
Retry
</Button>
</div>
)
}

return (
<div className="container mx-auto p-4 h-screen flex flex-col">
<h1 className="text-3xl font-bold mb-6">AI Interview Experience</h1>

{!isInterviewStarted ? (
<InterviewerList
avatars={avatars}
onSelectAvatar={handleSelectAvatar}
/>
) : (
<VideoCall
selectedAvatar={selectedAvatar!}
streamingToken={streamingToken!}
cameraId={selectedCameraId}
microphoneId={selectedMicrophoneId}
onEndInterview={handleEndInterview}
/>
)}

<OptionsDialog
isOpen={isConfirmDialogOpen}
onOpenChange={setIsConfirmDialogOpen}
selectedAvatar={selectedAvatar}
onStartInterview={handleStartInterview}
/>
</div>
)
}
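page.tsx relies on two files from this same PR that are not shown in this excerpt: the VideoCall component and the token service built via createVideoChatManagerService(createVideoChatManagerRepositoryAdapter()). The only contract the page uses is a getToken() call resolving to an object with a token string, in the ports-and-adapters layout suggested by the module paths (application/service and infrastructure/adapter). The sketch below only illustrates that shape under stated assumptions; the endpoint path /api/video-chat/token is a placeholder, not taken from this diff.

```ts
// Sketch only: the real adapter and service files ship in this PR but are not shown above.

// Port: the contract page.tsx depends on.
export interface VideoChatManagerRepositoryPort {
  getToken(): Promise<{ token: string }>;
}

// Infrastructure adapter: fetches the streaming token from a backend.
// The endpoint path is a placeholder assumption, not taken from this diff.
export function createVideoChatManagerRepositoryAdapter(): VideoChatManagerRepositoryPort {
  return {
    async getToken() {
      const response = await fetch('/api/video-chat/token', { method: 'POST' });
      if (!response.ok) throw new Error('Failed to get streaming token');
      return response.json();
    },
  };
}

// Application service: a thin pass-through over the port, mirroring how
// page.tsx calls videoChatService.getToken().
export function createVideoChatManagerService(repository: VideoChatManagerRepositoryPort) {
  return {
    getToken: () => repository.getToken(),
  };
}
```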