Commit: trying something new

agha-naveed committed Sep 10, 2024
1 parent 1707fe9 commit 3321d26
Showing 8 changed files with 257 additions and 6 deletions.
5 changes: 2 additions & 3 deletions Readme.md
@@ -16,6 +16,5 @@ Welcome to Agha Chatbot! This AI-powered chatbot is designed to provide intellig
 
 We welcome contributions to enhance Agha Chatbot. To contribute, please Fork the repository.
 
-### Contact
-For any questions or support, please contact us at naveedabs31@gmail.com.
-
+## Contact
+For any questions or support, please contact us at naveedabs31@gmail.com
17 changes: 16 additions & 1 deletion package-lock.json

Some generated files are not rendered by default.

4 changes: 3 additions & 1 deletion package.json
@@ -20,7 +20,9 @@
     "dompurify": "^3.1.6",
     "react": "^18.3.1",
     "react-dom": "^18.3.1",
-    "react-icons": "^5.3.0"
+    "react-icons": "^5.3.0",
+    "react-speech-recognition": "^3.10.0",
+    "typed.js": "^2.1.0"
   },
   "devDependencies": {
     "@eslint/js": "^9.9.0",
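Neither new dependency is imported anywhere in this commit; the two new components below drive the browser's Web Speech API directly. For reference, a minimal sketch of the hook-based react-speech-recognition alternative (the component name and wiring are illustrative; the hook and its return values follow that library's documented API):

// Sketch only — the hook-based alternative to the manual SpeechRecognition
// wiring used in SpeechToText.jsx / VoiceToText.jsx below.
import React from 'react';
import SpeechRecognition, { useSpeechRecognition } from 'react-speech-recognition';

export default function HookSpeechToText() {
  const { transcript, listening, browserSupportsSpeechRecognition } =
    useSpeechRecognition();

  if (!browserSupportsSpeechRecognition) {
    return <p>This browser does not support speech recognition.</p>;
  }

  return (
    <div>
      <p>{listening ? 'Listening…' : 'Idle'}</p>
      <button onClick={() => SpeechRecognition.startListening({ continuous: true })}>
        Start
      </button>
      <button onClick={SpeechRecognition.stopListening}>Stop</button>
      <p>{transcript}</p>
    </div>
  );
}

typed.js is likewise added but unused in this commit; it only needs `new Typed(element, { strings, typeSpeed })` once there is an element to animate.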
12 changes: 12 additions & 0 deletions src/App.jsx
@@ -0,0 +1,12 @@
import React from 'react';
import SpeechToText from './SpeechToText';

function App() {
  return (
    <div>
      <SpeechToText />
    </div>
  );
}

export default App;
107 changes: 107 additions & 0 deletions src/SpeechToText.jsx
@@ -0,0 +1,107 @@
import React, { useState, useEffect, useRef } from 'react';

// Web Speech API — Chrome still exposes it under the webkit prefix.
// This assumes a supporting browser; new SpeechRecognition() throws elsewhere.
const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
const recognition = new SpeechRecognition();

recognition.continuous = true;     // keep listening across pauses
recognition.interimResults = true; // deliver partial results while the user speaks

function ChatBot() {
  const [transcript, setTranscript] = useState(''); // Final transcript
  const [interim, setInterim] = useState('');       // Interim transcript
  const isRecognitionActive = useRef(false);
  const inputRef = useRef(null);

  useEffect(() => {
    recognition.onstart = () => {
      console.log('Voice recognition started.');
      isRecognitionActive.current = true;
    };

    recognition.onresult = (event) => {
      let interimTranscript = ''; // Temporary holder for interim results
      let finalTranscript = '';   // Temporary holder for final results

      // Loop through the results
      for (let i = event.resultIndex; i < event.results.length; i++) {
        const speechResult = event.results[i][0].transcript;

        if (event.results[i].isFinal) {
          // If it's a final result, add it to the final transcript
          finalTranscript += speechResult;

          // Check if the wake phrase "OK sir" was said
          if (speechResult.toLowerCase().includes('ok sir')) {
            inputRef.current.focus(); // Focus the input field
            finalTranscript = finalTranscript.replace(/ok sir/i, '').trim();
          }
        } else {
          // Append the interim transcript (not final yet)
          interimTranscript += speechResult;
        }
      }

      // Append final results to the final transcript state
      setTranscript((prev) => prev + finalTranscript);

      // Track the interim transcript separately so it doesn't overwrite final results
      setInterim(interimTranscript);
    };

    recognition.onend = () => {
      console.log('Voice recognition ended.');
      // Restart automatically so listening stays continuous
      recognition.start();
      isRecognitionActive.current = true;
      console.log('Voice recognition restarted.');
    };

    recognition.onerror = (event) => {
      console.error('Error occurred in speech recognition:', event.error);
      isRecognitionActive.current = false;
      // No restart here: onend fires after every error and already restarts;
      // calling start() again from this handler would throw "already started".
    };

    // Request microphone access, then start listening
    navigator.mediaDevices.getUserMedia({ audio: true })
      .then(() => {
        console.log('Microphone access granted');
        if (!isRecognitionActive.current) {
          recognition.start();
          isRecognitionActive.current = true;
          console.log('Voice recognition started on mount.');
        }
      })
      .catch((error) => {
        console.error('Microphone permission denied:', error);
      });

    return () => {
      // Detach onend first so stop() doesn't trigger an automatic restart
      recognition.onend = null;
      recognition.stop();
      isRecognitionActive.current = false;
      console.log('Voice recognition stopped on unmount.');
    };
  }, []);

  return (
    <div>
      <h1>ChatBot</h1>
      <input
        ref={inputRef}
        type="text"
        value={transcript + interim} // Show final + interim transcripts in the input
        onChange={(e) => setTranscript(e.target.value)}
        placeholder="Your message"
      />
      <p>Listening for "OK sir!"</p>
    </div>
  );
}

export default ChatBot;
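One caveat with the module-level setup above: constructing the recognizer at import time still fails outright in browsers without the Web Speech API (Firefox, notably). A minimal guard sketch, assuming no polyfill is loaded:

// Sketch: construct the recognizer only when the API exists.
const Impl = window.SpeechRecognition || window.webkitSpeechRecognition;
const recognition = Impl ? new Impl() : null;

if (recognition) {
  recognition.continuous = true;
  recognition.interimResults = true;
} else {
  console.warn('Web Speech API not supported; voice input disabled.');
}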
108 changes: 108 additions & 0 deletions src/VoiceToText.jsx
@@ -0,0 +1,108 @@
import React, { useState, useEffect, useRef } from 'react';

// Web Speech API — Chrome still exposes it under the webkit prefix.
// This assumes a supporting browser; new SpeechRecognition() throws elsewhere.
const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
const recognition = new SpeechRecognition();

recognition.continuous = true;     // keep listening across pauses
recognition.interimResults = true; // deliver partial results while the user speaks

export default function VoiceToText() {
  const [transcript, setTranscript] = useState(''); // Final transcript
  const [interim, setInterim] = useState('');       // Interim transcript
  const isRecognitionActive = useRef(false);
  const inputRef = useRef(null);

  useEffect(() => {
    recognition.onstart = () => {
      console.log('Voice recognition started.');
      isRecognitionActive.current = true;
    };

    recognition.onresult = (event) => {
      let interimTranscript = ''; // Temporary holder for interim results
      let finalTranscript = '';   // Temporary holder for final results

      // Loop through the results
      for (let i = event.resultIndex; i < event.results.length; i++) {
        const speechResult = event.results[i][0].transcript;

        if (event.results[i].isFinal) {
          // If it's a final result, add it to the final transcript
          finalTranscript += speechResult;

          // Check if the wake phrase "OK sir" was said
          if (speechResult.toLowerCase().includes('ok sir')) {
            inputRef.current.focus(); // Focus the input field
            finalTranscript = finalTranscript.replace(/ok sir/i, '').trim();
          }
        } else {
          // Append the interim transcript (not final yet)
          interimTranscript += speechResult;
        }
      }

      // Append final results to the final transcript state
      setTranscript((prev) => prev + finalTranscript);

      // Track the interim transcript separately so it doesn't overwrite final results
      setInterim(interimTranscript);
    };

    recognition.onend = () => {
      console.log('Voice recognition ended.');
      // Restart automatically so listening stays continuous
      recognition.start();
      isRecognitionActive.current = true;
      console.log('Voice recognition restarted.');
    };

    recognition.onerror = (event) => {
      console.error('Error occurred in speech recognition:', event.error);
      isRecognitionActive.current = false;
      // No restart here: onend fires after every error and already restarts;
      // calling start() again from this handler would throw "already started".
    };

    // Request microphone access, then start listening
    navigator.mediaDevices.getUserMedia({ audio: true })
      .then(() => {
        console.log('Microphone access granted');
        if (!isRecognitionActive.current) {
          recognition.start();
          isRecognitionActive.current = true;
          console.log('Voice recognition started on mount.');
        }
      })
      .catch((error) => {
        console.error('Microphone permission denied:', error);
      });

    return () => {
      // Detach onend first so stop() doesn't trigger an automatic restart
      recognition.onend = null;
      recognition.stop();
      isRecognitionActive.current = false;
      console.log('Voice recognition stopped on unmount.');
    };
  }, []);

  return (
    <div>
      <h1>ChatBot</h1>
      <input
        ref={inputRef}
        type="text"
        value={transcript + interim} // Show final + interim transcripts in the input
        onChange={(e) => setTranscript(e.target.value)}
        placeholder="Your message"
      />
      <p>Listening for "OK sir!"</p>
    </div>
  );
}
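VoiceToText.jsx duplicates the wake-phrase handling from SpeechToText.jsx verbatim. A small helper like the hypothetical `stripWakePhrase` below could centralize it (a sketch, not part of the commit):

// Hypothetical helper (not in the commit): detect and strip the wake phrase.
const WAKE_PHRASE = /ok sir/gi;

export function stripWakePhrase(text) {
  const heard = WAKE_PHRASE.test(text);
  WAKE_PHRASE.lastIndex = 0; // reset the global regex's sticky index
  return { heard, text: text.replace(WAKE_PHRASE, '').trim() };
}

// Example: stripWakePhrase('OK sir open the menu')
//   -> { heard: true, text: 'open the menu' }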
5 changes: 4 additions & 1 deletion src/components/Homepage.jsx
@@ -3,6 +3,7 @@ import DOMPurify from 'dompurify';
 import "@fontsource/poppins"
 import logo from '../assets/my-logo.webp'
 import { IoSearch, IoSettings, IoCloseOutline, IoAdd } from "react-icons/io5";
+import { FaMicrophone } from "react-icons/fa";
 import { FiMessageSquare } from "react-icons/fi";
 import { CgMenuLeft } from "react-icons/cg";
 import { Context } from '../context/Context';
@@ -129,7 +130,9 @@ export default function Homepage() {
 <div className='w-full flex'>
 
 <input type="text" ref={ref} placeholder='Enter Prompt...' className='w-full h-12 text-black border-none outline-none rounded-l-3xl pl-6 pr-1' onChange={getSearchData} value={input} onKeyDown={pressEnter} />
-
+<button className="search-icon bg-white h-auto px-[7px]">
+  <FaMicrophone className='w-[40px] h-[40px] p-[7px] text-2xl text-white rounded-full bg-slate-800' />
+</button>
 <button onClick={() => {input && onSent()}} className="search-icon bg-white rounded-r-3xl h-auto px-[7px]">
 <IoSearch className='w-[40px] h-[40px] p-[7px] text-2xl text-white rounded-full bg-slate-800' />
 </button>
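The new microphone button renders without a click handler, so it is purely visual in this commit. One way it could be wired up later (the `recognition` prop and toggle logic are assumptions, not part of the commit):

// Hypothetical sketch — not in the commit. Toggles a recognition instance
// passed in from the speech components above.
import React, { useState } from 'react';
import { FaMicrophone } from 'react-icons/fa';

export default function MicButton({ recognition }) {
  const [listening, setListening] = useState(false);

  const toggleListening = () => {
    if (listening) recognition.stop();
    else recognition.start();
    setListening(!listening);
  };

  return (
    <button onClick={toggleListening} className="search-icon bg-white h-auto px-[7px]">
      <FaMicrophone className='w-[40px] h-[40px] p-[7px] text-2xl text-white rounded-full bg-slate-800' />
    </button>
  );
}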
5 changes: 5 additions & 0 deletions src/main.jsx
@@ -3,13 +3,18 @@ import { createRoot } from 'react-dom/client'
 import './index.css'
 import Homepage from './components/Homepage'
 import ContextProvider from './context/Context'
+import VoiceToText from './VoiceToText'
+import App from './App'
 
 // import
 
 createRoot(document.getElementById('root')).render(
   <StrictMode>
     <ContextProvider>
+
       <Homepage />
     </ContextProvider>
+    <VoiceToText />
+    {/* <App /> */}
   </StrictMode>,
 )
