Skip to content

Commit

Permalink
Update CI & test-suite. (#57)
Browse files Browse the repository at this point in the history
  • Loading branch information
zh-plus authored Sep 10, 2024
1 parent 0908a65 commit b31d499
Show file tree
Hide file tree
Showing 3 changed files with 12 additions and 13 deletions.
3 changes: 2 additions & 1 deletion openlrc/openlrc.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,8 +32,9 @@ class LRCer:
whisper_model (str): Name of whisper model (tiny, tiny.en, base, base.en, small, small.en, medium,
medium.en, large-v1, large-v2, large-v3, distil-large-v3). When a size is configured,
the converted model is downloaded from the Hugging Face Hub. Default: ``large-v3``
compute_type (str): The type of computation to use. Can be ``int8``, ``int8_float16``, ``int16``,
compute_type (str): The type of computation to use. Can be ``default``, ``int8``, ``int8_float16``, ``int16``,
``float16`` or ``float32``. Default: ``float16``
Note: ``default`` will keep the same quantization that was used during model conversion.
device (str): The device to use for computation. Default: ``cuda``
chatbot_model (str): The chatbot model to use, check the available models using list_chatbot_models().
Default: ``gpt-4o-mini``
Expand Down
12 changes: 6 additions & 6 deletions tests/test_openlrc.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,36 +56,36 @@ def clear_paths(input_path):
@patch('openlrc.translate.LLMTranslator.translate',
MagicMock(return_value=['test translation1', 'test translation2']))
def test_single_audio_transcription_translation(self):
lrcer = LRCer(whisper_model='tiny')
lrcer = LRCer(whisper_model='tiny', device='cpu', compute_type='default')
result = lrcer.run(self.audio_path)
self.assertTrue(result)

@patch('openlrc.translate.LLMTranslator.translate',
MagicMock(return_value=['test translation1', 'test translation2']))
def test_multiple_audio_transcription_translation(self):
lrcer = LRCer(whisper_model='tiny')
lrcer = LRCer(whisper_model='tiny', device='cpu', compute_type='default')
result = lrcer.run([self.audio_path, self.video_path])
self.assertTrue(result)
self.assertEqual(len(result), 2)

def test_audio_file_not_found(self):
lrcer = LRCer(whisper_model='tiny')
lrcer = LRCer(whisper_model='tiny', device='cpu', compute_type='default')
with self.assertRaises(FileNotFoundError):
lrcer.run('data/invalid.mp3')

def test_video_file_transcription_translation(self):
lrcer = LRCer(whisper_model='tiny')
lrcer = LRCer(whisper_model='tiny', device='cpu', compute_type='default')
result = lrcer.run('data/test_video.mp4')
self.assertTrue(result)

@patch('openlrc.translate.LLMTranslator.translate', MagicMock(side_effect=Exception('test exception')))
def test_translation_error(self):
lrcer = LRCer(whisper_model='tiny')
lrcer = LRCer(whisper_model='tiny', device='cpu', compute_type='default')
with self.assertRaises(Exception):
lrcer.run(self.audio_path)

@patch('openlrc.translate.LLMTranslator.translate', MagicMock(side_effect=Exception('test exception')))
def test_skip_translation(self):
lrcer = LRCer(whisper_model='tiny')
lrcer = LRCer(whisper_model='tiny', device='cpu', compute_type='default')
result = lrcer.run('data/test_video.mp4', skip_trans=True)
self.assertTrue(result)
10 changes: 4 additions & 6 deletions tests/test_transcribe.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,20 +28,18 @@ def setUp(self) -> None:
self.audio_path = Path('data/test_audio.wav')

@patch('openlrc.transcribe.BatchedInferencePipeline')
@patch('openlrc.transcribe.WhisperModel')
def test_transcribe_success(self, MockWhisperModel, MockBatchedInferencePipeline):
def test_transcribe_success(self, MockBatchedInferencePipeline):
MockBatchedInferencePipeline.return_value.transcribe.return_value = return_tuple

transcriber = Transcriber(model_name='tiny')
transcriber = Transcriber(model_name='tiny', device='cpu', compute_type='default')
result, info = transcriber.transcribe(self.audio_path)
self.assertIsNotNone(result)
self.assertEqual(round(info.duration), 30)

@patch('openlrc.transcribe.BatchedInferencePipeline')
@patch('openlrc.transcribe.WhisperModel')
def test_audio_file_not_found(self, MockWhisperModel, MockBatchedInferencePipeline):
def test_audio_file_not_found(self, MockBatchedInferencePipeline):
MockBatchedInferencePipeline.return_value.transcribe.return_value = return_tuple

transcriber = Transcriber(model_name='tiny')
transcriber = Transcriber(model_name='tiny', device='cpu', compute_type='default')
with self.assertRaises(FileNotFoundError):
transcriber.transcribe('audio.wav')

0 comments on commit b31d499

Please sign in to comment.