From 719b4a495af8e2f3394e4ee5dcd92fb02bc6db97 Mon Sep 17 00:00:00 2001
From: Bear-03 <64696287+Bear-03@users.noreply.github.com>
Date: Sun, 27 Oct 2024 00:15:06 +0200
Subject: [PATCH] style: fix lints

---
 crates/vosk/src/gpu.rs                    |  2 +-
 crates/vosk/src/lib.rs                    |  6 ++++--
 crates/vosk/src/models/batch.rs           |  2 +-
 crates/vosk/src/models/sequential.rs      |  2 +-
 crates/vosk/src/recognition/batch.rs      | 16 +++++-----------
 crates/vosk/src/recognition/mod.rs        |  5 +++--
 crates/vosk/src/recognition/sequential.rs | 19 +++++++++++--------
 7 files changed, 26 insertions(+), 26 deletions(-)

diff --git a/crates/vosk/src/gpu.rs b/crates/vosk/src/gpu.rs
index 10e4942..f1ef17f 100644
--- a/crates/vosk/src/gpu.rs
+++ b/crates/vosk/src/gpu.rs
@@ -8,4 +8,4 @@ pub fn gpu_init() {
 /// Must be called for each thread.
 pub fn gpu_thread_init() {
     unsafe { vosk_sys::vosk_gpu_thread_init() }
-}
\ No newline at end of file
+}
diff --git a/crates/vosk/src/lib.rs b/crates/vosk/src/lib.rs
index 3f8f4a8..dee6917 100644
--- a/crates/vosk/src/lib.rs
+++ b/crates/vosk/src/lib.rs
@@ -6,7 +6,7 @@
 //! * Create a [`Recognizer`] with that model
 //! * Feel audio to the recognizer with [`Recognizer::accept_waveform`]
 //! * Get the processed result with [`Recognizer::result`],
-//! [`Recognizer::partial_result`] or [`Recognizer::final_result`]
+//!   [`Recognizer::partial_result`] or [`Recognizer::final_result`]
 
 #[cfg(feature = "batch")]
 mod gpu;
@@ -14,4 +14,6 @@ mod log;
 mod models;
 mod recognition;
 
-pub use crate::{log::*, models::*, recognition::{*, results::*}};
+pub use crate::{log::*, models::*, recognition::*};
+#[cfg(feature = "batch")]
+pub use gpu::*;
diff --git a/crates/vosk/src/models/batch.rs b/crates/vosk/src/models/batch.rs
index 44119fd..e0e3026 100644
--- a/crates/vosk/src/models/batch.rs
+++ b/crates/vosk/src/models/batch.rs
@@ -30,4 +30,4 @@ impl Drop for BatchModel {
 }
 
 unsafe impl Send for BatchModel {}
-unsafe impl Sync for BatchModel {}
\ No newline at end of file
+unsafe impl Sync for BatchModel {}
diff --git a/crates/vosk/src/models/sequential.rs b/crates/vosk/src/models/sequential.rs
index 39b090f..8cfa04f 100644
--- a/crates/vosk/src/models/sequential.rs
+++ b/crates/vosk/src/models/sequential.rs
@@ -75,4 +75,4 @@ impl Drop for SpeakerModel {
 }
 
 unsafe impl Send for SpeakerModel {}
-unsafe impl Sync for SpeakerModel {}
\ No newline at end of file
+unsafe impl Sync for SpeakerModel {}
diff --git a/crates/vosk/src/recognition/batch.rs b/crates/vosk/src/recognition/batch.rs
index acfcd48..e1d678c 100644
--- a/crates/vosk/src/recognition/batch.rs
+++ b/crates/vosk/src/recognition/batch.rs
@@ -14,16 +14,15 @@ impl BatchRecognizer {
     /// The recognizers process the speech and return text using shared model data.
     ///
     /// * `model` - [`BatchModel`] containing static data for recognizer. Model can be shared
-    /// across recognizers, even running in different threads.
+    ///   across recognizers, even running in different threads.
     ///
     /// * `sample_rate` - The sample rate of the audio you going to feed into the recognizer.
-    /// Make sure this rate matches the audio content, it is a common issue causing accuracy problems.
+    ///   Make sure this rate matches the audio content, it is a common issue causing accuracy problems.
     ///
     /// [`BatchModel`]: crate::BatchModel
     #[must_use]
     pub fn new(model: &BatchModel, sample_rate: f32) -> Option<Self> {
-        let recognizer_ptr =
-            unsafe { vosk_batch_recognizer_new(model.0.as_ptr(), sample_rate) };
+        let recognizer_ptr = unsafe { vosk_batch_recognizer_new(model.0.as_ptr(), sample_rate) };
 
         Some(Self(NonNull::new(recognizer_ptr)?))
     }
@@ -37,11 +36,7 @@ impl BatchRecognizer {
     /// * `data` - Audio data in PCM 16-bit mono format as an array of i8.
     pub fn accept_waveform(&mut self, data: &[i8]) {
         unsafe {
-            vosk_batch_recognizer_accept_waveform(
-                self.0.as_ptr(),
-                data.as_ptr(),
-                data.len() as i32,
-            )
+            vosk_batch_recognizer_accept_waveform(self.0.as_ptr(), data.as_ptr(), data.len() as i32)
         };
     }
 
@@ -76,9 +71,8 @@ unsafe impl Send for BatchRecognizer {}
 // which ensures exclusive access, so it is Sync
 unsafe impl Sync for BatchRecognizer {}
 
-
 impl Drop for BatchRecognizer {
     fn drop(&mut self) {
         unsafe { vosk_batch_recognizer_free(self.0.as_ptr()) }
     }
-}
\ No newline at end of file
+}
diff --git a/crates/vosk/src/recognition/mod.rs b/crates/vosk/src/recognition/mod.rs
index 519830f..3312c73 100644
--- a/crates/vosk/src/recognition/mod.rs
+++ b/crates/vosk/src/recognition/mod.rs
@@ -2,11 +2,12 @@ use std::os::raw::c_int;
 
 #[cfg(feature = "batch")]
 mod batch;
+mod results;
 mod sequential;
-pub mod results;
 
 #[cfg(feature = "batch")]
 pub use batch::BatchRecognizer;
+pub use results::*;
 pub use sequential::Recognizer;
 
 #[derive(Debug, Clone, Copy, PartialEq, Eq)]
@@ -29,4 +30,4 @@ impl DecodingState {
             _ => Self::Failed,
         }
     }
-}
\ No newline at end of file
+}
diff --git a/crates/vosk/src/recognition/sequential.rs b/crates/vosk/src/recognition/sequential.rs
index 2f2df7d..96425bd 100644
--- a/crates/vosk/src/recognition/sequential.rs
+++ b/crates/vosk/src/recognition/sequential.rs
@@ -1,5 +1,8 @@
+use super::{
+    results::{CompleteResult, PartialResult},
+    DecodingState,
+};
 use crate::models::{Model, SpeakerModel};
-use super::{DecodingState, results::{CompleteResult, PartialResult}};
 
 use serde::Deserialize;
 use std::{
@@ -21,10 +24,10 @@ impl Recognizer {
     /// The recognizers process the speech and return text using shared model data.
     ///
    /// * `model` - [`Model`] containing static data for recognizer. Model can be shared
-    /// across recognizers, even running in different threads.
+    ///   across recognizers, even running in different threads.
     ///
     /// * `sample_rate` - The sample rate of the audio you going to feed into the recognizer.
-    /// Make sure this rate matches the audio content, it is a common issue causing accuracy problems.
+    ///   Make sure this rate matches the audio content, it is a common issue causing accuracy problems.
     ///
     /// [`Model`]: crate::Model
     #[must_use]
@@ -39,11 +42,11 @@ impl Recognizer {
     /// text but also return speaker vectors one can use for speaker identification
     ///
     /// * `model` - [`Model`] containing the data for recognizer. Model can be
-    /// shared across recognizers, even running in different threads.
+    ///   shared across recognizers, even running in different threads.
     ///
     /// * `sample_rate` - The sample rate of the audio you going to feed into the recognizer.
-    /// Make sure this rate matches the audio content, it is a common
-    /// issue causing accuracy problems.
+    ///   Make sure this rate matches the audio content, it is a common
+    ///   issue causing accuracy problems.
     ///
     /// * `spk_model` - Speaker model for speaker identification.
     ///
@@ -73,10 +76,10 @@ impl Recognizer {
     /// Precompiled HCLG graph models are not supported.
     ///
     /// * `model` - [`Model`] containing the data for recognizer. Model can be shared
-    /// across recognizers, even running in different threads.
+    ///   across recognizers, even running in different threads.
     ///
     /// * `sample_rate` - The sample rate of the audio you going to feed into the recognizer.
-    /// Make sure this rate matches the audio content, it is a common issue causing accuracy problems.
+    ///   Make sure this rate matches the audio content, it is a common issue causing accuracy problems.
     ///
     /// * `grammar` - The list of phrases to recognize.
    ///
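
For context, the lib.rs docs touched above describe the intended flow (create a model, create a recognizer, feed audio with accept_waveform, then read a result), and the recognition/mod.rs change keeps the result types re-exported from the crate root. A rough sketch of that flow, assuming the published vosk crate API (Model::new, Recognizer::new, 16-bit mono PCM input); the model path and the silent sample buffer are placeholders, not something this patch adds:

// Rough usage sketch, assuming the published vosk crate API; the model
// path and the silent sample buffer below are placeholders.
use vosk::{Model, Recognizer};

fn main() {
    // Unpacked Vosk model directory (placeholder path).
    let model = Model::new("path/to/model").expect("could not load model");

    // The sample rate must match the audio being fed in, as the doc comments stress.
    let mut recognizer =
        Recognizer::new(&model, 16_000.0).expect("could not create recognizer");

    // 16-bit mono PCM samples; one second of silence keeps the sketch self-contained.
    let samples = vec![0i16; 16_000];
    let _state = recognizer.accept_waveform(&samples);

    // With the re-exports in recognition/mod.rs, the result types
    // (PartialResult, CompleteResult) remain reachable from the crate root.
    println!("{:#?}", recognizer.partial_result());
    println!("{:#?}", recognizer.final_result().single());
}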