// decode-file-non-streaming.swift
//
// Decode a wave file with a non-streaming (offline) model using sherpa-onnx.
import AVFoundation

extension AudioBuffer {
  // Reinterpret the buffer's raw bytes as Float samples.
  func array() -> [Float] {
    return Array(UnsafeBufferPointer(self))
  }
}

extension AVAudioPCMBuffer {
  // Flatten the buffer's first (and, per the assertions below, only)
  // channel into a plain [Float] that the recognizer can consume.
  func array() -> [Float] {
    return self.audioBufferList.pointee.mBuffers.array()
  }
}
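// Usage sketch: after a mono Float32 file has been read into an
// AVAudioPCMBuffer, `buffer.array()` yields the samples as [Float],
// which is the shape decode(samples:sampleRate:) consumes in run() below.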

// Decode a single wave file with a non-streaming (offline) recognizer.
func run() {
  var recognizer: SherpaOnnxOfflineRecognizer
  var modelConfig: SherpaOnnxOfflineModelConfig

  // Select the offline model to use; uncomment one of the
  // alternatives to switch.
  var modelType = "whisper"
  // modelType = "paraformer"
  // modelType = "sense_voice"
  if modelType == "whisper" {
    let encoder = "./sherpa-onnx-whisper-tiny.en/tiny.en-encoder.int8.onnx"
    let decoder = "./sherpa-onnx-whisper-tiny.en/tiny.en-decoder.int8.onnx"
    let tokens = "./sherpa-onnx-whisper-tiny.en/tiny.en-tokens.txt"

    let whisperConfig = sherpaOnnxOfflineWhisperModelConfig(
      encoder: encoder,
      decoder: decoder
    )

    modelConfig = sherpaOnnxOfflineModelConfig(
      tokens: tokens,
      whisper: whisperConfig,
      debug: 0,
      modelType: "whisper"
    )
  } else if modelType == "paraformer" {
    let model = "./sherpa-onnx-paraformer-zh-2023-09-14/model.int8.onnx"
    let tokens = "./sherpa-onnx-paraformer-zh-2023-09-14/tokens.txt"

    let paraformerConfig = sherpaOnnxOfflineParaformerModelConfig(
      model: model
    )

    modelConfig = sherpaOnnxOfflineModelConfig(
      tokens: tokens,
      paraformer: paraformerConfig,
      debug: 0,
      modelType: "paraformer"
    )
  } else if modelType == "sense_voice" {
    let model = "./sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17/model.int8.onnx"
    let tokens = "./sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17/tokens.txt"

    let senseVoiceConfig = sherpaOnnxOfflineSenseVoiceModelConfig(
      model: model,
      useInverseTextNormalization: true
    )

    modelConfig = sherpaOnnxOfflineModelConfig(
      tokens: tokens,
      debug: 0,
      senseVoice: senseVoiceConfig
    )
  } else {
    print("Unsupported modelType: \(modelType). Supported values: whisper, paraformer, sense_voice")
    return
  }
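
  // Note: the model directories referenced above are pre-trained models
  // that must be downloaded and unpacked beforehand; adjust the paths if
  // they live somewhere other than the working directory.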

  // The feature extractor is configured for 16 kHz audio with
  // 80-dimensional features, matching the models selected above.
  let featConfig = sherpaOnnxFeatureConfig(
    sampleRate: 16000,
    featureDim: 80
  )
  var config = sherpaOnnxOfflineRecognizerConfig(
    featConfig: featConfig,
    modelConfig: modelConfig
  )

  recognizer = SherpaOnnxOfflineRecognizer(config: &config)

  var filePath = "./sherpa-onnx-whisper-tiny.en/test_wavs/0.wav"
  if modelType == "sense_voice" {
    filePath = "./sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17/test_wavs/zh.wav"
  }

  // Read the whole file into memory. The assertions guarantee mono
  // Float32 PCM, which is what AVAudioPCMBuffer.array() above relies on.
  let fileURL = URL(fileURLWithPath: filePath)
  let audioFile = try! AVAudioFile(forReading: fileURL)

  let audioFormat = audioFile.processingFormat
  assert(audioFormat.channelCount == 1)
  assert(audioFormat.commonFormat == AVAudioCommonFormat.pcmFormatFloat32)

  let audioFrameCount = UInt32(audioFile.length)
  let audioFileBuffer = AVAudioPCMBuffer(pcmFormat: audioFormat, frameCapacity: audioFrameCount)
  try! audioFile.read(into: audioFileBuffer!)

  let array: [Float]! = audioFileBuffer?.array()
  let result = recognizer.decode(samples: array, sampleRate: Int(audioFormat.sampleRate))

  print("\nresult is:\n\(result.text)")
  if result.timestamps.count != 0 {
    print("\ntimestamps is:\n\(result.timestamps)")
  }
}
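
// A minimal sketch, not part of the original example: the recognizer is
// configured for 16 kHz input, so a file recorded at another rate would
// need resampling first. This hypothetical helper uses AVAudioConverter;
// the name and error handling are illustrative only.
func resampleToMono16k(_ buffer: AVAudioPCMBuffer) -> AVAudioPCMBuffer? {
  let targetRate = 16000.0
  guard
    let outFormat = AVAudioFormat(
      commonFormat: .pcmFormatFloat32, sampleRate: targetRate,
      channels: 1, interleaved: false),
    let converter = AVAudioConverter(from: buffer.format, to: outFormat)
  else {
    return nil
  }

  // Size the output buffer for the rate-converted frame count.
  let capacity = AVAudioFrameCount(
    Double(buffer.frameLength) * targetRate / buffer.format.sampleRate)
  guard let out = AVAudioPCMBuffer(pcmFormat: outFormat, frameCapacity: capacity) else {
    return nil
  }

  // Feed the whole input buffer once, then signal end of stream.
  var fed = false
  var error: NSError?
  converter.convert(to: out, error: &error) { _, status in
    if fed {
      status.pointee = .endOfStream
      return nil
    }
    fed = true
    status.pointee = .haveData
    return buffer
  }
  return error == nil ? out : nil
}
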
@main
struct App {
  static func main() {
    run()
  }
}