🔧 chore: Merge
canisminor1990 committed Nov 16, 2023
1 parent 18cbd39 commit 8e2debc
Showing 25 changed files with 245 additions and 181 deletions.
29 changes: 29 additions & 0 deletions api/openai-stt.ts
@@ -0,0 +1,29 @@
import OpenAI from 'openai';

import { OpenAISTTPayload } from '@/core';

import { createOpenaiAudioTranscriptions } from '../src/server/createOpenaiAudioTranscriptions';

export const config = {
runtime: 'edge',
};

export default async (req: Request) => {
if (req.method !== 'POST') return new Response('Method Not Allowed', { status: 405 });

const OPENAI_API_KEY = process.env.OPENAI_API_KEY;
const OPENAI_PROXY_URL = process.env.OPENAI_PROXY_URL;

if (!OPENAI_API_KEY) return new Response('OPENAI_API_KEY is not set', { status: 500 });

const payload = (await req.json()) as OpenAISTTPayload;

const openai = new OpenAI({ apiKey: OPENAI_API_KEY, baseURL: OPENAI_PROXY_URL });
const res = await createOpenaiAudioTranscriptions({ openai, payload });

return new Response(JSON.stringify(res), {
headers: {
'content-type': 'application/json;charset=UTF-8',
},
});
};
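For context on how this new edge route is exercised: a caller POSTs a JSON-encoded OpenAISTTPayload and reads the transcription back as JSON. Below is a minimal client sketch; the /api/openai-stt path and the loose payload typing are assumptions for illustration — the real payload shape lives in @/core.

```ts
// Hedged sketch of a client call to the STT edge route added above.
// Assumes the route is served at /api/openai-stt; `payload` is a
// JSON-serializable OpenAISTTPayload built elsewhere (see @/core).
export const requestTranscription = async (payload: unknown): Promise<unknown> => {
  const res = await fetch('/api/openai-stt', {
    body: JSON.stringify(payload),
    headers: { 'content-type': 'application/json' },
    method: 'POST',
  });
  if (!res.ok) throw new Error(`STT request failed: ${res.status}`);
  // The handler serializes the transcription result and returns it as JSON.
  return res.json();
};
```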
23 changes: 23 additions & 0 deletions api/openai-tts.ts
@@ -0,0 +1,23 @@
import OpenAI from 'openai';

import { OpenAITTSPayload } from '@/core';

import { createOpenaiAudioSpeech } from '../src/server/createOpenaiAudioSpeech';

export const config = {
runtime: 'edge',
};

export default async (req: Request) => {
if (req.method !== 'POST') return new Response('Method Not Allowed', { status: 405 });
const OPENAI_API_KEY = process.env.OPENAI_API_KEY;
const OPENAI_PROXY_URL = process.env.OPENAI_PROXY_URL;

if (!OPENAI_API_KEY) return new Response('OPENAI_API_KEY is not set', { status: 500 });

const payload = (await req.json()) as OpenAITTSPayload;

const openai = new OpenAI({ apiKey: OPENAI_API_KEY, baseURL: OPENAI_PROXY_URL });

return createOpenaiAudioSpeech({ openai, payload });
};
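Unlike the STT route, this handler returns the response from createOpenaiAudioSpeech directly, so a caller reads raw audio bytes rather than JSON. Again a hedged sketch with an assumed /api/openai-tts path and loose payload typing:

```ts
// Hedged sketch of a client call to the TTS edge route added above.
// `payload` is a JSON-serializable OpenAITTSPayload (see @/core).
export const requestSpeech = async (payload: unknown): Promise<ArrayBuffer> => {
  const res = await fetch('/api/openai-tts', {
    body: JSON.stringify(payload),
    headers: { 'content-type': 'application/json' },
    method: 'POST',
  });
  if (!res.ok) throw new Error(`TTS request failed: ${res.status}`);
  // The edge handler streams the synthesized audio back as the response body.
  return res.arrayBuffer();
};
```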
4 changes: 2 additions & 2 deletions src/react/hooks/useAudioPlayer.ts
@@ -5,13 +5,13 @@ import { arrayBufferConvert } from '@/core/utils/arrayBufferConvert';
import { audioBufferToBlob } from '@/core/utils/audioBufferToBlob';
import { AudioProps } from '@/react/AudioPlayer';

export interface AudioPlayerHook extends AudioProps {
export interface AudioPlayerReturn extends AudioProps {
isLoading?: boolean;
ref: RefObject<HTMLAudioElement>;
reset: () => void;
}

export const useAudioPlayer = (src: string): AudioPlayerHook => {
export const useAudioPlayer = (src: string): AudioPlayerReturn => {
const audioRef = useRef<HTMLAudioElement>(new Audio());
const [currentTime, setCurrentTime] = useState(0);
const [duration, setDuration] = useState(0);
4 changes: 2 additions & 2 deletions src/react/hooks/useStreamAudioPlayer.ts
@@ -3,14 +3,14 @@ import { RefObject, useCallback, useEffect, useRef, useState } from 'react';
import { audioBufferToBlob, audioBuffersToBlob } from '@/core/utils/audioBufferToBlob';
import { AudioProps } from '@/react/AudioPlayer';

export interface StreamAudioPlayerHook extends AudioProps {
export interface StreamAudioPlayerReturn extends AudioProps {
download: () => void;
load: (audioBuffer: AudioBuffer) => void;
ref: RefObject<HTMLAudioElement>;
reset: () => void;
}

export const useStreamAudioPlayer = (): StreamAudioPlayerHook => {
export const useStreamAudioPlayer = (): StreamAudioPlayerReturn => {
const audioRef = useRef<HTMLAudioElement>(new Audio());
const [audioBuffers, setAudioBuffer] = useState<AudioBuffer[]>([]);
const [currentTime, setCurrentTime] = useState(0);
18 changes: 4 additions & 14 deletions src/react/index.ts
@@ -1,23 +1,13 @@
export { default as AudioPlayer, type AudioPlayerProps } from './AudioPlayer';
export { default as AudioVisualizer, type AudioVisualizerProps } from './AudioVisualizer';
export { type AudioPlayerHook, useAudioPlayer } from './hooks/useAudioPlayer';
export { type AudioPlayerReturn, useAudioPlayer } from './hooks/useAudioPlayer';
export { useAudioVisualizer } from './hooks/useAudioVisualizer';
export { useBlobUrl } from './hooks/useBlobUrl';
export { useStreamAudioPlayer } from './hooks/useStreamAudioPlayer';
export { useAudioRecorder } from './useAudioRecorder';
export { type EdgeSpeechOptions, useEdgeSpeech } from './useEdgeSpeech';
export { type MicrosoftSpeechOptions, useMicrosoftSpeech } from './useMicrosoftSpeech';
export {
type OpenAISTTConfig,
useOpenaiSTT,
useOpenaiSTTWithPSR,
useOpenaiSTTWithRecord,
useOpenaiSTTWithSR,
} from './useOpenaiSTT';
export { type OpenAITTSConfig, useOpenaiTTS } from './useOpenaiTTS';
export { usePersistedSpeechRecognition } from './useSpeechRecognition/usePersistedSpeechRecognition';
export {
type SpeechRecognitionOptions,
useSpeechRecognition,
} from './useSpeechRecognition/useSpeechRecognition';
export { type OpenAISTTOptions, useOpenAISTT } from './useOpenAISTT';
export { type OpenAITTSOptions, useOpenAITTS } from './useOpenAITTS';
export { type SpeechRecognitionOptions, useSpeechRecognition } from './useSpeechRecognition';
export { type SpeechSynthesOptions, useSpeechSynthes } from './useSpeechSynthes';
@@ -1,4 +1,4 @@
import { useOpenaiSTTWithSR } from '@lobehub/tts/react';
import { useOpenAISTT } from '@lobehub/tts/react';
import { Icon, StoryBook, useControls, useCreateStore } from '@lobehub/ui';
import { Button, Input } from 'antd';
import { Mic, StopCircle } from 'lucide-react';
@@ -29,10 +29,10 @@ export default () => {
{ store },
);

const { text, start, stop, isLoading, isRecording, url, formattedTime } = useOpenaiSTTWithSR(
locale,
{ api },
);
const { text, start, stop, isLoading, isRecording, url, formattedTime } = useOpenAISTT(locale, {
api,
autoStop: true,
});
return (
<StoryBook levaStore={store}>
<Flexbox gap={8}>
@@ -1,4 +1,4 @@
import { useOpenaiSTTWithPSR } from '@lobehub/tts/react';
import { useOpenAISTT } from '@lobehub/tts/react';
import { Icon, StoryBook, useControls, useCreateStore } from '@lobehub/ui';
import { Button, Input } from 'antd';
import { Mic, StopCircle } from 'lucide-react';
@@ -29,11 +29,10 @@ export default () => {
{ store },
);

const { text, start, stop, isLoading, isRecording, url, formattedTime } = useOpenaiSTTWithPSR(
locale,
{ api },
);

const { text, start, stop, isLoading, isRecording, url, formattedTime } = useOpenAISTT(locale, {
api,
autoStop: true,
});
return (
<StoryBook levaStore={store}>
<Flexbox gap={8}>
15 changes: 15 additions & 0 deletions src/react/useOpenAISTT/index.md
@@ -0,0 +1,15 @@
---
nav: Components
group: STT
title: useOpenAISTT
---

## hooks

- ENV: `OPENAI_API_KEY` `OPENAI_PROXY_URL`

<code src="./demos/index.tsx" nopadding></code>

## Auto Stop

<code src="./demos/AutoStop.tsx" nopadding></code>
12 changes: 12 additions & 0 deletions src/react/useOpenAISTT/index.ts
@@ -0,0 +1,12 @@
import { useOpenAISTTAutoStop } from './useOpenAISTTAutoStop';
import { useOpenAISTTInteractive } from './useOpenAISTTInteractive';
import { OpenAISTTRecorderOptions } from './useOpenAISTTRecorder';

export interface OpenAISTTOptions extends OpenAISTTRecorderOptions {
autoStop?: boolean;
}

export const useOpenAISTT = (locale: string, { autoStop, ...rest }: OpenAISTTOptions = {}) => {
const selectedHook = autoStop ? useOpenAISTTAutoStop : useOpenAISTTInteractive;
return selectedHook(locale, rest);
};
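The updated demos later in this diff show how the consolidated hook is consumed; the condensed sketch below mirrors them. The component name and markup are illustrative, and the api prop follows the OpenAISTTCoreOptions shape that appears further down in this commit.

```tsx
// Condensed usage sketch mirroring the updated demos in this commit.
// Component name and markup are illustrative; the hook call itself matches the diff.
import { useOpenAISTT } from '@lobehub/tts/react';

const RecorderDemo = ({ locale, api }: { locale: string; api?: { key: string; url: string } }) => {
  const { text, start, stop, isLoading, isRecording, url, formattedTime } = useOpenAISTT(locale, {
    api,
    autoStop: true, // omit or set to false to get the interactive variant
  });

  return (
    <div>
      <button disabled={isLoading} onClick={isRecording ? stop : start}>
        {isRecording ? `Stop (${formattedTime})` : 'Start recording'}
      </button>
      <p>{text}</p>
      {url && <audio controls src={url} />}
    </div>
  );
};

export default RecorderDemo;
```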
@@ -1,11 +1,11 @@
import { useCallback, useState } from 'react';

import { useOpenaiSTT } from '@/react/useOpenaiSTT/useOpenaiSTT';
import { useSpeechRecognition } from '@/react/useSpeechRecognition';
import { useOpenAISTTCore } from '@/react/useOpenAISTT/useOpenAISTTCore';
import { useSpeechRecognitionAutoStop } from '@/react/useSpeechRecognition/useSpeechRecognitionAutoStop';

import { STTConfig } from './useOpenaiSTTWithRecord';
import { OpenAISTTRecorderOptions } from './useOpenAISTTRecorder';

export const useOpenaiSTTWithSR = (
export const useOpenAISTTAutoStop = (
locale: string,
{
onBlobAvailable,
@@ -16,8 +16,12 @@ export const useOpenaiSTTWithSR = (
onStart,
onStop,
options,
onRecognitionStop,
onRecognitionStart,
onRecognitionError,
onRecognitionFinish,
...restConfig
}: STTConfig = {},
}: OpenAISTTRecorderOptions = {},
) => {
const [isGlobalLoading, setIsGlobalLoading] = useState<boolean>(false);
const [shouldFetch, setShouldFetch] = useState<boolean>(false);
@@ -30,11 +34,15 @@ export const useOpenaiSTTWithSR = (
isLoading: isRecording,
time,
formattedTime,
} = useSpeechRecognition(locale, {
} = useSpeechRecognitionAutoStop(locale, {
onBlobAvailable: (blobData) => {
setShouldFetch(true);
onBlobAvailable?.(blobData);
},
onRecognitionError,
onRecognitionFinish,
onRecognitionStart,
onRecognitionStop,
onTextChange: (data) => {
setText(data);
onTextChange?.(data);
@@ -55,7 +63,7 @@ export const useOpenaiSTTWithSR = (
setIsGlobalLoading(false);
}, [stop]);

const { isLoading } = useOpenaiSTT({
const { isLoading } = useOpenAISTTCore({
onError: (err, ...rest) => {
onError?.(err, ...rest);
console.error(err);
@@ -2,14 +2,14 @@ import useSWR, { type SWRConfiguration } from 'swr';

import { OpenAISTTPayload, OpenaiSTT } from '@/core/OpenAISTT';

export interface OpenAISTTConfig extends OpenAISTTPayload, SWRConfiguration {
export interface OpenAISTTCoreOptions extends OpenAISTTPayload, SWRConfiguration {
api?: {
key: string;
url: string;
};
shouldFetch?: boolean;
}
export const useOpenaiSTT = (config: OpenAISTTConfig) => {
export const useOpenAISTTCore = (config: OpenAISTTCoreOptions) => {
const key = new Date().getDate().toString();
const { shouldFetch, api, options, speech, ...swrConfig } = config;

@@ -1,11 +1,11 @@
import { useCallback, useState } from 'react';

import { useOpenaiSTT } from '@/react/useOpenaiSTT/useOpenaiSTT';
import { usePersistedSpeechRecognition } from '@/react/useSpeechRecognition';
import { useOpenAISTTCore } from '@/react/useOpenAISTT/useOpenAISTTCore';
import { useSpeechRecognitionInteractive } from '@/react/useSpeechRecognition/useSpeechRecognitionInteractive';

import { STTConfig } from './useOpenaiSTTWithRecord';
import { OpenAISTTRecorderOptions } from './useOpenAISTTRecorder';

export const useOpenaiSTTWithPSR = (
export const useOpenAISTTInteractive = (
locale: string,
{
onBlobAvailable,
@@ -16,8 +16,12 @@ export const useOpenaiSTTWithPSR = (
onStart,
onStop,
options,
onRecognitionStop,
onRecognitionStart,
onRecognitionError,
onRecognitionFinish,
...restConfig
}: STTConfig = {},
}: OpenAISTTRecorderOptions = {},
) => {
const [isGlobalLoading, setIsGlobalLoading] = useState<boolean>(false);
const [shouldFetch, setShouldFetch] = useState<boolean>(false);
@@ -30,11 +34,15 @@ export const useOpenaiSTTWithPSR = (
isLoading: isRecording,
time,
formattedTime,
} = usePersistedSpeechRecognition(locale, {
} = useSpeechRecognitionInteractive(locale, {
onBlobAvailable: (blobData) => {
setShouldFetch(true);
onBlobAvailable?.(blobData);
},
onRecognitionError,
onRecognitionFinish,
onRecognitionStart,
onRecognitionStop,
onTextChange: (data) => {
setText(data);
onTextChange?.(data);
@@ -55,7 +63,7 @@ export const useOpenaiSTTWithPSR = (
setIsGlobalLoading(false);
}, [stop]);

const { isLoading } = useOpenaiSTT({
const { isLoading } = useOpenAISTTCore({
onError: (err, ...rest) => {
onError?.(err, ...rest);
console.error(err);
@@ -2,21 +2,19 @@ import { useCallback, useState } from 'react';
import { SWRConfiguration } from 'swr';

import { useAudioRecorder } from '@/react/useAudioRecorder';
import { useOpenaiSTT } from '@/react/useOpenaiSTT/useOpenaiSTT';
import { SpeechRecognitionOptions } from '@/react/useSpeechRecognition/useSpeechRecognition';
import { useOpenAISTTCore } from '@/react/useOpenAISTT/useOpenAISTTCore';
import { SpeechRecognitionRecorderOptions } from '@/react/useSpeechRecognition/useSpeechRecognitionAutoStop';

import { OpenAISTTConfig } from './useOpenaiSTT';
import { OpenAISTTCoreOptions } from './useOpenAISTTCore';

export interface STTConfig
extends SpeechRecognitionOptions,
export interface OpenAISTTRecorderOptions
extends SpeechRecognitionRecorderOptions,
SWRConfiguration,
Partial<OpenAISTTConfig> {
Partial<OpenAISTTCoreOptions> {
onFinished?: SWRConfiguration['onSuccess'];
onStart?: () => void;
onStop?: () => void;
}

export const useOpenaiSTTWithRecord = ({
export const useOpenAISTTRecorder = ({
onBlobAvailable,
onTextChange,
onSuccess,
@@ -26,7 +24,7 @@ export const useOpenaiSTTWithRecord = ({
onStop,
options,
...restConfig
}: STTConfig = {}) => {
}: OpenAISTTRecorderOptions = {}) => {
const [isGlobalLoading, setIsGlobalLoading] = useState<boolean>(false);
const [shouldFetch, setShouldFetch] = useState<boolean>(false);
const [text, setText] = useState<string>();
@@ -51,7 +49,7 @@ export const useOpenaiSTTWithRecord = ({
setIsGlobalLoading(false);
}, [stop]);

const { isLoading } = useOpenaiSTT({
const { isLoading } = useOpenAISTTCore({
onError: (err, ...rest) => {
onError?.(err, ...rest);
console.error(err);
@@ -1,5 +1,5 @@
import { OpenAITTS } from '@lobehub/tts';
import { AudioPlayer, useOpenaiTTS } from '@lobehub/tts/react';
import { AudioPlayer, useOpenAITTS } from '@lobehub/tts/react';
import { Icon, StoryBook, useControls, useCreateStore } from '@lobehub/ui';
import { Button, Input } from 'antd';
import { Volume2 } from 'lucide-react';
@@ -35,7 +35,7 @@ export default () => {
},
{ store },
);
const { setText, isGlobalLoading, audio, start, stop } = useOpenaiTTS(defaultText, {
const { setText, isGlobalLoading, audio, start, stop } = useOpenAITTS(defaultText, {
api,
options,
});
@@ -1,7 +1,7 @@
---
nav: Components
group: TTS
title: useOpenaiTTS
title: useOpenAITTS
---

## hooks