Merge pull request #4505 from traPtitech/fix/audio_device_id
Fix an issue where the audio device setting was not recognized
nokhnaton authored Jan 27, 2025
2 parents 5d689c0 + 7dfe2bc commit 86bd0f9
Showing 5 changed files with 45 additions and 25 deletions.
5 changes: 2 additions & 3 deletions src/components/Main/MainView/QallView/QallAudio.vue
@@ -1,19 +1,18 @@
<script setup lang="ts">
import { useQall } from '/@/composables/qall/useQall'
import AudioComponent from './AudioComponent.vue'
import VoiceComponent from './VoiceComponent.vue'
const { tracksMap, screenShareTrackSidMap, screenShareTracks } = useQall()
</script>
<template>
<template v-for="[sid, track] in Array.from(tracksMap.entries())" :key="sid">
<AudioComponent
<VoiceComponent
v-if="
track.trackPublication?.kind === 'audio' &&
track.isRemote &&
!screenShareTracks?.some?.(([_, valueSid]) => valueSid === sid)
"
:track-info="track"
:is-show="false"
/>
</template>
</template>
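The filter in this template decides which remote audio tracks get a hidden VoiceComponent: only remote audio tracks that are not part of a screen share are rendered. A minimal TypeScript sketch of that predicate, assuming TrackInfo exposes isRemote and trackPublication as shown above, and that screenShareTracks holds [track, sid]-style tuples (the tuple layout is inferred from the destructuring in the v-if):

// Hypothetical standalone version of the v-if predicate above; the
// TrackInfoLike shape and the tuple layout of screenShareTracks are
// assumptions drawn from how the template uses them.
type TrackInfoLike = {
  isRemote: boolean
  trackPublication?: { kind: string }
}

const shouldRenderVoice = (
  sid: string,
  track: TrackInfoLike,
  screenShareTracks: [unknown, string][] | undefined
): boolean =>
  track.trackPublication?.kind === 'audio' &&
  track.isRemote &&
  !screenShareTracks?.some(([, valueSid]) => valueSid === sid)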
19 changes: 18 additions & 1 deletion src/components/Main/MainView/QallView/UserCard.vue
@@ -3,18 +3,26 @@ import { computed } from 'vue'
import type { TrackInfo } from '/@/composables/qall/useLiveKitSDK'
import { useUsersStore } from '/@/store/entities/users'
import { buildUserIconPath } from '/@/lib/apis'
import { useQall } from '/@/composables/qall/useQall'
const { trackInfo } = defineProps<{
trackInfo: TrackInfo
}>()
const { speakerIdentitys } = useQall()
const { findUserByName } = useUsersStore()
const user = computed(() => findUserByName(trackInfo.username))
const userIconFileId = computed(() => user.value?.iconFileId ?? '')
const iconImage = computed(() => buildUserIconPath(userIconFileId.value))
const isSpeaking = computed(() => {
return (
user.value &&
speakerIdentitys.value.some(s => s.name === trackInfo.username)
)
})
</script>

<template>
<div v-if="user" :class="$style.UserCard">
<div v-if="user" :class="$style.UserCard" :data-is-speaking="isSpeaking">
<div :class="$style.OuterIcon">
<img :src="iconImage" :class="$style.OuterImage" />
</div>
@@ -23,6 +31,7 @@ const iconImage = computed(() => buildUserIconPath(userIconFileId.value))
</div>

<div :class="$style.NameLabel">{{ trackInfo.username }}</div>
<div v-show="isSpeaking" :class="$style.borderBox"></div>
</div>
</template>

@@ -35,6 +44,14 @@ const iconImage = computed(() => buildUserIconPath(userIconFileId.value))
border-radius: 12px;
pointer-events: none;
user-select: none;
box-sizing: border-box;
}
.borderBox {
border: 2px solid $common-ui-qall;
width: 100%;
height: 100%;
border-radius: 12px;
}
.InnerIcon {
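The new speaking highlight in UserCard.vue is driven entirely by speakerIdentitys: the card compares its own username against the name field of each active speaker. A small sketch of that check in isolation, assuming the same ref shape declared in useLiveKitSDK.ts (the sample username is hypothetical):

import { computed, ref } from 'vue'

// Same shape as the speakerIdentitys ref declared in useLiveKitSDK.ts.
const speakerIdentitys = ref<{ identity: string; name?: string }[]>([])

// Hypothetical username standing in for trackInfo.username.
const username = 'alice'

const isSpeaking = computed(() =>
  speakerIdentitys.value.some(s => s.name === username)
)

// isSpeaking.value flips to true whenever LiveKit reports the user as an
// active speaker, which toggles data-is-speaking and shows the .borderBox overlay.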
@@ -5,7 +5,6 @@ import { useUsersStore } from '/@/store/entities/users'
import { buildUserIconPath } from '/@/lib/apis'
import AudioTrack from './AudioTrack.vue'
import { useUserVolume } from '/@/store/app/userVolume'
import UserCard from './UserCard.vue'
const { trackInfo, isShow } = defineProps<{
trackInfo: TrackInfo
isShow?: boolean
@@ -29,14 +28,6 @@ const parseToFloat = (value: number | string): number => {
</script>

<template>
<div :class="isShow ? $style.container : []">
<UserCard :track-info="trackInfo" />
<AudioTrack :track-info="trackInfo" :volume="parseToFloat(volume)" />
</div>
<AudioTrack :track-info="trackInfo" :volume="parseToFloat(volume)" />
</template>
<style lang="scss" module>
.container {
width: 100%;
height: 100%;
}
</style>
<style lang="scss" module></style>
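This hunk drops the UserCard wrapper and its container styles, leaving only the AudioTrack driven by a per-user volume. The parseToFloat helper it keeps is only shown by signature here; a hedged guess at what it does (its body is cut off above, so this is an assumption, not the project's implementation):

// Assumed behavior: normalize a stored volume ('0.5' or 0.5) into a number
// before it is handed to <AudioTrack :volume="...">.
const parseToFloat = (value: number | string): number =>
  typeof value === 'number' ? value : Number.parseFloat(value)

const storedVolume: number | string = '0.5' // e.g. a value kept by useUserVolume
const volume = parseToFloat(storedVolume)   // -> 0.5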
31 changes: 21 additions & 10 deletions src/composables/qall/useLiveKitSDK.ts
@@ -4,7 +4,8 @@ import {
AudioPresets,
createLocalScreenTracks,
Room,
LocalVideoTrack
LocalVideoTrack,
LocalAudioTrack
} from 'livekit-client'
import type {
RemoteTrack,
@@ -85,7 +86,7 @@ type CameraProcessor = {
const room = ref<Room>()
const audioContext = ref<AudioContext>()
const isRnnoiseSupported = computed(() => !!audioContext.value)
const speakerIdentity = ref<string[]>([])
const speakerIdentitys = ref<{ identity: string; name?: string }[]>([])
const tracksMap: Ref<Map<string, TrackInfo>> = ref(new Map())
const cameraProcessorMap: Ref<Map<string, CameraProcessor>> = ref(new Map())
const screenShareTrackSidMap = ref<Map<string, string>>(new Map())
@@ -141,7 +142,7 @@ function handleLocalTrackPublished(

function handleActiveSpeakerChange(speakers: Participant[]) {
// show UI indicators when participant is speaking
speakerIdentity.value = speakers.map(s => s.identity)
speakerIdentitys.value = speakers
}

function handleDisconnect() {
@@ -264,8 +265,7 @@ async function leaveRoom() {
const addMicTrack = async () => {
let stream: MediaStream | undefined

const noiseSuppression = useRtcSettings().noiseSuppression
.value as NoiseSuppressionType
const { noiseSuppression, audioInputDeviceId } = useRtcSettings()
try {
if (!room.value?.localParticipant?.permissions?.canPublish) {
throw new Error('権限がありません')
@@ -275,12 +275,21 @@ const addMicTrack = async () => {
audioContext.value = new AudioContext()
}

stream = await navigator.mediaDevices.getUserMedia({ audio: true })
stream = await navigator.mediaDevices.getUserMedia({
audio: {
deviceId: {
ideal: audioInputDeviceId.value
},
autoGainControl: true,
noiseSuppression: true,
echoCancellation: true
}
})
const source = audioContext.value.createMediaStreamSource(stream)

let lastNode: AudioNode = source

if (noiseSuppression === 'rnnoise') {
if (noiseSuppression.value === 'rnnoise') {
const [rnnoiseBinary] = await Promise.all([
loadRnnoiseWasmBinary(),
audioContext.value?.audioWorklet.addModule(rnnoiseWorkletPath)
@@ -291,7 +300,7 @@ const addMicTrack = async () => {
})
source.connect(rnnoiseNode)
lastNode = rnnoiseNode
} else if (noiseSuppression === 'speex') {
} else if (noiseSuppression.value === 'speex') {
const [speexBinary] = await Promise.all([
loadSpeexWasmBinary(),
audioContext.value?.audioWorklet.addModule(speexWorkletPath)
@@ -313,9 +322,11 @@ const addMicTrack = async () => {
}

audioTrackId.value = audioTrack.id
const livekitAudioTrack = new LocalAudioTrack(audioTrack, undefined, false)
livekitAudioTrack.source = Track.Source.Microphone

// Publish the processed stream
await room.value.localParticipant.publishTrack(audioTrack, {
await room.value.localParticipant.publishTrack(livekitAudioTrack, {
audioPreset: AudioPresets.speech,
forceStereo: true,
red: false,
@@ -334,7 +345,6 @@ const addMicTrack = async () => {
addErrorToast('マイクの共有に失敗しました')
}
}

const removeMicTrack = async () => {
try {
if (!room.value) {
@@ -633,6 +643,7 @@ export const useLiveKitSDK = () => {
tracksMap,
screenShareTrackSidMap,
screenShareTracks,
speakerIdentitys,
isMicOn,
qallMitt
}
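The core of the fix is in addMicTrack: instead of getUserMedia({ audio: true }), the saved audioInputDeviceId from useRtcSettings is passed as an ideal deviceId constraint, and the processed MediaStreamTrack is wrapped in a livekit-client LocalAudioTrack tagged as a microphone source before publishing. A condensed sketch of that flow, using only calls that appear in the diff and omitting the RNNoise/Speex worklet chain (publishMic and its parameters are illustrative, not part of the codebase):

import { AudioPresets, LocalAudioTrack, Room, Track } from 'livekit-client'

async function publishMic(room: Room, audioInputDeviceId: string) {
  const stream = await navigator.mediaDevices.getUserMedia({
    audio: {
      // `ideal` is a preference: if the saved id no longer matches a device,
      // the browser falls back to another input instead of rejecting the request.
      deviceId: { ideal: audioInputDeviceId },
      autoGainControl: true,
      noiseSuppression: true,
      echoCancellation: true
    }
  })

  // In the real code the stream first runs through an optional RNNoise/Speex
  // AudioWorklet chain via an AudioContext; that step is omitted here.
  const [audioTrack] = stream.getAudioTracks()

  const livekitAudioTrack = new LocalAudioTrack(audioTrack, undefined, false)
  livekitAudioTrack.source = Track.Source.Microphone

  await room.localParticipant.publishTrack(livekitAudioTrack, {
    audioPreset: AudioPresets.speech,
    forceStereo: true,
    red: false
  })
}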
2 changes: 2 additions & 0 deletions src/composables/qall/useQall.ts
@@ -58,6 +58,7 @@ const {
tracksMap,
screenShareTrackSidMap,
screenShareTracks,
speakerIdentitys,
isMicOn,
qallMitt
} = useLiveKitSDK()
@@ -321,6 +322,7 @@ export const useQall = () => {
isMicOn,
isCameraOn,
isScreenSharing,
speakerIdentitys,
selectedTrack
}
}
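useQall.ts only forwards the new speakerIdentitys ref from useLiveKitSDK, so components such as UserCard can consume it without importing the SDK composable directly. A hedged usage sketch (speakingNames is an illustrative helper, not part of this change):

import { computed } from 'vue'
import { useQall } from '/@/composables/qall/useQall'

const { speakerIdentitys } = useQall()

// Illustrative derived list: display names (or identities) of active speakers.
const speakingNames = computed(() =>
  speakerIdentitys.value.map(s => s.name ?? s.identity)
)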
