From 5ddeebfcc02147e746e79214ce2ea4bf81a8f412 Mon Sep 17 00:00:00 2001
From: ikechan8370
Date: Sat, 4 Mar 2023 22:06:09 +0800
Subject: [PATCH] =?UTF-8?q?feat:=20=E6=AD=A3=E5=BC=8F=E5=A2=9E=E5=8A=A0?=
 =?UTF-8?q?=E8=AF=AD=E9=9F=B3=E6=A8=A1=E5=BC=8F=E6=94=AF=E6=8C=81?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 apps/chat.js     | 90 +++++++++++++++++++++++++++++++++++++-----------
 apps/help.js     | 10 ++++++
 guoba.support.js | 14 +++++++-
 utils/common.js  |  9 +++++
 utils/config.js  |  5 +--
 utils/tts.js     | 21 ++++++++++-
 6 files changed, 124 insertions(+), 25 deletions(-)

diff --git a/apps/chat.js b/apps/chat.js
index c282cbd0..c4920ae5 100644
--- a/apps/chat.js
+++ b/apps/chat.js
@@ -6,7 +6,15 @@ import delay from 'delay'
 import { ChatGPTAPI } from 'chatgpt'
 import { BingAIClient } from '@waylaidwanderer/chatgpt-api'
 import SydneyAIClient from '../utils/SydneyAIClient.js'
-import { render, getMessageById, makeForwardMsg, tryTimes, upsertMessage, randomString } from '../utils/common.js'
+import {
+  render,
+  getMessageById,
+  makeForwardMsg,
+  tryTimes,
+  upsertMessage,
+  randomString,
+  getDefaultUserSetting
+} from '../utils/common.js'
 import { ChatGPTPuppeteer } from '../utils/browser.js'
 import { KeyvFile } from 'keyv-file'
 import { OfficialChatGPTClient } from '../utils/message.js'
@@ -96,6 +104,14 @@
           reg: '^#chatgpt文本模式$',
           fnc: 'switch2Text'
         },
+        {
+          reg: '^#chatgpt语音模式$',
+          fnc: 'switch2Audio'
+        },
+        {
+          reg: '^#chatgpt设置语音角色',
+          fnc: 'setDefaultRole'
+        },
         {
           reg: '^#(chatgpt)清空(chat)?队列$',
           fnc: 'emptyQueue',
@@ -175,7 +191,7 @@
         await this.reply('依赖keyv未安装,请执行pnpm install keyv', true)
       }
       const conversationsCache = new Keyv(conversation)
-      console.log(`SydneyUser_${e.sender.user_id}`, await conversationsCache.get(`SydneyUser_${e.sender.user_id}`))
+      logger.info(`SydneyUser_${e.sender.user_id}`, await conversationsCache.get(`SydneyUser_${e.sender.user_id}`))
       await conversationsCache.delete(`SydneyUser_${e.sender.user_id}`)
       await this.reply('已退出当前对话,该对话仍然保留。请@我进行聊天以开启新的对话', true)
     } else {
@@ -313,11 +329,12 @@
   async switch2Picture (e) {
     let userSetting = await redis.get(`CHATGPT:USER:${e.sender.user_id}`)
     if (!userSetting) {
-      userSetting = { usePicture: true }
+      userSetting = getDefaultUserSetting()
     } else {
       userSetting = JSON.parse(userSetting)
     }
     userSetting.usePicture = true
+    userSetting.useTTS = false
     await redis.set(`CHATGPT:USER:${e.sender.user_id}`, JSON.stringify(userSetting))
     await this.reply('ChatGPT回复已转换为图片模式')
   }
@@ -325,15 +342,49 @@
   async switch2Text (e) {
     let userSetting = await redis.get(`CHATGPT:USER:${e.sender.user_id}`)
     if (!userSetting) {
-      userSetting = { usePicture: false }
+      userSetting = getDefaultUserSetting()
     } else {
       userSetting = JSON.parse(userSetting)
     }
     userSetting.usePicture = false
+    userSetting.useTTS = false
     await redis.set(`CHATGPT:USER:${e.sender.user_id}`, JSON.stringify(userSetting))
     await this.reply('ChatGPT回复已转换为文字模式')
   }
 
+  async switch2Audio (e) {
+    if (!Config.ttsSpace) {
+      await this.reply('您没有配置VITS API,请前往锅巴面板进行配置')
+      return
+    }
+    let userSetting = await redis.get(`CHATGPT:USER:${e.sender.user_id}`)
+    if (!userSetting) {
+      userSetting = getDefaultUserSetting()
+    } else {
+      userSetting = JSON.parse(userSetting)
+    }
+    userSetting.useTTS = true
+    await redis.set(`CHATGPT:USER:${e.sender.user_id}`, JSON.stringify(userSetting))
+    await this.reply('ChatGPT回复已转换为语音模式')
+  }
+
+  async setDefaultRole (e) {
+    if (!Config.ttsSpace) {
+      await this.reply('您没有配置VITS API,请前往锅巴面板进行配置')
+      return
+    }
+    let userSetting = await redis.get(`CHATGPT:USER:${e.sender.user_id}`)
+    if (!userSetting) {
+      userSetting = getDefaultUserSetting()
+    } else {
+      userSetting = JSON.parse(userSetting)
+    }
+    let speaker = _.trimStart(e.msg, '#chatgpt设置语音角色') || '随机'
+    userSetting.ttsRole = convertSpeaker(speaker)
+    await redis.set(`CHATGPT:USER:${e.sender.user_id}`, JSON.stringify(userSetting))
+    await this.reply(`您的默认语音角色已被设置为“${userSetting.ttsRole}”`)
+  }
+
   /**
    * #chatgpt
    * @param e oicq传递的事件参数e
@@ -354,8 +405,18 @@
         return false
       }
     }
-    let useTTS = false
-    let speaker = ''
+    let userSetting = await redis.get(`CHATGPT:USER:${e.sender.user_id}`)
+    if (userSetting) {
+      userSetting = JSON.parse(userSetting)
+      if (Object.keys(userSetting).indexOf('useTTS') < 0) {
+        userSetting.useTTS = Config.defaultUseTTS
+      }
+    } else {
+      userSetting = getDefaultUserSetting()
+    }
+    let useTTS = !!userSetting.useTTS
+    let speaker = convertSpeaker(userSetting.ttsRole || Config.defaultTTSRole)
+    // 每个回答可以指定
     let trySplit = prompt.split('回答:')
     if (trySplit.length > 1 && speakers.indexOf(convertSpeaker(trySplit[0])) > -1) {
       useTTS = true
@@ -445,11 +506,6 @@
       }
     }
     logger.info(`chatgpt prompt: ${prompt}`)
-    // try {
-    //   await this.chatGPTApi.init()
-    // } catch (e) {
-    //   await this.reply('chatgpt初始化出错:' + e.msg, true)
-    // }
     let previousConversation
     let conversation = {}
     if (use === 'api3') {
@@ -537,14 +593,7 @@
       await this.reply('返回内容存在敏感词,我不想回答你', true)
       return false
     }
-    let userSetting = await redis.get(`CHATGPT:USER:${e.sender.user_id}`)
-    if (userSetting) {
-      userSetting = JSON.parse(userSetting)
-    } else {
-      userSetting = {
-        usePicture: Config.defaultUsePicture
-      }
-    }
+
     let quotemessage = []
     if (chatMessage?.quote) {
       chatMessage.quote.forEach(function (item, index) {
@@ -554,7 +603,7 @@
       })
     }
     if (useTTS) {
-      if (Config.ttsSpace && response.length <= 99) {
+      if (Config.ttsSpace && response.length <= 299) {
        let wav = await generateAudio(response, speaker, '中文')
        e.reply(segment.record(wav))
      } else {
@@ -885,7 +934,6 @@
       .then(response => response.json())
       .then(data => {
         if (data.error) {
-          // console.log(data.error)
           this.reply('获取失败:' + data.error.code)
           return false
         } else {
diff --git a/apps/help.js b/apps/help.js
index a660adea..44200aa5 100644
--- a/apps/help.js
+++ b/apps/help.js
@@ -26,6 +26,16 @@ let helpData = [
         title: '#chatgpt文本模式',
         desc: '机器人以文本形式回答,默认选项'
       },
+      {
+        icon: 'text',
+        title: '#chatgpt语音模式',
+        desc: '机器人以语音形式回答'
+      },
+      {
+        icon: 'text',
+        title: '#chatgpt设置语音角色',
+        desc: '设置语音模式下回复的角色音色'
+      },
       {
         icon: 'text',
         title: '#chatgpt画图+prompt(/张数/图片大小)',
diff --git a/guoba.support.js b/guoba.support.js
index d54e6718..01b47036 100644
--- a/guoba.support.js
+++ b/guoba.support.js
@@ -46,9 +46,21 @@ export function supportGuoba () {
       {
         field: 'defaultUsePicture',
         label: '全局图片模式',
-        bottomHelpMessage: '全局默认以图片形式回复,并自动发出Continue命令补全回答。长回复可能会有bug。',
+        bottomHelpMessage: '全局默认以图片形式回复。',
         component: 'Switch'
       },
+      {
+        field: 'defaultUseTTS',
+        label: '全局语音模式',
+        bottomHelpMessage: '全局默认以语音形式回复,使用默认角色音色。',
+        component: 'Switch'
+      },
+      {
+        field: 'defaultTTSRole',
+        label: '语音模式默认角色',
+        bottomHelpMessage: '语音模式下,未指定角色时使用的角色。若为空,将使用随机角色回复。',
+        component: 'Input'
+      },
       {
         field: 'autoUsePicture',
         label: '长文本自动转图片',
diff --git a/utils/common.js b/utils/common.js
index 2ebcd9a7..539b88d4 100644
--- a/utils/common.js
+++ b/utils/common.js
@@ -5,6 +5,7 @@ import lodash from 'lodash'
 import fs from 'node:fs'
 import path from 'node:path'
 import puppeteer from '../../../lib/puppeteer/puppeteer.js'
+import { Config } from './config.js'
 // export function markdownToText (markdown) {
 //   return remark()
 //     .use(stripMarkdown)
@@ -287,3 +288,11 @@ export async function render (e, pluginKey, htmlPath, data = {}, renderCfg = {})
   }
   return renderCfg.retType === 'msgId' ? ret : true
 }
+
+export function getDefaultUserSetting () {
+  return {
+    usePicture: Config.defaultUsePicture,
+    useTTS: Config.defaultUseTTS,
+    ttsRole: Config.defaultTTSRole
+  }
+}
diff --git a/utils/config.js b/utils/config.js
index 040a262a..d555a097 100644
--- a/utils/config.js
+++ b/utils/config.js
@@ -1,12 +1,13 @@
 import fs from 'fs'
 import lodash from 'lodash'
-import { execSync } from 'child_process'
 
 const defaultConfig = {
   blockWords: ['屏蔽词1', '屏蔽词b'],
   promptBlockWords: ['屏蔽词1', '屏蔽词b'],
   imgOcr: true,
   defaultUsePicture: false,
+  defaultUseTTS: false,
+  defaultTTSRole: '纳西妲',
   autoUsePicture: true,
   autoUsePictureThreshold: 1200,
   conversationPreserveTime: 0,
@@ -43,7 +44,7 @@ const defaultConfig = {
   noiseScale: 0.6,
   noiseScaleW: 0.668,
   lengthScale: 1.2,
-  version: 'v2.0.17'
+  version: 'v2.0.18'
 }
 const _path = process.cwd()
 let config = {}
diff --git a/utils/tts.js b/utils/tts.js
index 3152649e..addb3cff 100644
--- a/utils/tts.js
+++ b/utils/tts.js
@@ -1,5 +1,6 @@
 import { Config } from './config.js'
 import fetch from 'node-fetch'
+import random from 'random'
 let proxy
 if (Config.proxy) {
   try {
@@ -24,7 +25,22 @@ const newFetch = (url, options = {}) => {
   return fetch(url, mergedOptions)
 }
 const space = Config.ttsSpace
-export async function generateAudio (text, speaker = '琪亚娜', language = '中文', noiseScale = Config.noiseScale, noiseScaleW = Config.noiseScaleW, lengthScale = Config.lengthScale) {
+
+function randomNum (minNum, maxNum) {
+  switch (arguments.length) {
+    case 1:
+      return parseInt(Math.random() * minNum + 1, 10)
+    case 2:
+      return parseInt(Math.random() * (maxNum - minNum + 1) + minNum, 10)
+    default:
+      return 0
+  }
+}
+export async function generateAudio (text, speaker = '随机', language = '中文', noiseScale = Config.noiseScale, noiseScaleW = Config.noiseScaleW, lengthScale = Config.lengthScale) {
+  if (!speaker || speaker === '随机') {
+    logger.info('随机角色!这次哪个角色这么幸运会被选到呢……')
+    speaker = speakers[randomNum(0, speakers.length - 1)]
+  }
   logger.info(`正在使用${speaker},基于文本:'${text}'生成语音`)
   let body = {
     data: [
@@ -43,6 +59,9 @@
   if (Config.debug) {
     logger.info(json)
   }
+  if (response.status > 299) {
+    logger.info(json)
+  }
   let [message, audioInfo, take] = json?.data
   logger.info(message, take)
   let audioLink = `${space}/file=${audioInfo.name}`
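
Below is a minimal usage sketch (not part of the patch) of how the pieces added here fit together: the per-user setting written by #chatgpt语音模式 / #chatgpt设置语音角色 is read back from redis, getDefaultUserSetting() supplies the new global defaults, and generateAudio() turns the reply into a voice message. It assumes the plugin's existing Yunzai globals (redis, segment) and the speakers/convertSpeaker exports from utils/tts.js; the replyWithTTS helper name and the relative import paths are illustrative only.

// Hypothetical helper illustrating the flow added by this patch; not code from the repo.
import { Config } from './utils/config.js'
import { getDefaultUserSetting } from './utils/common.js'
import { generateAudio, convertSpeaker } from './utils/tts.js'

async function replyWithTTS (e, response) {
  // Per-user setting saved by the new commands, falling back to the new global defaults.
  let userSetting = await redis.get(`CHATGPT:USER:${e.sender.user_id}`)
  userSetting = userSetting ? JSON.parse(userSetting) : getDefaultUserSetting()
  // Voice replies need a configured VITS space and a short enough text (same 299-char guard as the patch).
  if (userSetting.useTTS && Config.ttsSpace && response.length <= 299) {
    // '随机' (or an empty role) makes generateAudio pick a random entry from speakers.
    const speaker = convertSpeaker(userSetting.ttsRole || Config.defaultTTSRole || '随机')
    const wav = await generateAudio(response, speaker, '中文')
    return e.reply(segment.record(wav))
  }
  return e.reply(response) // voice mode off, no VITS API, or text too long: fall back to text
}

Keeping the setting as a single CHATGPT:USER:<user_id> record in redis is what lets the text/picture/voice switches and the default role persist outside the bot process, which is why every switch command rewrites the whole object.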