Compare commits

..

3 Commits

Author SHA1 Message Date
8a0c1d6876 status 2025-07-29 09:53:02 +08:00
f0bf3b6184 Fix a bug at the video transition point 2025-07-29 02:50:08 +08:00
a96fc86d42 Handle the ending check 2025-07-29 01:36:40 +08:00
43 changed files with 549 additions and 2460 deletions

View File

@ -1,44 +0,0 @@
name: Gitea Actions Demo
run-name: ${{ gitea.actor }} is testing out Gitea Actions 🚀
on:
push:
branches:
- 'new_female'
env:
BUILD: staging
jobs:
Explore-Gitea-Actions:
runs-on: stream9
steps:
- run: echo "🎉 The job was automatically triggered by a ${{ gitea.event_name }} event."
- run: echo "🐧 This job is now running on a ${{ runner.os }} server hosted by Gitea!"
- run: echo "🔎 The name of your branch is ${{ gitea.ref }} and your repository is ${{ gitea.repository }}."
- name: Check out repository code
uses: https://gitea.yantootech.com/neil/checkout@v4
- run: echo "💡 The ${{ gitea.repository }} repository has been cloned to the runner."
- run: echo "🖥️ The workflow is now ready to test your code on the runner."
- name: List files in the repository
run: |
whoami
uname -a
pwd
ls ${{ gitea.workspace }}
- name: Build and push
uses: https://gitea.yantootech.com/neil/build-push-action@v6
with:
push: false
tags: emotion-female-digital-video:${{ gitea.run_id }}
- name: Run docker
run: |
pwd
if [ "$(docker ps -q -f name=^emotion-female-digital-video$)" ]; then
docker stop emotion-female-digital-video
fi
docker run -d --rm --name emotion-female-digital-video \
-v /usr/share/fonts/opentype/noto:/usr/share/fonts \
-p 6901:3000 \
emotion-female-digital-video:${{ gitea.run_id }}
- run: echo "🍏 This job's status is ${{ job.status }}."

View File

@ -1,24 +0,0 @@
# Use the official Node.js runtime as the base image
FROM node:18-alpine
# Set the working directory
WORKDIR /app
# Copy package.json and yarn.lock
COPY package.json yarn.lock* ./
# Install project dependencies
RUN yarn install
# Copy the project files
COPY . .
# Set environment variables
ENV HOST=0.0.0.0
ENV PORT=3000
# Expose the port
EXPOSE 3000
# Start the project
CMD ["yarn", "dev"]

View File

@ -1,129 +0,0 @@
# Default Video Playback Fix
## Problem
During performance optimization, the default videos `d-3s.mp4` and `s-1.mp4` stopped playing correctly.
## Root Causes
1. **Overly aggressive cache policy**: shrinking the cache from 3 entries to 2 caused important videos to be evicted too early
2. **Missing default-video startup call**: `startCall()` never invoked `startDefaultVideoStream()`
3. **Insufficient protection for important videos**: the cleanup logic did not distinguish important videos from ordinary ones
## Fixes
### 1. Tune the cache policy
```javascript
// Before the fix
if (this.videoStreams.size >= 2) { // cache too small
    const firstKey = this.videoStreams.keys().next().value;
    // Evicts the first cached video, which may be an important one
}
// After the fix
if (this.videoStreams.size >= 4) { // larger cache
    const importantVideos = [this.defaultVideo, 's-1.mp4', 'd-3s.mp4'];
    const videoToRemove = cachedVideos.find(video => !importantVideos.includes(video));
    // Only evict non-important videos
}
```
### 2. Add the default-video startup call
```javascript
async startCall() {
    // ... other code ...
    // Start the default video stream
    await this.startDefaultVideoStream();
    // Notify the server that the call has started
    this.socket.emit('call-started');
}
```
### 3. Improve the preloading strategy
```javascript
async preloadCommonVideos() {
    const videosToPreload = new Set([]);
    // Add the important videos (default and frequently used)
    videosToPreload.add(this.defaultVideo); // default video
    videosToPreload.add('s-1.mp4'); // frequently used video
    videosToPreload.add('d-3s.mp4'); // alternate default video
    // Add every video referenced by the video mapping
    Object.values(this.videoMapping).forEach(video => {
        videosToPreload.add(video);
    });
}
```
### 4. Optimize cleanup in the performance monitor
```javascript
// If too many streams are cached, clean some up (but protect important videos)
if (this.videoStreams.size > 5) {
    const importantVideos = [this.defaultVideo, 's-1.mp4', 'd-3s.mp4'];
    // Only remove non-important videos
    const videosToRemove = cachedVideos.filter(video => !importantVideos.includes(video));
    videosToRemove.slice(0, 2).forEach(key => {
        this.cleanupVideoResources(key);
    });
}
```
### 5. Adjust performance-test thresholds
```javascript
// Check the number of video streams
if (testResults.metrics.videoStreamsCount > 5) { // raised from 3 to 5
    // report the issue
}
// Check the number of animation frames
if (testResults.metrics.animationFramesCount > 3) { // raised from 2 to 3
    // report the issue
}
```
## Important Videos
The following videos are marked as important and are never removed by automatic cleanup (a minimal eviction sketch follows this list):
- `d-3s.mp4` - default video
- `s-1.mp4` - frequently used video
- the current default video (`this.defaultVideo`)
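The fix snippets above show the protection check only inline. A minimal, self-contained sketch of the eviction rule is shown below; the `videoStreams` map, `defaultVideo` field, and `cleanupVideoResources` callback are illustrative stand-ins mirroring the snippets above, not the app's confirmed API.
```javascript
// Sketch: evict one cached video while protecting the important ones.
// The parameter names are assumptions mirroring the snippets above.
function evictOneVideo(videoStreams, defaultVideo, cleanupVideoResources) {
    const importantVideos = new Set([defaultVideo, 's-1.mp4', 'd-3s.mp4']);
    // Pick the first cached video that is not protected.
    const victim = [...videoStreams.keys()].find(name => !importantVideos.has(name));
    if (victim) {
        cleanupVideoResources(victim); // release the stream / video element
        videoStreams.delete(victim);   // drop it from the cache
    }
    return victim ?? null;
}
```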
## Test Feature
A test feature was added to verify default-video playback (a sketch of the test method follows this list):
1. **Test button**: a "Test default video" button
2. **Test method**: `testDefaultVideoPlayback()`
3. **Test flow**:
   - check that the default video file exists
   - create the default video stream
   - attach it to the video element and play it
   - stop the test automatically after 5 seconds
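The document does not show the body of `testDefaultVideoPlayback()`; the sketch below reconstructs it from the flow above. The `createVideoStream` helper, the `HEAD` request against `/videos/<file>`, and the optional cleanup call are assumptions about the client code, not its confirmed API.
```javascript
// Sketch of the test flow described above (assumed helper and element names).
async function testDefaultVideoPlayback(app) {
    const file = app.defaultVideo || 'd-3s.mp4';
    // 1. Check that the default video file exists.
    const head = await fetch(`/videos/${file}`, { method: 'HEAD' });
    if (!head.ok) throw new Error(`Default video missing: ${file}`);
    // 2. Create (or reuse) the default video stream.
    const stream = await app.createVideoStream(file);
    // 3. Attach it to the video element and start playback.
    const videoEl = document.getElementById('recordedVideo');
    videoEl.srcObject = stream;
    await videoEl.play();
    // 4. Stop the test automatically after 5 seconds.
    setTimeout(() => {
        videoEl.pause();
        app.cleanupVideoResources?.(file);
    }, 5000);
}
```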
## Verification Steps
1. Start the application
2. Click "Start audio call"
3. Confirm that the default video starts playing
4. Click the "Test default video" button to verify the feature
5. Check the performance-monitor panel to confirm the video-stream count
## Expected Results
After the fix, the default video should:
1. **Play normally**: start automatically when a call begins
2. **Stay cached**: important videos are never removed by the automatic cleanup
3. **Switch quickly**: preloading keeps video switching responsive
4. **Run stably**: the performance monitor no longer flags important videos as problems
## Monitoring Metrics
The key metrics to watch (a minimal check is sketched below):
- **Video stream count**: normal range is 1-5
- **Important-video protection**: `d-3s.mp4` and `s-1.mp4` must never be evicted
- **Default-video state**: the default video should be showing when a call starts
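A small helper that asserts these metrics could look like the sketch below; the property names on `app` (`videoStreams`, `currentVideo`, `defaultVideo`) mirror the snippets earlier in this document and are assumptions about the client code.
```javascript
// Sketch: verify the monitoring metrics listed above (assumed property names).
function checkVideoHealth(app) {
    const problems = [];
    // The video stream count should stay in the 1-5 range.
    const count = app.videoStreams.size;
    if (count < 1 || count > 5) problems.push(`video stream count out of range: ${count}`);
    // Important videos must still be cached.
    for (const name of ['d-3s.mp4', 's-1.mp4']) {
        if (!app.videoStreams.has(name)) problems.push(`important video evicted: ${name}`);
    }
    // The current video should be the default one at call start.
    if (app.currentVideo !== app.defaultVideo) {
        problems.push(`expected default video, got ${app.currentVideo}`);
    }
    return problems; // an empty array means all checks passed
}
```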

View File

@ -1,22 +0,0 @@
version: '3.8'
services:
webrtc-app:
build: .
ports:
- "3000:3000"
volumes:
- ./videos:/app/videos
restart: unless-stopped
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:3000"]
interval: 30s
timeout: 10s
retries: 3
start_period: 60s
networks:
- webrtc-network
networks:
webrtc-network:
driver: bridge

View File

@ -1,3 +0,0 @@
{
"currentSceneIndex": 0
}

server.js (312 changed lines)
View File

@ -8,17 +8,7 @@ const { MessageHistory } = require('./src/message_history.js');
const app = express();
const server = http.createServer(app);
const io = socketIo(server, {
pingTimeout: 300000, // 300-second timeout
pingInterval: 25000, // 25-second heartbeat interval
upgradeTimeout: 30000, // 30-second upgrade timeout
allowEIO3: true, // allow Engine.IO v3 clients
transports: ['websocket', 'polling'], // support multiple transports
cors: {
origin: "*",
methods: ["GET", "POST"]
}
});
const io = socketIo(server);
// Create the message history manager
const messageHistory = new MessageHistory();
@ -95,170 +85,18 @@ app.delete('/api/messages/clear', async (req, res) => {
// Store connected clients and their video-stream state
const connectedClients = new Map();
// Scene rotation system
// Scene rotation system - persistence added
// Delete this line: const fs = require('fs'); // duplicate declaration, must be removed
const sceneStateFile = path.join(__dirname, 'scene_state.json');
// Load the scene state from file
function loadSceneState() {
try {
if (fs.existsSync(sceneStateFile)) {
const data = fs.readFileSync(sceneStateFile, 'utf8');
const state = JSON.parse(data);
currentSceneIndex = state.currentSceneIndex || 0;
console.log(`从文件加载场景状态: ${currentSceneIndex} (${scenes[currentSceneIndex].name})`);
} else {
console.log('场景状态文件不存在,使用默认值: 0');
}
} catch (error) {
console.error('加载场景状态失败:', error);
currentSceneIndex = 0;
}
}
// Save the scene state to file
function saveSceneState() {
try {
const state = { currentSceneIndex };
fs.writeFileSync(sceneStateFile, JSON.stringify(state, null, 2));
console.log(`场景状态已保存: ${currentSceneIndex}`);
} catch (error) {
console.error('保存场景状态失败:', error);
}
}
let currentSceneIndex = 0;
const scenes = [
{
name: '起床-坐在床上',
defaultVideo: '8-5-qc-bd-female.mp4',
interactionVideo: '8-5-qc-sh-female.mp4',
tag: 'wakeup',
apiKey: 'bot-20250730213756-l627w', // 起床场景的API key
openingLines: [
'早啊~ 我头发是不是超乱?',
"刚醒就看到你,比闹钟好使一百倍。",
// "昨晚梦里喊你名字喊醒了,现在视频看到你,心跳比梦里还烫。",
// "你穿我T恤的样子...让我想咬住衣领不放。",
"宝宝早安!我昨晚梦见你啦。",
// "喉咙里有咖啡味...要不要来尝尝?记得上次你咬我锁骨时,咖啡还温着。",
"早,宝宝。吃早餐没?"
]
},
{
name: '走路去公司的路上',
defaultVideo: '8-5-sb-bd-female.mp4',
interactionVideo: '8-5-sb-sh-female.mp4',
tag: 'walk',
apiKey: 'bot-20250703161810-sgkrh', // 开车场景的API key
openingLines: [
// '宝宝,看到你那边阳光好好哦。我这边有点风,吹着挺舒服的,要是你在旁边牵着手就更好了。',
'宝宝,我快到公司啦。今天太阳真好!',
// '看到那个大钟楼没?每次路过都想你。想着要是能牵着你的手,站在最高层往下看该多好。',
'宝宝,我快到公司了,你到公司了吗?',
'宝宝,今天早晨的太阳真舒服!',
]
},
{
name: '在公司楼下喝咖啡',
defaultVideo: '8-8-hc-bd-2.mp4',
interactionVideo: '8-5-hc-sh-female.mp4',
tag: 'coffee',
apiKey: 'bot-20250805140055-ccdr6', // 喝茶场景的API key
openingLines: [
// '宝宝,今天这杯拿铁拉花好丑哦... 不过没关系,看着你的帅脸就平衡啦,你比咖啡提神!',
'嗯... 今天的咖啡香香的。',
'宝宝,我今天换了个口味,燕麦拿铁!',
// '宝宝,刚才店员问我糖浆加多少,我脱口而出‘和我男朋友一样’,说完自己都脸红了...',
// '对了宝宝,昨天说帮你找的资料,我存手机了,喝完这杯咖啡就发你哈!记得看。',
// '这杯好苦…但一看到你,就自动回甘了。比加十包糖都管用。你说你是不是我的专属甜味剂?'
]
},
{
name: '敷面膜-准备睡觉',
defaultVideo: '8-8-sj-bd.mp4',
interactionVideo: '8-8-sj-sh.mp4',
tag: 'sleep',
apiKey: 'bot-20250808120020-jfkmk', // 睡觉场景的API key
openingLines: [
'宝宝~ 我在敷面膜呢,你在干嘛?',
// '嗯?宝宝打来啦... 刚躺下贴上面膜,你就来了,像算准时间一样,真贴心。',
'哈,宝宝,选了个清洁面膜,有点刺刺的。你用的啥洗面奶来着?忘了...',
'在敷面膜呢,宝宝看我像不像外星人?',
'宝宝,今天累不累?',
// '宝宝... 这面膜说要敷15分钟正好陪你唠会儿。不过我得小声怕长皱纹',
// '好啦宝宝,面膜快干了,得去洗了。你先睡?... 嗯,梦里见呗。'
]
}
];
// Get the current scene
function getCurrentScene() {
return scenes[currentSceneIndex];
}
// Switch to the next scene - improved version
function switchToNextScene() {
const previousIndex = currentSceneIndex;
const previousScene = scenes[previousIndex].name;
currentSceneIndex = (currentSceneIndex + 1) % scenes.length;
const newScene = getCurrentScene();
console.log(`场景切换: ${previousScene}(${previousIndex}) → ${newScene.name}(${currentSceneIndex})`);
// 保存状态到文件
saveSceneState();
return newScene;
}
// Load the scene state when the server starts
async function initializeServer() {
try {
// 加载场景状态
loadSceneState();
await messageHistory.initialize();
console.log('消息历史初始化完成');
console.log(`当前场景: ${getCurrentScene().name} (索引: ${currentSceneIndex})`);
} catch (error) {
console.error('初始化服务器失败:', error);
}
}
// Video mapping configuration - resolved dynamically
function getVideoMapping() {
const currentScene = getCurrentScene();
return {
'defaultVideo': currentScene.defaultVideo,
'interactionVideo': currentScene.interactionVideo,
'tag': currentScene.tag
};
}
// Default video stream configuration - resolved dynamically
function getDefaultVideo() {
return getCurrentScene().defaultVideo;
}
let currentScene = getCurrentScene();
// Video mapping configuration
const videoMapping = {
// 'say-6s-m-e': '1-m.mp4',
'default': currentScene.defaultVideo,
'8-4-sh': currentScene.interactionVideo,
'tag': currentScene.tag
'default': 'd-3s.mp4',
// 'say-5s-amplitude': '2.mp4',
// 'say-5s-m-e': '4.mp4',
// 'say-5s-m-sw': 'd-0.mp4',
// 'say-3s-m-sw': '6.mp4',
// 'say-5s-m-sw': '5.mp4',
'say-3s-m-sw': 's-1.mp4',
};
// Default video stream configuration
const DEFAULT_VIDEO = currentScene.defaultVideo;
const DEFAULT_VIDEO = 'd-3s.mp4';
const INTERACTION_TIMEOUT = 10000; // return to the default video after 10 seconds
// Fetch the video list
@ -275,76 +113,26 @@ app.get('/api/videos', (req, res) => {
});
});
// API endpoint that returns the current scene info
app.get('/api/current-scene', (req, res) => {
const scene = getCurrentScene();
res.json({
name: scene.name,
tag: scene.tag,
apiKey: scene.apiKey,
defaultVideo: scene.defaultVideo,
interactionVideo: scene.interactionVideo
});
});
// Get the video mapping
app.get('/api/video-mapping', (req, res) => {
const currentMapping = getVideoMapping();
const dynamicMapping = {
'default': currentMapping.defaultVideo,
'8-4-sh': currentMapping.interactionVideo,
'tag': currentMapping.tag
};
res.json({ mapping: dynamicMapping });
res.json({ mapping: videoMapping });
});
// Get the default video
app.get('/api/default-video', (req, res) => {
res.json({
defaultVideo: getDefaultVideo(),
defaultVideo: DEFAULT_VIDEO,
autoLoop: true
});
});
// Added after the existing API endpoints
app.get('/api/current-scene/opening-line', (req, res) => {
try {
const currentScene = getCurrentScene();
if (currentScene && currentScene.openingLines && currentScene.openingLines.length > 0) {
// 随机选择一个开场白
const randomIndex = Math.floor(Math.random() * currentScene.openingLines.length);
const selectedOpeningLine = currentScene.openingLines[randomIndex];
res.json({
success: true,
openingLine: selectedOpeningLine,
sceneName: currentScene.name,
sceneTag: currentScene.tag
});
} else {
res.json({
success: false,
message: '当前场景没有配置开场白'
});
}
} catch (error) {
console.error('获取开场白失败:', error);
res.status(500).json({
success: false,
message: '获取开场白失败',
error: error.message
});
}
});
// Socket.IO connection handling
io.on('connection', (socket) => {
console.log('用户连接:', socket.id);
connectedClients.set(socket.id, {
socket: socket,
currentVideo: getDefaultVideo(),
isInInteraction: false,
hasTriggeredSceneSwitch: false // 添加这个标志
currentVideo: DEFAULT_VIDEO,
isInInteraction: false
});
// Handle WebRTC signaling - used to transport the video stream
@ -393,21 +181,21 @@ io.on('connection', (socket) => {
});
// If this is an interaction type, set a timer to return to the default video
// if (type === 'text' || type === 'voice') {
// setTimeout(() => {
// console.log(`交互超时,用户 ${socket.id} 回到默认视频`);
// if (client) {
// client.currentVideo = getDefaultVideo();
// client.isInInteraction = false;
// }
// // 广播回到默认视频的指令
// io.emit('video-stream-switched', {
// videoFile: getDefaultVideo(),
// type: 'default',
// from: socket.id
// });
// }, INTERACTION_TIMEOUT);
// }
if (type === 'text' || type === 'voice') {
setTimeout(() => {
console.log(`交互超时,用户 ${socket.id} 回到默认视频`);
if (client) {
client.currentVideo = DEFAULT_VIDEO;
client.isInInteraction = false;
}
// Broadcast the instruction to return to the default video
io.emit('video-stream-switched', {
videoFile: DEFAULT_VIDEO,
type: 'default',
from: socket.id
});
}, INTERACTION_TIMEOUT);
}
});
// Handle call start
@ -415,7 +203,7 @@ io.on('connection', (socket) => {
console.log('通话开始,用户:', socket.id);
const client = connectedClients.get(socket.id);
if (client) {
client.currentVideo = getDefaultVideo();
client.currentVideo = DEFAULT_VIDEO;
client.isInInteraction = false;
}
io.emit('call-started', { from: socket.id });
@ -474,57 +262,15 @@ io.on('connection', (socket) => {
console.log('用户请求回到默认视频:', socket.id);
const client = connectedClients.get(socket.id);
if (client) {
client.currentVideo = getDefaultVideo();
client.currentVideo = DEFAULT_VIDEO;
client.isInInteraction = false;
}
socket.emit('switch-video-stream', {
videoFile: getDefaultVideo(),
videoFile: DEFAULT_VIDEO,
type: 'default'
});
});
// Handle the user-initiated disconnect event
socket.on('user-disconnect', () => {
console.log('=== 场景切换开始 ===');
console.log('用户主动关闭连接:', socket.id);
console.log('切换前场景:', getCurrentScene().name, '(索引:', currentSceneIndex, ')');
// Switch to the next scene
const newScene = switchToNextScene();
console.log('切换后场景:', newScene.name, '(索引:', currentSceneIndex, ')');
// Check whether the scene switch has already been handled
const client = connectedClients.get(socket.id);
if (client && client.hasTriggeredSceneSwitch) {
console.log('场景切换已处理,跳过重复触发');
return;
}
// Mark the scene switch as handled
if (client) {
client.hasTriggeredSceneSwitch = true;
}
// Update videoMapping
const newMapping = getVideoMapping();
videoMapping['default'] = newMapping.defaultVideo;
videoMapping['8-4-sh'] = newMapping.interactionVideo;
videoMapping['tag'] = newMapping.tag;
// Broadcast the scene-switch event to all clients
io.emit('scene-switched', {
scene: newScene,
mapping: {
defaultVideo: newMapping.defaultVideo,
interactionVideo: newMapping.interactionVideo,
tag: newMapping.tag,
'default': newMapping.defaultVideo,
'8-4-sh': newMapping.interactionVideo
},
from: socket.id
});
});
// Disconnect
socket.on('disconnect', () => {
console.log('用户断开连接:', socket.id);
@ -534,11 +280,11 @@ io.on('connection', (socket) => {
// Start the server
const PORT = process.env.PORT || 3000;
server.listen(PORT, '0.0.0.0', async () => {
server.listen(PORT, async () => {
console.log(`服务器运行在端口 ${PORT}`);
await initializeServer();
});
// Export the message history manager for use by other modules
module.exports = { messageHistory };
console.log(`访问 http://localhost:${PORT} 开始使用`);
console.log(`访问 http://localhost:${PORT} 开始使用`);

View File

@ -3,7 +3,6 @@
class AudioProcessor {
constructor(options = {}) {
this.audioContext = null;
this.stream = null; // 添加这一行
this.isRecording = false;
this.audioChunks = [];
@ -312,29 +311,22 @@ class AudioProcessor {
}
// 开始录音
async startRecording(existingStream = null) {
async startRecording() {
try {
// 如果有外部提供的音频流,使用它;否则获取新的
if (existingStream) {
this.stream = existingStream;
console.log('使用外部提供的音频流');
} else {
this.stream = await navigator.mediaDevices.getUserMedia({
audio: {
sampleRate: 16000,
channelCount: 1,
echoCancellation: true,
noiseSuppression: true
}
});
console.log('获取新的音频流');
}
const stream = await navigator.mediaDevices.getUserMedia({
audio: {
sampleRate: 16000,
channelCount: 1,
echoCancellation: true,
noiseSuppression: true
}
});
this.audioContext = new (window.AudioContext || window.webkitAudioContext)({
sampleRate: 16000
});
const source = this.audioContext.createMediaStreamSource(this.stream);
const source = this.audioContext.createMediaStreamSource(stream);
const processor = this.audioContext.createScriptProcessor(4096, 1, 1);
processor.onaudioprocess = (event) => {
@ -351,13 +343,9 @@ class AudioProcessor {
source.connect(processor);
processor.connect(this.audioContext.destination);
// 保存处理器引用以便后续清理
this.processor = processor;
this.source = source;
this.isRecording = true;
this.onStatusUpdate('等待语音输入...', 'ready');
// 在startRecording方法的最后添加
if (this.adaptiveThreshold && this.noiseCalibrationSamples.length === 0) {
this.onStatusUpdate('正在校准背景噪音,请保持安静...', 'calibrating');
@ -374,34 +362,8 @@ class AudioProcessor {
// 停止录音
stopRecording() {
console.log('开始停止录音...');
// 断开音频节点连接
if (this.source) {
this.source.disconnect();
this.source = null;
}
if (this.processor) {
this.processor.disconnect();
this.processor = null;
}
// 停止所有音频轨道
if (this.stream) {
this.stream.getTracks().forEach(track => {
track.stop();
console.log(`停止音频轨道: ${track.label}`);
});
this.stream = null;
}
if (this.audioContext) {
this.audioContext.close().then(() => {
console.log('AudioContext已关闭');
}).catch(err => {
console.error('关闭AudioContext时出错:', err);
});
this.audioContext.close();
this.audioContext = null;
}
@ -415,20 +377,12 @@ class AudioProcessor {
this.handleSpeechEnd();
}
// 重置所有状态
this.isRecording = false;
this.isSpeaking = false;
this.audioBuffer = [];
this.audioChunks = [];
this.consecutiveFramesCount = 0;
this.frameBuffer = [];
// 重置校准状态,确保下次启动时重新校准
this.noiseCalibrationSamples = [];
this.isCalibrated = false;
this.onStatusUpdate('录音已完全停止', 'stopped');
console.log('录音已完全停止,所有资源已释放');
this.onStatusUpdate('录音已停止', 'stopped');
console.log('录音已停止');
}
// 获取录音状态

View File

@ -2,7 +2,7 @@
import { requestLLMStream } from './llm_stream.js';
import { requestMinimaxi } from './minimaxi_stream.js';
import { getLLMConfig, getLLMConfigByScene, getMinimaxiConfig, getAudioConfig, validateConfig } from './config.js';
import { getLLMConfig, getMinimaxiConfig, getAudioConfig, validateConfig } from './config.js';
// 防止重复播放的标志
let isPlaying = false;
@ -26,13 +26,12 @@ async function initializeHistoryMessage(recentCount = 5) {
const data = await response.json();
historyMessage = data.messages || [];
isInitialized = true;
console.log("历史消息初始化完成:", historyMessage.length, "条消息", historyMessage);
console.log("历史消息初始化完成:", historyMessage.length, "条消息");
return historyMessage;
} catch (error) {
console.error('获取历史消息失败,使用默认格式:', error);
historyMessage = [
// { role: 'system', content: 'You are a helpful assistant.' }
{ role: 'system', content: 'You are a helpful assistant.' }
];
isInitialized = true;
return historyMessage;
@ -43,7 +42,7 @@ async function initializeHistoryMessage(recentCount = 5) {
function getCurrentHistoryMessage() {
if (!isInitialized) {
console.warn('历史消息未初始化,返回默认消息');
return [];
return [{ role: 'system', content: 'You are a helpful assistant.' }];
}
return [...historyMessage]; // 返回副本,避免外部修改
}
@ -73,26 +72,19 @@ function updateHistoryMessage(userInput, assistantResponse) {
// 保存消息到服务端
async function saveMessage(userInput, assistantResponse) {
try {
// 验证参数是否有效
if (!userInput || !userInput.trim() || !assistantResponse || !assistantResponse.trim()) {
console.warn('跳过保存消息:用户输入或助手回复为空');
return;
}
const response = await fetch('/api/messages/save', {
method: 'POST',
headers: {
'Content-Type': 'application/json'
},
body: JSON.stringify({
userInput: userInput.trim(),
assistantResponse: assistantResponse.trim()
userInput,
assistantResponse
})
});
if (!response.ok) {
const errorData = await response.json().catch(() => ({}));
throw new Error(`保存消息失败: ${response.status} ${errorData.error || response.statusText}`);
throw new Error('保存消息失败');
}
console.log('消息已保存到服务端');
@ -104,7 +96,7 @@ async function saveMessage(userInput, assistantResponse) {
async function chatWithAudioStream(userInput) {
// 确保历史消息已初始化
if (!isInitialized) {
await initializeHistoryMessage(100);
await initializeHistoryMessage();
}
// 验证配置
@ -114,19 +106,16 @@ async function chatWithAudioStream(userInput) {
console.log('用户输入:', userInput);
// 获取当前场景对应的配置
const llmConfig = await getLLMConfigByScene();
// 获取配置
const llmConfig = getLLMConfig();
const minimaxiConfig = getMinimaxiConfig();
const audioConfig = getAudioConfig();
console.log(`当前场景: ${llmConfig.sceneName} (${llmConfig.sceneTag})`);
console.log(`使用API Key: ${llmConfig.model}...`);
// 清空音频队列
audioQueue = [];
// 定义段落处理函数
const handleSegment = async (segment, textPlay) => {
const handleSegment = async (segment) => {
console.log('\n=== 处理文本段落 ===');
console.log('段落内容:', segment);
@ -145,7 +134,6 @@ async function chatWithAudioStream(userInput) {
audio_setting: audioConfig.audioSetting,
},
stream: true,
textPlay: textPlay
});
// 将音频添加到播放队列
@ -197,7 +185,7 @@ async function chatWithAudioStream(userInput) {
}
// 导出初始化函数,供外部调用
export { chatWithAudioStream, initializeHistoryMessage, getCurrentHistoryMessage, saveMessage, updateHistoryMessage };
export { chatWithAudioStream, initializeHistoryMessage, getCurrentHistoryMessage };
// 处理音频播放队列
async function processAudioQueue() {
@ -322,4 +310,4 @@ async function playAudioStreamNode(audioHex) {
// export { chatWithAudioStream, playAudioStream, playAudioStreamNode, initializeHistoryMessage, getCurrentHistoryMessage };
// export { chatWithAudioStream, playAudioStream, playAudioStreamNode, initializeHistoryMessage, getCurrentHistoryMessage };

View File

@ -3,7 +3,7 @@ export const config = {
// LLM API配置
llm: {
apiKey: 'd012651b-a65b-4b13-8ff3-cc4ff3a29783', // 请替换为实际的API密钥
model: 'bot-20250730213756-l627w',
model: 'bot-20250720193048-84fkp',
},
// Minimaxi API配置
@ -16,7 +16,7 @@ export const config = {
audio: {
model: 'speech-02-hd',
voiceSetting: {
voice_id: 'yantu-qinggang-3',
voice_id: 'yantu-qinggang-2',
speed: 1,
vol: 1,
pitch: 0,
@ -70,32 +70,13 @@ export function validateConfig() {
}
// 获取配置的便捷方法
export function getLLMConfig(sceneApiKey = null) {
export function getLLMConfig() {
return {
apiKey: config.llm.apiKey, // 如果提供了场景API key则使用它
model: sceneApiKey || config.llm.model,
apiKey: config.llm.apiKey,
model: config.llm.model,
};
}
// 新增根据场景获取LLM配置
export async function getLLMConfigByScene() {
try {
const response = await fetch('/api/current-scene');
const sceneData = await response.json();
return {
apiKey: config.llm.apiKey,
model: sceneData.apiKey,
sceneTag: sceneData.tag,
sceneName: sceneData.name
};
} catch (error) {
console.warn('获取场景配置失败,使用默认配置:', error);
return getLLMConfig(); // 回退到默认配置
}
}
export function getMinimaxiConfig() {
return {
apiKey: config.minimaxi.apiKey,
@ -110,4 +91,4 @@ export function getAudioConfig() {
audioSetting: config.audio.audioSetting,
...config.system,
};
}
}

Binary file not shown. (image; size before change: 81 KiB)

View File

@ -2,527 +2,71 @@
<html lang="zh-CN">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=no">
<title>Soulmate In Parallels - 壹和零人工智能</title>
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>WebRTC 音频通话</title>
<link rel="stylesheet" href="styles.css">
<link rel="icon" type="image/png" sizes="48x48" href="favicon.png" />
<style>
/* 全屏视频样式 */
* {
margin: 0;
padding: 0;
box-sizing: border-box;
}
html, body {
height: 100%;
overflow: hidden;
background: linear-gradient(135deg, #87CEEB 0%, #B0E0E6 100%); /* 浅蓝色渐变背景 */
}
.container {
width: 100vw;
height: 100vh;
margin: 0;
padding: 0;
display: flex;
flex-direction: column;
position: relative;
}
.main-content {
flex: 1;
background: transparent;
border-radius: 0;
padding: 0;
box-shadow: none;
width: 100%;
height: 100%;
display: flex;
flex-direction: column;
}
.recorded-video-section {
flex: 1;
display: flex;
align-items: center;
justify-content: center;
width: 100%;
height: 100%;
position: relative;
/* 确保视频区域固定高度并居中 */
min-height: 100vh;
max-height: 100vh;
}
/* 视频容器样式 - 支持双缓冲固定9:16比例 */
.video-container {
position: relative;
width: 56.25vh; /* 9:16比例与视频宽度保持一致 */
height: 100vh;
overflow: hidden;
display: flex;
align-items: center;
justify-content: center;
margin: 0 auto; /* 水平居中 */
}
#recordedVideo, #recordedVideoBuffer {
position: absolute;
width: 56.25vh; /* 9:16比例高度为100vh时宽度为100vh * 9/16 = 56.25vh */
height: 100vh;
object-fit: cover;
border-radius: 0;
box-shadow: none;
transition: opacity 0.5s ease-in-out;
/* 确保视频始终居中 */
left: 50%;
top: 50%;
transform: translate(-50%, -50%);
}
/* 主视频默认显示 */
#recordedVideo {
opacity: 1;
z-index: 2;
}
/* 缓冲视频默认隐藏 */
#recordedVideoBuffer {
opacity: 0;
z-index: 1;
}
/* 切换状态 */
#recordedVideo.switching {
opacity: 0;
}
#recordedVideoBuffer.switching {
opacity: 1;
}
/* 加载状态 */
.video-loading {
position: absolute;
top: 50%;
left: 50%;
transform: translate(-50%, -50%);
z-index: 10;
color: white;
font-size: 18px;
opacity: 0;
transition: opacity 0.3s ease;
}
.video-loading.show {
opacity: 1;
}
/* 等待连接提示样式 */
.connection-waiting {
position: absolute;
top: 50%;
left: 50%;
transform: translate(-50%, -50%);
z-index: 20;
color: white;
font-size: 18px;
text-align: center;
background: rgba(0, 0, 0, 0.7);
padding: 30px;
border-radius: 15px;
backdrop-filter: blur(10px);
transition: opacity 0.3s ease;
}
.connection-waiting.show {
opacity: 1;
}
/* 加载动画 */
.loading-spinner {
width: 40px;
height: 40px;
border: 3px solid rgba(255, 255, 255, 0.3);
border-top: 3px solid white;
border-radius: 50%;
animation: spin 1s linear infinite;
margin: 0 auto 10px;
}
@keyframes spin {
0% { transform: rotate(0deg); }
100% { transform: rotate(360deg); }
}
/* 响应式设计 - 确保在不同屏幕尺寸下视频容器保持9:16比例 */
@media (max-width: 768px) {
.video-container {
height: 100vh;
width: 56.25vh; /* 9:16比例与视频宽度保持一致 */
}
#recordedVideo, #recordedVideoBuffer {
width: 56.25vh; /* 9:16比例 */
height: 100vh;
object-fit: cover;
}
}
@media (min-width: 769px) {
.video-container {
height: 100vh;
width: 56.25vh; /* 9:16比例与视频宽度保持一致 */
}
#recordedVideo, #recordedVideoBuffer {
width: 56.25vh; /* 9:16比例 */
height: 100vh;
object-fit: cover;
}
}
/* 横屏模式优化 */
@media (orientation: landscape) and (max-height: 500px) {
.video-container {
height: 100vh;
width: 56.25vh; /* 9:16比例与视频宽度保持一致 */
}
.controls {
bottom: 20px;
}
}
/* 竖屏模式优化 */
@media (orientation: portrait) {
.video-container {
height: 100vh;
width: 56.25vh; /* 9:16比例与视频宽度保持一致 */
}
}
.controls {
position: absolute;
bottom: 50px;
left: 50%;
transform: translateX(-50%);
z-index: 10;
display: flex !important;
flex-direction: row !important;
justify-content: center;
align-items: center;
gap: 20px;
}
/* 确保移动端也保持同一行 */
@media (max-width: 768px) {
.controls {
flex-direction: row !important;
gap: 15px;
}
}
#startButton {
width: 60px;
height: 60px;
border-radius: 50%;
background: rgba(34, 197, 94, 0.9);
backdrop-filter: blur(10px);
border: none;
cursor: pointer;
display: flex;
align-items: center;
justify-content: center;
transition: all 0.3s ease;
box-shadow: 0 4px 15px rgba(34, 197, 94, 0.3);
min-width: auto;
padding: 15px 30px;
font-size: 1.1rem;
border-radius: 25px;
min-width: 200px;
}
#startButton:hover:not(:disabled) {
background: rgba(22, 163, 74, 0.95);
transform: scale(1.1);
box-shadow: 0 6px 20px rgba(34, 197, 94, 0.5);
}
#startButton.connecting {
background: rgba(255, 193, 7, 0.9);
cursor: not-allowed;
}
#startButton.connecting:hover {
background: rgba(255, 193, 7, 0.9);
transform: none;
}
#startButton.calling {
background: rgba(255, 193, 7, 0.9);
animation: pulse 2s infinite;
}
#startButton.calling:hover {
background: rgba(255, 193, 7, 0.95);
transform: scale(1.05);
}
@keyframes pulse {
0% {
box-shadow: 0 4px 15px rgba(255, 193, 7, 0.3);
}
50% {
box-shadow: 0 6px 25px rgba(255, 193, 7, 0.6);
}
100% {
box-shadow: 0 4px 15px rgba(255, 193, 7, 0.3);
}
}
.audio-status {
position: absolute;
top: 20px;
left: 50%;
transform: translateX(-50%);
background: rgba(0, 0, 0, 0.7);
color: white;
padding: 8px 16px;
border-radius: 20px;
font-size: 14px;
z-index: 1000;
transition: all 0.3s ease;
}
.audio-status.connecting {
background: rgba(255, 193, 7, 0.9);
color: #000;
}
.audio-status.connected {
background: rgba(40, 167, 69, 0.9);
color: white;
}
.audio-status.error {
background: rgba(220, 53, 69, 0.9);
color: white;
}
#startButton svg {
width: 24px;
height: 24px;
fill: white;
}
#startButton:disabled {
opacity: 0.5;
cursor: not-allowed;
}
#stopButton {
width: 60px;
height: 60px;
border-radius: 50%;
background: rgba(220, 53, 69, 0.9);
backdrop-filter: blur(10px);
border: none;
cursor: pointer;
display: flex;
align-items: center;
justify-content: center;
transition: all 0.3s ease;
box-shadow: 0 4px 15px rgba(220, 53, 69, 0.3);
padding: 0; /* 确保没有内边距影响居中 */
}
#stopButton:hover:not(:disabled) {
background: rgba(200, 35, 51, 0.95);
transform: scale(1.1);
}
#stopButton svg {
width: 24px;
height: 24px;
display: block; /* 确保SVG作为块级元素 */
margin: auto; /* 额外的居中保证 */
}
#stopButton:disabled {
opacity: 0.5;
cursor: not-allowed;
}
/* 头像样式 - 确保显示 */
.avatar-container {
position: absolute;
top: 50%;
left: 50%;
transform: translate(-50%, -50%);
z-index: 15; /* 提高z-index确保在视频上方 */
display: flex;
flex-direction: column;
align-items: center;
justify-content: center;
transition: opacity 0.3s ease;
opacity: 1; /* 确保默认显示 */
}
.avatar-container.hidden {
opacity: 0;
pointer-events: none;
}
.avatar {
width: 120px;
height: 120px;
border-radius: 50%;
border: 4px solid rgba(255, 255, 255, 0.8);
box-shadow: 0 8px 32px rgba(0, 0, 0, 0.2);
/* background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); */
background: #000000;
display: flex;
align-items: center;
justify-content: center;
color: white;
font-size: 48px;
font-weight: bold;
overflow: hidden; /* 确保图片不会溢出 */
}
.avatar img {
width: 100%;
height: 100%;
border-radius: 50%;
object-fit: cover;
display: block; /* 确保图片显示 */
}
/* 确保视频默认隐藏 */
#recordedVideo, #recordedVideoBuffer {
position: absolute;
width: 56.25vh;
height: 100vh;
object-fit: cover;
border-radius: 0;
box-shadow: none;
/* transition: opacity 0.5s ease-in-out; */
left: 50%;
top: 50%;
transform: translate(-50%, -50%);
opacity: 1; /* 默认隐藏视频 */
z-index: 1; /* 确保在头像下方 */
}
/* 通话时隐藏头像,显示视频 */
.video-container.calling .avatar-container {
opacity: 0;
pointer-events: none;
}
.video-container.calling #recordedVideo {
opacity: 1;
z-index: 10;
}
</style>
</head>
<body>
<div class="container">
<!-- 隐藏的header -->
<header style="display: none;">
<header>
<h1>WebRTC 音频通话</h1>
<p>实时播放录制视频,支持文本和语音输入</p>
</header>
<div class="main-content">
<!-- 音频状态显示 - 完全隐藏 -->
<div class="audio-status" style="display: none;">
<!-- 音频状态显示 -->
<div class="audio-status">
<div class="status-indicator">
<span id="audioStatus" style="display: none;">未连接</span>
<span id="audioStatus">未连接</span>
</div>
</div>
<!-- 录制视频播放区域 - 全屏显示 -->
<!-- 录制视频播放区域 -->
<div class="recorded-video-section">
<div class="video-container" id="videoContainer">
<!-- 头像容器 -->
<div class="avatar-container" id="avatarContainer">
<div class="avatar" id="avatar">
<!-- 使用相对路径引用图片 -->
<img src="./tx.png" alt="头像" onerror="this.style.display='none'; this.parentElement.innerHTML='壹和零';">
</div>
<!-- <div class="avatar-name">AI助手</div> -->
</div>
<!-- 主视频元素 -->
<video id="recordedVideo" autoplay muted>
<source src="" type="video/mp4">
您的浏览器不支持视频播放
</video>
<!-- 缓冲视频元素 -->
<video id="recordedVideoBuffer" autoplay muted>
<source src="" type="video/mp4">
您的浏览器不支持视频播放
</video>
<!-- 加载指示器 -->
<div class="video-loading" id="videoLoading">
<div class="loading-spinner"></div>
<!-- <div>正在切换视频...</div> -->
</div>
<!-- 等待连接提示 -->
<div class="connection-waiting" id="connectionWaiting" style="display: none;">
<div class="loading-spinner"></div>
<div style="color: white; font-size: 18px; margin-top: 10px;">等待连接通话中...</div>
</div>
</div>
<div class="video-info" style="display: none;">
<h3>录制视频播放</h3>
<video id="recordedVideo" autoplay muted>
<source src="" type="video/mp4">
您的浏览器不支持视频播放
</video>
<div class="video-info">
<span id="currentVideoName">未选择视频</span>
</div>
</div>
<!-- 控制按钮 - 悬浮在视频上方 -->
<!-- 控制按钮 -->
<div class="controls">
<button id="startButton" class="btn btn-primary" title="开始通话">
<!-- 默认通话图标 -->
<svg id="callIcon" viewBox="0 0 24 24" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M6.62 10.79c1.44 2.83 3.76 5.14 6.59 6.59l2.2-2.2c.27-.27.67-.36 1.02-.24 1.12.37 2.33.57 3.57.57.55 0 1 .45 1 1V20c0 .55-.45 1-1 1-9.39 0-17-7.61-17-17 0-.55.45-1 1-1h3.5c.55 0 1 .45 1 1 0 1.25.2 2.45.57 3.57.11.35.03.74-.25 1.02l-2.2 2.2z" fill="white"/>
</svg>
<!-- 通话中文字显示(初始隐藏) -->
<span id="callingText" style="display: none; color: white; font-size: 14px;">正在通话中</span>
</button>
<button id="stopButton" class="btn btn-danger" disabled title="结束通话" style="display: none;">
<svg viewBox="0 0 24 24" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M19.23 15.26l-2.54-.29c-.61-.07-1.21.14-1.64.57l-1.84 1.84c-2.83-1.44-5.15-3.75-6.59-6.59l1.85-1.85c.43-.43.64-1.03.57-1.64l-.29-2.52c-.12-1.01-.97-1.77-1.99-1.77H5.03c-1.13 0-2.07.94-2 2.07.53 8.54 7.36 15.36 15.89 15.89 1.13.07 2.07-.87 2.07-2v-1.73c.01-1.01-.75-1.86-1.76-1.98z" fill="white"/>
<line x1="18" y1="6" x2="6" y2="18" stroke="white" stroke-width="2"/>
</svg>
</button>
<button id="startButton" class="btn btn-primary">开始音频通话</button>
<button id="stopButton" class="btn btn-danger" disabled>停止通话</button>
<!-- <button id="muteButton" class="btn btn-secondary">静音</button>
<button id="defaultVideoButton" class="btn btn-info">回到默认视频</button>
<button id="testVideoButton" class="btn btn-warning">测试视频文件</button> -->
</div>
<!-- 隐藏的输入区域 -->
<div class="input-section" style="display: none;">
<!-- 输入区域 -->
<div class="input-section">
<div class="text-input-group">
<input type="text" id="textInput" placeholder="输入文本内容..." />
<button id="sendTextButton" class="btn btn-primary">发送文本</button>
</div>
</div>
<!-- 隐藏的视频选择 -->
<div class="video-selection" style="display: none;">
<h3>选择要播放的视频</h3>
<div id="videoList" class="video-list">
<!-- 视频列表将在这里动态生成 -->
<div class="voice-input-group">
<button id="startVoiceButton" class="btn btn-success">开始语音输入</button>
<button id="stopVoiceButton" class="btn btn-warning" disabled>停止语音输入</button>
<span id="voiceStatus">点击开始语音输入</span>
</div>
</div>
<!-- 隐藏的状态显示 -->
<div class="status-section" style="display: none;">
<div id="connectionStatus" class="status" style="display: none;">未连接</div>
<!-- 视频选择 -->
<!-- <div class="video-selection">
<h3>选择要播放的视频</h3>
<div id="videoList" class="video-list">
视频列表将在这里动态生成 -->
<!-- </div>
</div> -->
<!-- 状态显示 -->
<div class="status-section">
<div id="connectionStatus" class="status">未连接</div>
<div id="messageLog" class="message-log"></div>
</div>
</div>
@ -535,4 +79,4 @@
<script src="/socket.io/socket.io.js"></script>
<script type="module" src="./index.js"></script>
</body>
</html>
</html>

File diff suppressed because it is too large Load Diff

View File

@ -1,35 +1,5 @@
// 以流式方式请求LLM大模型接口并打印流式返回内容
// Function that filters out narration/stage-direction content
function filterNarration(text) {
if (!text) return text;
// Match narration inside any kind of brackets,
// including:()、【】、[]、{}、〈〉、《》, etc.
const narrationPatterns = [
/([^)]*)/g, // fullwidth (Chinese) parentheses
/\([^)]*\)/g, // ASCII parentheses
/【[^】]*】/g, // Chinese square brackets
/\[[^\]]*\]/g, // ASCII square brackets
/\{[^}]*\}/g, // curly braces
/〈[^〉]*〉/g, // Chinese angle brackets
/《[^》]*》/g, // Chinese book-title marks
/<[^>]*>/g // ASCII angle brackets
];
let filteredText = text;
// Apply each filter pattern in turn
narrationPatterns.forEach(pattern => {
filteredText = filteredText.replace(pattern, '');
});
// Collapse extra whitespace and newlines
filteredText = filteredText.replace(/\s+/g, ' ').trim();
return filteredText;
}
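// Illustrative usage (added example, not from the original file): bracketed narration
// is stripped and surrounding whitespace collapsed, e.g.
// filterNarration('早啊(揉眼睛)~ 我头发是不是超乱?')  ->  '早啊~ 我头发是不是超乱?'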
async function requestLLMStream({ apiKey, model, messages, onSegment }) {
const response = await fetch('https://ark.cn-beijing.volces.com/api/v3/bots/chat/completions', {
method: 'POST',
@ -59,7 +29,7 @@ async function requestLLMStream({ apiKey, model, messages, onSegment }) {
let pendingText = ''; // 待处理的文本片段
// 分段分隔符
const segmentDelimiters = /[,。:;!?,.:;!?]|\.{3,}|……|…/;
const segmentDelimiters = /[,。:;!?,.:;!?]/;
while (!done) {
const { value, done: doneReading } = await reader.read();
@ -81,17 +51,9 @@ async function requestLLMStream({ apiKey, model, messages, onSegment }) {
if (jsonStr === '[DONE]') {
console.log('LLM SSE流结束');
// Process the final pending text, no matter whether it is longer than 5 characters
// Process the final pending text
if (pendingText.trim() && onSegment) {
console.log('处理最后的待处理文本:', pendingText.trim());
// 过滤旁白内容
const filteredText = filterNarration(pendingText.trim());
if (filteredText.trim()) {
console.log('过滤旁白后的最后文本:', filteredText);
await onSegment(filteredText, true);
} else {
console.log('最后的文本被完全过滤,跳过');
}
await onSegment(pendingText.trim());
}
continue;
}
@ -102,50 +64,27 @@ async function requestLLMStream({ apiKey, model, messages, onSegment }) {
const deltaContent = obj.choices[0].delta.content;
content += deltaContent;
pendingText += deltaContent;
console.log('【未过滤】LLM内容片段:', pendingText);
console.log('LLM内容片段:', deltaContent);
// 先过滤旁白,再检查分段分隔符
const filteredPendingText = filterNarration(pendingText);
// 检查过滤后的文本是否包含分段分隔符
if (segmentDelimiters.test(filteredPendingText)) {
// 按分隔符分割已过滤的文本
const segments = filteredPendingText.split(segmentDelimiters);
// 重新组合处理:只处理足够长的完整段落
let accumulatedText = '';
let hasProcessed = false;
// 检查是否包含分段分隔符
if (segmentDelimiters.test(pendingText)) {
// 按分隔符分割文本
const segments = pendingText.split(segmentDelimiters);
// 处理完整的段落(除了最后一个,因为可能不完整)
for (let i = 0; i < segments.length - 1; i++) {
const segment = segments[i].trim();
if (segment) {
accumulatedText += segment;
// 找到分隔符
const delimiterMatch = filteredPendingText.match(segmentDelimiters);
if (delimiterMatch) {
accumulatedText += delimiterMatch[0];
}
// If the accumulated text is longer than 8 characters, process it
if (accumulatedText.length > 8 && onSegment) {
console.log('【已过滤】检测到完整段落:', accumulatedText);
// 文本已经过滤过旁白,直接使用
if (accumulatedText.trim()) {
console.log('处理过滤后的文本:', accumulatedText);
await onSegment(accumulatedText, false);
}
hasProcessed = true;
accumulatedText = ''; // 重置
}
if (segment && onSegment) {
// 找到对应的分隔符
const delimiterMatch = pendingText.match(segmentDelimiters);
const segmentWithDelimiter = segment + (delimiterMatch ? delimiterMatch[0] : '');
console.log('检测到完整段落:', segmentWithDelimiter);
await onSegment(segmentWithDelimiter);
}
}
// 更新pendingText - 使用原始文本但需要相应调整
if (hasProcessed) {
// 计算已处理的原始文本长度更新pendingText
const processedLength = pendingText.length - (segments[segments.length - 1] || '').length;
pendingText = pendingText.substring(processedLength);
}
// 保留最后一个不完整的段落
pendingText = segments[segments.length - 1] || '';
}
}
} catch (e) {

View File

@ -1,11 +1,11 @@
// 以流式或非流式方式请求 minimaxi 大模型接口,并打印/返回内容
// import { text } from "express";
window.isPlaying = false;
// 在文件顶部添加音频播放相关的变量和函数
let audioContext = null;
let audioQueue = []; // 音频队列
let isPlaying = false;
// let isPlaying = false;
let isProcessingQueue = false; // 队列处理状态
let nextStartTime = 0; // 添加这行来声明 nextStartTime 变量
@ -52,26 +52,25 @@ async function addAudioToQueue(audioHex) {
console.error('音频解码失败:', error);
}
}
let isFirstChunk = true;
// 队列处理器 - 独立运行,按顺序播放音频
async function processAudioQueue() {
if (isProcessingQueue) return;
isProcessingQueue = true;
while (audioQueue.length > 0 && !isPlaying) {
console.log('开始处理音频队列');
console.log('开始处理音频队列');
let isFirstChunk = true;
while (audioQueue.length > 0 || window.isPlaying) {
// 如果当前没有音频在播放,且队列中有音频
if (!isPlaying && audioQueue.length > 0) {
if (!window.isPlaying && audioQueue.length > 0) {
const audioItem = audioQueue.shift();
const sayName = '8-4-sh'
const targetVideo = window.webrtcApp.interactionVideo
const sayName = 'say-3s-m-sw'
const targetVideo = 's-1.mp4'
// 如果是第一个音频片段,触发视频切换
if (sayName != window.webrtcApp.currentVideoTag && window.webrtcApp && window.webrtcApp.switchVideoStream) {
if (sayName != window.webrtcApp.currentVideoTag && window.webrtcApp && window.webrtcApp.handleTextInput) {
try {
console.log('--------------触发视频切换:', sayName);
window.webrtcApp.switchVideoStream(targetVideo, 'audio', '8-4-sh');
await window.webrtcApp.switchVideoWithReplaceTrack(targetVideo, 'audio', 'say-3s-m-sw');
isFirstChunk = false;
window.webrtcApp.currentVideoTag = sayName;
} catch (error) {
@ -86,21 +85,12 @@ async function processAudioQueue() {
}
isProcessingQueue = false;
// 等待当前音频播放完成后再切换回默认视频
// while (isPlaying) {
// console.log("触发音频等待")
// await new Promise(resolve => setTimeout(resolve, 1000));
// }
// console.log("触发音频等待")
// await new Promise(resolve => setTimeout(resolve, 300));
const text = 'default'
console.log("音频结束------------------------", window.webrtcApp.currentVideoTag, isPlaying)
if (window.webrtcApp.currentVideoTag != text && !isPlaying) {
isFirstChunk = true
await window.webrtcApp.socket.emit('voice-input', { text });
if (window.webrtcApp.currentVideoTag != text) {
window.webrtcApp.currentVideoTag = text
window.webrtcApp.switchVideoStream(window.webrtcApp.defaultVideo, 'audio', text);
await window.webrtcApp.switchVideoWithReplaceTrack(window.webrtcApp.defaultVideo, 'audio', text);
}
console.log('音频队列处理完成');
}
@ -114,29 +104,29 @@ function playAudioData(audioData) {
source.buffer = audioData;
source.connect(ctx.destination);
isPlaying = true;
window.isPlaying = true;
source.onended = () => {
console.log('音频片段播放完成');
isPlaying = false;
window.isPlaying = false;
resolve();
};
// 超时保护
// setTimeout(() => {
// if (isPlaying) {
// console.log('音频播放超时,强制结束');
// isPlaying = false;
// resolve();
// }
// }, (audioData.duration + 0.5) * 1000);
setTimeout(() => {
if (window.isPlaying) {
console.log('音频播放超时,强制结束');
window.isPlaying = false;
resolve();
}
}, (audioData.duration + 0.5) * 1000);
source.start(0);
console.log(`开始播放音频片段,时长: ${audioData.duration}`);
} catch (error) {
console.error('播放音频失败:', error);
isPlaying = false;
window.isPlaying = false;
resolve();
}
});
@ -162,10 +152,10 @@ function getQueueStatus() {
// 移除waitForCurrentAudioToFinish函数不再需要
async function requestMinimaxi({ apiKey, groupId, body, stream = true , textPlay = false}) {
async function requestMinimaxi({ apiKey, groupId, body, stream = true }) {
const url = `https://api.minimaxi.com/v1/t2a_v2`;
const reqBody = { ...body, stream };
isPlaying = textPlay
// 添加这两行变量定义
let isFirstChunk = true;
// const currentText = body.text;
@ -232,8 +222,8 @@ async function requestMinimaxi({ apiKey, groupId, body, stream = true , textPlay
// 流式解析每个chunk实时播放音频
if (obj.data && obj.data.audio && obj.data.status === 1) {
console.log('收到音频数据片段!', obj.data.audio.length);
// audioHex += obj.data.audio;
audioHex = obj.data.audio;
audioHex += obj.data.audio;
// const sayName = 'say-5s-m-sw'
// // 如果是第一个音频片段,触发视频切换
// if (isFirstChunk && sayName != window.webrtcApp.currentVideoName && window.webrtcApp && window.webrtcApp.handleTextInput) {
@ -254,7 +244,7 @@ async function requestMinimaxi({ apiKey, groupId, body, stream = true , textPlay
// const text = 'default'
// await window.webrtcApp.socket.emit('text-input', { text });
// await window.webrtcApp.handleTextInput(text);
lastFullResult = null;
lastFullResult = obj;
console.log('收到最终状态');
}
} catch (e) {
@ -271,7 +261,7 @@ async function requestMinimaxi({ apiKey, groupId, body, stream = true , textPlay
const obj = JSON.parse(line);
if (obj.data && obj.data.audio) {
console.log('收到无data:音频数据!', obj.data.audio.length);
audioHex = obj.data.audio;
audioHex += obj.data.audio;
// 立即播放这个音频片段
await playAudioChunk(obj.data.audio);
@ -431,4 +421,4 @@ function generateUUID() {
});
}
export { requestMinimaxi, requestVolcanTTS, addAudioToQueue };
export { requestMinimaxi, requestVolcanTTS };

View File

@ -101,14 +101,6 @@ header p {
.recorded-video-section {
margin-bottom: 30px;
text-align: center;
display: flex;
flex-direction: column;
align-items: center;
justify-content: center;
/* 确保视频区域固定高度并居中 */
min-height: 100vh;
max-height: 100vh;
width: 100%;
}
.recorded-video-section h3 {
@ -117,22 +109,14 @@ header p {
}
#recordedVideo {
max-width: 100%;
max-height: 100%;
width: 100%;
height: 100%;
border-radius: 0;
box-shadow: none;
object-fit: cover; /* 覆盖整个容器 */
background: transparent; /* 透明背景 */
max-width: 400px; /* 限制最大宽度 */
aspect-ratio: 9/16; /* 固定9:16比例 */
border-radius: 10px;
box-shadow: 0 5px 15px rgba(0,0,0,0.2);
object-fit: cover; /* 确保视频填充容器 */
background: #000; /* 视频背景色 */
transition: opacity 0.15s; /* 添加透明度过渡效果 */
margin: 0 auto; /* 左右居中 */
display: block; /* 确保块级显示 */
/* 确保视频始终居中 */
position: absolute;
left: 50%;
top: 50%;
transform: translate(-50%, -50%);
}
/* 视频加载时的样式 */
@ -439,50 +423,6 @@ header p {
.video-list {
grid-template-columns: 1fr;
}
/* 移动端视频容器优化 */
.video-container {
height: 100vh;
width: 100vw;
}
#recordedVideo {
width: 100%;
height: 100%;
object-fit: cover;
}
}
/* 桌面端视频容器优化 */
@media (min-width: 769px) {
.video-container {
height: 100vh;
width: 100vw;
}
#recordedVideo {
width: 100%;
height: 100%;
object-fit: cover;
}
}
/* 横屏模式优化 */
@media (orientation: landscape) and (max-height: 500px) {
.video-container {
height: 100vh;
}
.controls {
bottom: 20px;
}
}
/* 竖屏模式优化 */
@media (orientation: portrait) {
.video-container {
height: 100vh;
}
}
/* 动画效果 */
@ -508,22 +448,42 @@ header p {
}
#recordedVideo {
transition: opacity 0.1s ease-in-out; /* 缩短过渡时间 */
transition: opacity 0.2s ease-in-out;
background-color: #1a1a1a; /* 深灰色背景,避免纯黑 */
}
#recordedVideo.loading {
opacity: 0.9; /* 提高loading时的透明度减少黑屏感 */
opacity: 0.8; /* 加载时稍微降低透明度,但不完全隐藏 */
}
#recordedVideo.playing {
opacity: 1;
}
/* 优化加载指示器 */
/* 添加加载指示器 */
.video-container {
position: relative;
}
.video-container::before {
content: '';
position: absolute;
top: 50%;
left: 50%;
width: 40px;
height: 40px;
margin: -20px 0 0 -20px;
border: 3px solid #333;
border-top: 3px solid #fff;
border-radius: 50%;
animation: spin 1s linear infinite;
opacity: 0;
z-index: 10;
transition: opacity 0.3s;
}
.video-container.loading::before {
opacity: 0.8; /* 降低加载指示器的透明度 */
border-top-color: #667eea; /* 使用主题色 */
opacity: 1;
}
@keyframes spin {

Binary file not shown. (image; size before change: 287 KiB)

View File

@ -13,11 +13,11 @@ export async function playVideoWithAudio(videoPath, text) {
apiKey: minimaxiConfig.apiKey,
groupId: minimaxiConfig.groupId,
body: {
model: 'speech-01-turbo',
model: 'speech-02-hd',
text,
output_format: 'hex', // 流式场景必须使用hex
voice_setting: {
voice_id: 'tianbing_xinggan_03',
voice_id: 'yantu-qinggang',
speed: 1
}
},

View File

@ -1,89 +0,0 @@
// 视频播放队列系统测试
// 这个文件用于测试新的视频播放逻辑
export class VideoQueueTester {
constructor(webrtcApp) {
this.webrtcApp = webrtcApp;
}
// 测试视频队列功能
async testVideoQueue() {
console.log('开始测试视频播放队列系统...');
// 测试1: 添加视频到队列
await this.testAddToQueue();
// 测试2: 测试视频播放完成等待
await this.testWaitForVideoFinish();
// 测试3: 测试音频视频同步
await this.testAudioVideoSync();
console.log('视频播放队列系统测试完成');
}
// 测试添加视频到队列
async testAddToQueue() {
console.log('测试1: 添加视频到队列');
// 清空队列
this.webrtcApp.videoQueue = [];
// 添加测试视频
await this.webrtcApp.addToVideoQueue('5.mp4', 'test', '测试视频1');
await this.webrtcApp.addToVideoQueue('s-1.mp4', 'test', '测试视频2');
console.log(`队列长度: ${this.webrtcApp.videoQueue.length}`);
console.log('队列内容:', this.webrtcApp.videoQueue);
}
// 测试等待视频播放完成
async testWaitForVideoFinish() {
console.log('测试2: 等待视频播放完成');
// 模拟视频播放状态
this.webrtcApp.isVideoPlaying = true;
// 模拟视频播放完成
setTimeout(() => {
this.webrtcApp.isVideoPlaying = false;
console.log('模拟视频播放完成');
}, 2000);
console.log('等待视频播放完成...');
await this.webrtcApp.waitForCurrentVideoToFinish();
console.log('视频播放完成等待测试通过');
}
// 测试音频视频同步
async testAudioVideoSync() {
console.log('测试3: 音频视频同步');
// 模拟音频播放开始
window.isPlaying = true;
// 添加视频到队列
await this.webrtcApp.addToVideoQueue('5.mp4', 'audio', '音频同步测试');
// 模拟音频播放结束
setTimeout(() => {
window.isPlaying = false;
console.log('模拟音频播放结束');
}, 3000);
console.log('音频视频同步测试完成');
}
// 运行所有测试
async runAllTests() {
try {
await this.testVideoQueue();
console.log('所有测试通过!');
} catch (error) {
console.error('测试失败:', error);
}
}
}
// 导出测试类
export default VideoQueueTester;

BIN
videos/0.mp4 Normal file

Binary file not shown.

BIN
videos/1-m.mp4 Normal file

Binary file not shown.

BIN
videos/2.mp4 Normal file

Binary file not shown.

BIN
videos/4-m.mp4 Normal file

Binary file not shown.

BIN
videos/5.mp4 Normal file

Binary file not shown.

BIN
videos/6.mp4 Normal file

Binary file not shown.

Binary files not shown. (several additional changed binary video files)

BIN
videos/d-3s.mp4 Normal file

Binary file not shown.

Binary files not shown. (further changed binary video files, names not listed)

BIN
videos/s-1.mp4 Normal file

Binary file not shown.