mobileNet/game/钢琴/index.html

<!DOCTYPE html>
<html lang="zh-CN">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>AI 空气钢琴 - 手势控制</title>
<style>
* {
margin: 0;
padding: 0;
box-sizing: border-box;
}
body {
font-family: 'Arial', sans-serif;
height: 100vh;
display: flex;
flex-direction: column;
align-items: center;
justify-content: center;
/* 钢琴主题背景色 */
background: linear-gradient(135deg, #2c3e50 0%, #34495e 100%);
color: white;
overflow: hidden;
}
h1 {
font-size: 3.5em;
margin-bottom: 5px;
color: #ecf0f1; /* 亮灰色 */
text-shadow: 3px 3px 6px rgba(0, 0, 0, 0.4);
}
.subtitle {
font-size: 1.2em;
margin-bottom: 30px;
color: #bdc3c7; /* 浅灰色 */
}
#main-app {
display: flex;
gap: 30px;
align-items: flex-start;
flex-wrap: wrap;
justify-content: center;
padding: 20px;
}
#video-feed-container {
position: relative;
width: 480px;
height: 360px;
background: rgba(0, 0, 0, 0.5);
border: 5px solid #bdc3c7; /* 浅灰色边框 */
border-radius: 15px;
box-shadow: 0 10px 30px rgba(0, 0, 0, 0.5);
overflow: hidden;
}
#video-feed-container video {
width: 100%;
height: 100%;
object-fit: cover;
border-radius: 10px;
}
#video-feed-container canvas {
position: absolute;
top: 0;
left: 0;
width: 100%;
height: 100%;
border-radius: 10px;
}
.status-display {
position: absolute;
bottom: 0px;
left: 0;
right: 0;
height: 40px;
background: rgba(0, 0, 0, 0.7);
color: #2ecc71; /* 绿色状态文本 */
display: flex;
align-items: center;
justify-content: center;
font-size: 0.95em;
border-bottom-left-radius: 10px;
border-bottom-right-radius: 10px;
font-weight: bold;
text-shadow: 1px 1px 2px rgba(0,0,0,0.3);
}
#control-panel {
background: rgba(0, 0, 0, 0.6);
padding: 25px;
border-radius: 15px;
box-shadow: 0 5px 20px rgba(0, 0, 0, 0.4);
display: flex;
flex-direction: column;
gap: 20px;
min-width: 350px;
}
.panel-section {
padding: 15px;
border-radius: 10px;
background: rgba(255, 255, 255, 0.1);
text-align: center;
}
.panel-section h3 {
color: #ADD8E6; /* 浅蓝色标题 */
margin-bottom: 15px;
font-size: 1.5em;
}
.panel-section p {
font-size: 1.1em;
margin-bottom: 10px;
color: #e0f2f7;
}
.action-button {
padding: 12px 25px;
font-size: 1.1em;
border: none;
border-radius: 8px;
cursor: pointer;
transition: all 0.3s ease;
box-shadow: 0 4px 8px rgba(0, 0, 0, 0.3);
color: white;
font-weight: bold;
min-width: 150px;
}
#importModelBtn {
background: linear-gradient(45deg, #f39c12, #f1c40f); /* 橙黄渐变 */
}
#importModelBtn:hover {
background: linear-gradient(45deg, #e67e22, #f39c12);
transform: translateY(-2px);
}
#importModelBtn:disabled {
background: #607d8b;
cursor: not-allowed;
opacity: 0.7;
}
#startStopBtn {
background: linear-gradient(45deg, #27ae60, #2ecc71); /* 绿色渐变 */
}
#startStopBtn.playing {
background: linear-gradient(45deg, #c0392b, #e74c3c); /* 红色渐变 */
}
#startStopBtn:hover:not(:disabled) {
transform: translateY(-2px);
}
#startStopBtn:disabled {
background: #607d8b;
cursor: not-allowed;
opacity: 0.7;
}
.info-item {
font-size: 1.1em;
padding: 8px 0;
border-bottom: 1px solid rgba(255, 255, 255, 0.1);
}
.info-item:last-child {
border-bottom: none;
}
.info-label {
font-weight: bold;
color: #dbe4ee;
}
.info-value {
float: right;
color: #e0f2f7;
}
#currentPlayingAction {
font-size: 1.8em;
font-weight: bold;
color: #f1c40f; /* 鲜黄色 */
text-shadow: 0 0 10px rgba(241, 196, 15, 0.6);
margin-top: 15px;
animation: pulseText 1s infinite alternate;
}
@keyframes pulseText {
from { transform: scale(1); opacity: 1; }
to { transform: scale(1.05); opacity: 0.9; }
}
/* 手部关键点和连接线样式 */
.keypoint {
fill: #f1c40f; /* 关键点颜色 */
stroke: #f1c40f;
stroke-width: 2px;
}
.connection {
stroke: #3498db; /* 连接线颜色 蓝色 */
stroke-width: 3px;
}
.mapping-list {
list-style: none;
padding: 0;
text-align: left;
margin-top: 10px;
}
.mapping-list li {
margin-bottom: 5px;
font-size: 0.95em;
color: #e0f2f7;
}
.mapping-list li strong {
color: #ADD8E6;
min-width: 70px;
display: inline-block;
}
.mapping-list li span.action-name {
color: #f1c40f;
}
</style>
<!-- 引入 TensorFlow.js 核心库 -->
<script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs@4.20.0/dist/tf.min.js"></script>
<!-- KNN 分类器 -->
<script src="https://cdn.jsdelivr.net/npm/@tensorflow-models/knn-classifier@1.2.2/dist/knn-classifier.min.js"></script>
<!-- !!! 重点:MediaPipe Hands 解决方案文件 -->
<script src="https://cdn.jsdelivr.net/npm/@mediapipe/hands"></script>
<!-- TensorFlow Models - Hand Pose Detection 库 -->
<script src="https://cdn.jsdelivr.net/npm/@tensorflow-models/hand-pose-detection"></script>
</head>
<body>
<h1>AI 空气钢琴</h1>
<p class="subtitle">通过手势弹奏虚拟钢琴!</p>
<div id="main-app">
<!-- 视频流区域 -->
<div id="video-feed-container">
<video id="videoFeed" autoplay muted playsinline></video>
<canvas id="poseCanvas"></canvas>
<div class="status-display" id="globalStatus">正在加载模型,请稍候...</div>
</div>
<!-- 控制面板 -->
<div id="control-panel">
<div class="panel-section">
<h3>模型管理</h3>
<input type="file" id="fileImporter" accept=".json" style="display: none;">
<button id="importModelBtn" class="action-button">导入手势模型</button>
</div>
<div class="panel-section">
<h3>演奏控制</h3>
<button id="startStopBtn" class="action-button" disabled>开始演奏</button>
<div class="info-item" style="margin-top: 15px;">
<span class="info-label">实时手势:</span>
<span class="info-value" id="currentGestureDisplay">未识别</span>
</div>
<div class="info-item">
<span class="info-label">置信度:</span>
<span class="info-value" id="confidenceDisplay">0%</span>
</div>
<div class="info-item">
<span class="info-label">当前演奏音符:</span> <!-- 文本修改为音符 -->
<span class="info-value" id="currentPlayingAction"></span>
</div>
</div>
<div class="panel-section">
<h3>音符映射</h3> <!-- 文本修改为音符 -->
<p>请确保您的手势分类与音符对应:</p>
<ul class="mapping-list" id="gestureMappingList">
<!-- JS会动态填充此列表 -->
<li><strong>ID 0:</strong> <span class="action-name">中央C (C4)</span> → 音段 1</li>
<li><strong>ID 1:</strong> <span class="action-name">D4</span> → 音段 2</li>
<li><strong>ID 2:</strong> <span class="action-name">E4</span> → 音段 3</li>
<li><strong>ID 3:</strong> <span class="action-name">F4</span> → 音段 4</li>
<li><strong>ID 4:</strong> <span class="action-name">G4</span> → 音段 5</li>
<li><strong>ID 5:</strong> <span class="action-name">A4</span> → 音段 6</li>
<li><strong>ID 6:</strong> <span class="action-name">B4</span> → 音段 7</li>
<li><strong>ID 7:</strong> <span class="action-name">高音C (C5)</span> → 音段 8</li>
</ul>
</div>
</div>
</div>
<script>
// ==========================================================
// 全局变量和 DOM 引用
// ==========================================================
const videoElement = document.getElementById('videoFeed');
const poseCanvas = document.getElementById('poseCanvas');
const poseCtx = poseCanvas.getContext('2d');
const globalStatusDisplay = document.getElementById('globalStatus');
const currentGestureDisplay = document.getElementById('currentGestureDisplay');
const confidenceDisplay = document.getElementById('confidenceDisplay');
const currentPlayingActionDisplay = document.getElementById('currentPlayingAction');
const gestureMappingList = document.getElementById('gestureMappingList');
const importModelBtn = document.getElementById('importModelBtn');
const fileImporter = document.getElementById('fileImporter');
const startStopBtn = document.getElementById('startStopBtn');
let detector; // Hand Pose Detection Detector
let classifier; // KNN 分类器
let isHandDetectionReady = false;
let isModelLoaded = false;
let isPlaying = false;
let animationFrameId;
let currentDetectedClassId = null;
let currentPlayingActionId = null;
// 用于映射手势分类ID到音频文件和音符名称
// !!! IMPORTANT: 请确保这些音频文件存在于 'sounds/' 文件夹中,并且名称正确 !!!
const gestureClassToAudioMap = {
'0': { name: '中央C (C2)', audio: new Audio('sounds/C2.mp3') },
'1': { name: 'D2', audio: new Audio('sounds/D2.mp3') },
'2': { name: 'E2', audio: new Audio('sounds/E2.mp3') },
'3': { name: 'F2', audio: new Audio('sounds/F2.mp3') },
'4': { name: 'G2', audio: new Audio('sounds/G2.mp3') },
'5': { name: 'A2', audio: new Audio('sounds/A2.mp3') },
'6': { name: 'B2', audio: new Audio('sounds/B2.mp3') },
'7': { name: '高音C (C3)', audio: new Audio('sounds/C3.mp3') },
'8': { name: '空', audio: new Audio('sounds/rest.mp3') } // 如果需要休息/不触发音符的类别
};
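// Note: entries '0'-'7' correspond to the eight notes listed in the UI mapping panel;
// entry '8' is an optional "rest" class for a trained no-note gesture.
// The names here (C2...C3) are what the UI ends up showing, because updateGestureMappingUI()
// prefers these names over the static C4...C5 labels in the HTML above.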
// 可配置项
const MIN_CONFIDENCE_THRESHOLD = 0.30; // 最低置信度阈值 (30%)
// MediaPipe Hands 模型连接点,用于绘制骨骼
const HAND_CONNECTIONS = [
[0, 1], [1, 2], [2, 3], [3, 4], // Thumb (大拇指)
[0, 5], [5, 6], [6, 7], [7, 8], // Index (食指)
[0, 9], [9, 10], [10, 11], [11, 12], // Middle (中指)
[0, 13], [13, 14], [14, 15], [15, 16], // Ring (无名指)
[0, 17], [17, 18], [18, 19], [19, 20], // Pinky (小指)
// 掌心连接 (连接手指基部,形成手掌轮廓;[0,5] 和 [0,17] 已在上面列出)
[5, 9], [9, 13], [13, 17] // 闭合手掌
];
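// Reference: MediaPipe Hands reports 21 keypoints per hand in a fixed order:
// 0 = wrist, 1-4 = thumb (CMC/MCP/IP/TIP), 5-8 = index finger (MCP/PIP/DIP/TIP),
// 9-12 = middle, 13-16 = ring, 17-20 = pinky. Both drawHand() and flattenHand() below
// rely on this ordering.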
// ==========================================================
// 初始化函数
// ==========================================================
document.addEventListener('DOMContentLoaded', initApp);
async function initApp() {
updateGlobalStatus('正在加载手势模型和摄像头...');
lockControls(true);
try {
// 初始化 KNN 分类器
classifier = knnClassifier.create();
// --- 切换为手部检测器 ---
const model = handPoseDetection.SupportedModels.MediaPipeHands;
const detectorConfig = {
runtime: 'mediapipe',
solutionPath: 'https://cdn.jsdelivr.net/npm/@mediapipe/hands'
};
detector = await handPoseDetection.createDetector(model, detectorConfig);
await setupCamera();
isHandDetectionReady = true;
updateGlobalStatus('手部检测器和摄像头已就绪。');
lockControls(false);
startStopBtn.disabled = true;
// 启动手势检测循环
startDetectionLoop();
// 绑定按钮事件
importModelBtn.addEventListener('click', () => fileImporter.click());
fileImporter.addEventListener('change', (event) => loadKNNModelData(event.target.files[0])); // 改为通用加载函数
startStopBtn.addEventListener('click', togglePlaying);
// 预加载所有音频
preloadAudios();
// 初始更新映射 UI,显示所有 8 个音符的映射
updateGestureMappingUI();
// --- 新增:尝试自动从 CDN 加载 KNN 模型数据 ---
// !!! 请替换为你的实际 CDN 模型 URL !!!
const cdnJsonUrl = 'https://goood-space-assets.oss-cn-beijing.aliyuncs.com/public/models/hand-knn-model.json';
// 如果你的 KNN 数据是分 bin 而不是直接包含在 json,你需要类似上次 script.js 那样处理两个文件
// 但通常 KNN Classifier 的 export/import 是一个单一 JSON 文件
console.log(`尝试从 CDN 自动加载 KNN 模型数据: ${cdnJsonUrl}`);
updateGlobalStatus('正在尝试从 CDN 加载手势识别模型...', 'loading');
try {
await loadKNNModelData(null, cdnJsonUrl); // 传递 CDN URL不传文件
updateGlobalStatus('CDN 手势识别模型加载成功!', 'success');
isModelLoaded = true;
importModelBtn.disabled = true; // 自动加载成功后禁用手动导入
startStopBtn.disabled = false;
} catch (cdnError) {
console.warn('CDN KNN 模型数据自动加载失败:', cdnError);
updateGlobalStatus(`CDN 模型加载失败: ${cdnError.message}。请手动导入模型。`, 'warning');
isModelLoaded = false;
importModelBtn.disabled = false; // CDN 失败,允许手动导入
startStopBtn.disabled = true;
}
// --- 结束 CDN 自动加载 ---
} catch (error) {
console.error("应用初始化失败:", error);
updateGlobalStatus(`初始化失败: ${error.message}`, 'error');
alert(`应用初始化失败: ${error.message}\n请检查摄像头权限、网络连接或刷新页面。`);
}
}
// ==========================================================
// 摄像头和手部检测相关
// ==========================================================
async function setupCamera() {
try {
const stream = await navigator.mediaDevices.getUserMedia({
video: {
width: { ideal: videoElement.width || 480 },
height: { ideal: videoElement.height || 360 },
facingMode: 'user'
}
});
videoElement.srcObject = stream;
return new Promise((resolve, reject) => {
videoElement.onloadedmetadata = () => {
videoElement.play().then(() => {
poseCanvas.width = videoElement.videoWidth;
poseCanvas.height = videoElement.videoHeight;
resolve();
}).catch(reject);
};
setTimeout(() => reject(new Error('摄像头元数据加载或播放超时')), 10000);
});
} catch (error) {
if (error.name === 'NotAllowedError') {
throw new Error('用户拒绝了摄像头权限。');
} else if (error.name === 'NotFoundError') {
throw new Error('未找到摄像头设备。');
} else {
throw error;
}
}
}
async function startDetectionLoop() {
if (!isHandDetectionReady) return;
async function detectAndPredict() {
try {
const hands = await detector.estimateHands(videoElement, { flipHorizontal: false });
poseCtx.clearRect(0, 0, poseCanvas.width, poseCanvas.height);
// 声明并初始化 currentConfidencePercentage
let currentConfidencePercentage = "0"; // 默认值为字符串 "0",避免未定义错误
if (hands && hands.length > 0) {
drawHand(hands[0]);
if (isModelLoaded && isPlaying) {
const handTensor = flattenHand(hands[0]);
if (classifier.getNumClasses() > 0) {
const prediction = await classifier.predictClass(handTensor);
handTensor.dispose();
const predictedClassId = prediction.label;
const confidence = prediction.confidences[predictedClassId];
// 始终计算并更新 currentConfidencePercentage
currentConfidencePercentage = (confidence * 100).toFixed(1);
if (confidence > MIN_CONFIDENCE_THRESHOLD) {
currentDetectedClassId = predictedClassId;
const noteInfo = gestureClassToAudioMap[predictedClassId];
if (noteInfo) {
// 这里使用 currentConfidencePercentage
currentGestureDisplay.textContent = `${noteInfo.name} (C:${currentConfidencePercentage}%)`;
if (currentPlayingActionId !== predictedClassId) {
playAudioForNote(predictedClassId, noteInfo.audio, noteInfo.name);
currentPlayingActionId = predictedClassId;
}
} else {
currentGestureDisplay.textContent = `未知音符 (ID:${predictedClassId})`;
currentPlayingActionId = null;
currentPlayingActionDisplay.textContent = '无';
}
} else {
// 置信度不足
currentDetectedClassId = null;
// 这里也使用 currentConfidencePercentage
currentGestureDisplay.textContent = `未识别 (C:${currentConfidencePercentage}%)`;
if(currentPlayingActionId !== null) {
stopAllPlayingAudios();
currentPlayingActionId = null;
currentPlayingActionDisplay.textContent = '无';
}
}
} else {
// 分类器没有数据
handTensor.dispose();
currentDetectedClassId = null;
currentGestureDisplay.textContent = '模型无数据';
currentPlayingActionId = null;
currentPlayingActionDisplay.textContent = '无';
stopAllPlayingAudios();
}
} else {
// 非演奏模式或未加载模型
currentDetectedClassId = null;
currentGestureDisplay.textContent = '静止';
currentPlayingActionId = null;
currentPlayingActionDisplay.textContent = '无';
stopAllPlayingAudios();
}
} else {
// 未检测到手部
currentDetectedClassId = null;
currentGestureDisplay.textContent = '请将手放入画面';
currentPlayingActionId = null;
currentPlayingActionDisplay.textContent = '无';
stopAllPlayingAudios();
}
// 无论如何,确保 confidenceDisplay 总是更新
confidenceDisplay.textContent = `${currentConfidencePercentage}%`;
} catch (error) {
console.error('手势检测或预测出错:', error);
updateGlobalStatus(`检测错误: ${error.message}`, 'error');
} finally {
animationFrameId = requestAnimationFrame(detectAndPredict);
}
}
animationFrameId = requestAnimationFrame(detectAndPredict);
}
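// Note on the loop above: the next frame is scheduled from the `finally` block, so at most
// one estimateHands() call is in flight at a time, and the loop keeps running even if a
// single frame throws.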
function flattenHand(hand) {
const keypoints = hand.keypoints.map(p => [p.x / videoElement.videoWidth, p.y / videoElement.videoHeight]).flat();
return tf.tensor(keypoints);
}
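// flattenHand() above normalizes keypoints by the video frame size, so the 42-value
// feature vector (21 points x 2) still depends on where the hand sits in the frame.
// A minimal, optional sketch of a translation-invariant variant is shown below
// (commented out, not used by this page); it assumes the same keypoint order and must
// match whatever preprocessing was used when the KNN model was trained.
// function flattenHandRelativeToWrist(hand) {
// const wrist = hand.keypoints[0];
// const scale = Math.max(videoElement.videoWidth, videoElement.videoHeight);
// const coords = hand.keypoints.map(p => [(p.x - wrist.x) / scale, (p.y - wrist.y) / scale]).flat();
// return tf.tensor(coords);
// }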
function drawHand(hand) {
if (hand.keypoints) {
const keypoints = hand.keypoints;
poseCtx.strokeStyle = '#3498db'; /* 蓝色连接线 */
poseCtx.lineWidth = 3;
poseCtx.shadowColor = '#3498db';
poseCtx.shadowBlur = 5;
for (const connection of HAND_CONNECTIONS) {
const start = keypoints[connection[0]];
const end = keypoints[connection[1]];
if (start && end) {
poseCtx.beginPath();
poseCtx.moveTo(start.x, start.y);
poseCtx.lineTo(end.x, end.y);
poseCtx.stroke();
}
}
poseCtx.fillStyle = '#f1c40f'; /* 黄色关键点 */
poseCtx.shadowColor = '#f1c40f';
poseCtx.shadowBlur = 8;
for (const keypoint of keypoints) {
if (keypoint) {
poseCtx.beginPath();
poseCtx.arc(keypoint.x, keypoint.y, 5, 0, 2 * Math.PI);
poseCtx.fill();
}
}
poseCtx.shadowBlur = 0;
poseCtx.shadowColor = 'transparent';
}
}
// ==========================================================
// 模型导入 - 改造为支持文件和 URL 两种方式
// ==========================================================
/**
* 加载 KNN 模型数据,支持从文件或 CDN URL 加载。
* @param {File} [file] - 可选,用户选择的 KNN 模型 JSON 文件。
* @param {string} [cdnUrl] - 可选,KNN 模型 JSON 文件的 CDN URL。
* @returns {Promise<void>}
*/
async function loadKNNModelData(file = null, cdnUrl = null) {
updateGlobalStatus('正在加载模型...', 'loading');
lockControls(true);
try {
let loadedModelData;
if (file) {
const reader = new FileReader();
const fileReadPromise = new Promise((resolve, reject) => {
reader.onload = e => resolve(JSON.parse(e.target.result));
reader.onerror = error => reject(new Error('文件读取失败。'));
reader.readAsText(file);
});
loadedModelData = await fileReadPromise;
} else if (cdnUrl) {
const response = await fetch(cdnUrl);
if (!response.ok) {
throw new Error(`无法从 ${cdnUrl} 加载模型数据: ${response.statusText}`);
}
loadedModelData = await response.json();
} else {
throw new Error('未提供模型文件或 CDN URL。');
}
// 确保模型包含 classMap 和 dataset
if (!loadedModelData || !loadedModelData.dataset || !loadedModelData.classMap) {
throw new Error('模型数据结构不正确 (缺少 classMap 或 dataset)。');
}
classifier.clearAllClasses();
const dataset = {};
let totalExamples = 0;
for (const classId in loadedModelData.dataset) {
const classData = loadedModelData.dataset[classId];
if (classData && classData.length > 0) {
// 验证数据格式
if (classData.some(sample => !Array.isArray(sample) || sample.some(val => typeof val !== 'number'))) {
throw new Error(`类别 ${classId} 包含无效的样本数据 (不是数字数组)。`);
}
const tensors = classData.map(data => tf.tensor1d(data));
const stacked = tf.stack(tensors);
dataset[classId] = stacked;
totalExamples += classData.length;
tensors.forEach(t => t.dispose());
} else {
console.warn(`类别 ${classId} 没有样本数据。`);
}
}
classifier.setClassifierDataset(dataset);
// 可以在这里根据 loadedModelData.classMap 动态更新 gestureClassToAudioMap 中的 name 字段
// 以便 UI 上的音符名称直接从训练模型中获取。
// 例如:
// loadedModelData.classMap.forEach(item => {
// if (gestureClassToAudioMap[item.classId]) {
// gestureClassToAudioMap[item.classId].name = item.className;
// }
// });
updateGestureMappingUI(); // 再次调用以确保UI更新
console.log(`模型加载成功!共加载 ${totalExamples} 个样本。`);
console.log('导入类别映射:', loadedModelData.classMap);
isModelLoaded = true;
lockControls(false);
importModelBtn.disabled = true; // 成功加载后应禁用手动导入,除非你想支持重新导入
startStopBtn.disabled = false;
updateGlobalStatus('手势模型加载成功!', 'success');
} catch (error) {
console.error('模型加载失败:', error);
updateGlobalStatus(`模型加载失败: ${error.message}`, 'error');
alert(`模型加载失败: ${error.message}\n请确保文件是正确的模型JSON文件或 CDN URL 可访问。`);
isModelLoaded = false;
lockControls(false);
importModelBtn.disabled = false; // 失败后允许手动导入
startStopBtn.disabled = true;
throw error; // 重新抛出错误,以便调用者(如 initApp)能捕获
} finally {
fileImporter.value = ''; // 清除文件选择器的值,以便可以再次选择相同文件
}
}
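/*
* Expected shape of the imported model JSON, inferred from the loader above
* (only `dataset` and `classMap` are actually required by this page):
*
* {
*   "classMap": { "0": "C4", "1": "D4", ... },   // or an array, depending on the exporter
*   "dataset": {
*     "0": [[x0, y0, x1, y1, ...], ...],         // one 42-number array per sample, matching flattenHand()
*     "1": [[...], ...]
*   }
* }
*
* The exporting (training) page is assumed to use the same flattenHand() preprocessing
* as this page; otherwise predictions will be meaningless.
*/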
// ==========================================================
// 音频播放逻辑
// ==========================================================
function preloadAudios() {
for (const key in gestureClassToAudioMap) {
const audioObj = gestureClassToAudioMap[key].audio;
if (audioObj) {
audioObj.load();
console.log(`预加载音频: ${audioObj.src}`);
}
}
}
// 播放单个音符
function playAudioForNote(noteId, audioObj, noteName) {
// 每次播放新音符时,停止之前所有可能正在播放的音符
stopAllPlayingAudios();
// 确保音频从头开始播放
if (audioObj.readyState >= 2) {
audioObj.currentTime = 0;
}
audioObj.play().catch(e => {
console.warn('音频播放失败 (可能需要用户交互):', e);
// 可以在这里提示用户点击任意地方进行交互,以便后续播放音频
// 例如:document.body.addEventListener('click', () => audioObj.play(), { once: true });
});
currentPlayingActionDisplay.textContent = noteName;
}
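// Expanding on the note above: browsers block audio until the page has received a user
// gesture. Here the "开始演奏" click normally satisfies that autoplay policy; if playback
// is still blocked, a one-time unlock along these lines could be added (a hedged sketch,
// not wired up in this page):
// document.body.addEventListener('click', () => {
// for (const key in gestureClassToAudioMap) {
// const a = gestureClassToAudioMap[key].audio;
// a.play().then(() => { a.pause(); a.currentTime = 0; }).catch(() => {});
// }
// }, { once: true });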
// 停止所有正在播放的音频
function stopAllPlayingAudios() {
for (const key in gestureClassToAudioMap) {
const audioObj = gestureClassToAudioMap[key].audio;
if (audioObj && !audioObj.paused) {
audioObj.pause();
audioObj.currentTime = 0; // 重置到开始
}
}
}
// ==========================================================
// UI 更新和控制
// ==========================================================
function updateGlobalStatus(message, type = 'info') {
globalStatusDisplay.textContent = message;
// 重置颜色以便不同类型的状态信息有不同的视觉反馈
globalStatusDisplay.style.color = '#bdc3c7'; // 默认颜色
if (type === 'error') {
globalStatusDisplay.style.color = '#e74c3c'; /* 红色 */
} else if (type === 'success') {
globalStatusDisplay.style.color = '#2ecc71'; /* 绿色 */
} else if (type === 'warning') {
globalStatusDisplay.style.color = '#f1c40f'; /* 黄色 */
}
// loading 状态可以考虑添加一个动画或特定的颜色
}
function lockControls(lock) {
// 在某些状态下,即使不处于锁定状态,按钮也可能被特定逻辑禁用
// 所以这里只处理整体的禁用/启用
importModelBtn.disabled = lock || isModelLoaded; // 如果模型已加载,手动导入按钮通常也应禁用
startStopBtn.disabled = lock || !isModelLoaded; // 必须加载模型才能开始
}
function togglePlaying() {
if (!isModelLoaded) {
alert('请先导入手势模型!');
return;
}
if (classifier.getNumClasses() === 0) {
alert('已导入的模型中没有训练数据,请导入一个有效的模型文件。');
return;
}
// 这里的警告信息如果是在自动加载时已经给出,可以考虑不重复。
// 考虑到用户可能会忽略,此处保留。
if (classifier.getNumClasses() < Object.keys(gestureClassToAudioMap).length && classifier.getNumClasses() > 0) {
alert(`警告:导入的模型只包含 ${classifier.getNumClasses()} 个类别,但需要 ${Object.keys(gestureClassToAudioMap).length} 个音符手势。请确保导入完整的模型!若要继续,点击确定。`);
}
isPlaying = !isPlaying;
if (isPlaying) {
startStopBtn.textContent = '停止演奏';
startStopBtn.classList.add('playing');
importModelBtn.disabled = true; // 演奏时不能切换模型
updateGlobalStatus('开始演奏,请摆出您的钢琴手势!', 'info'); // 文本修改
currentPlayingActionDisplay.textContent = '无';
} else {
startStopBtn.textContent = '开始演奏';
startStopBtn.classList.remove('playing');
importModelBtn.disabled = false; // 停止演奏后允许重新导入
updateGlobalStatus('已停止演奏,等待您开始。', 'ready');
currentPlayingActionDisplay.textContent = '无';
currentPlayingActionId = null; // 停止演奏时重置当前播放音符ID
stopAllPlayingAudios(); // 停止所有正在播放的音频
}
currentDetectedClassId = null;
// currentConfidence 在 detectAndPredict 循环中更新
currentGestureDisplay.textContent = '静止';
confidenceDisplay.textContent = '0%';
}
function updateGestureMappingUI() {
gestureMappingList.innerHTML = '';
// 这种情况下,我们假设 mapping 列表应该显示所有预设的8个音符
const orderedNoteNames = [
'中央C (C4)', 'D4', 'E4', 'F4', 'G4', 'A4', 'B4', '高音C (C5)'
];
for (let i = 0; i < orderedNoteNames.length; i++) {
const classId = String(i); // 确保 classId 是字符串以匹配 map keys
const noteName = orderedNoteNames[i];
const listItem = document.createElement('li');
// 从 gestureClassToAudioMap 获取的 name 可以更准确地显示
const mappedNoteName = gestureClassToAudioMap[classId] ? gestureClassToAudioMap[classId].name : noteName;
listItem.innerHTML = `<strong>ID ${classId}:</strong> <span class="action-name">${mappedNoteName}</span> → 音段 ${i + 1}`;
gestureMappingList.appendChild(listItem);
}
// 如果 classifier 中有额外或不同的类别,也可以在这里显示
// for (const classId of classifier.getClassNames()) {
// if (!gestureClassToAudioMap[classId]) {
// const listItem = document.createElement('li');
// listItem.innerHTML = `<strong>ID ${classId}:</strong> <span class="action-name">未知/自定义手势</span>`;
// gestureMappingList.appendChild(listItem);
// }
// }
}
// --- 应用启动和清理 ---
window.onbeforeunload = () => {
if (animationFrameId) {
cancelAnimationFrame(animationFrameId);
}
if (detector) {
detector.dispose();
}
if (classifier) {
classifier.clearAllClasses();
}
tf.disposeVariables(); // 释放 TF.js 中注册的变量
stopAllPlayingAudios(); // 页面关闭时停止所有音频
console.log('Resources cleaned up.');
};
</script>
</body>
</html>