<!--
  JusTalk / templates/index.html
  (file-viewer metadata: mizz12 — アプリとHTMLファイルを更新 — commit d45f43f, 8.33 kB)
  NOTE(review): these lines are scraped viewer chrome, not part of the template;
  wrapped in a comment so the document starts validly at <!DOCTYPE html>.
-->
<!DOCTYPE html>
<html lang="ja">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Voice Recorder Interface</title>
<style>
body {
display: flex;
flex-direction: column;
justify-content: center;
align-items: center;
height: 100vh;
margin: 0;
background-color: #121212;
color: white;
}
.chart {
width: 300px;
height: 300px;
margin-bottom: 50px; /* 円グラフとボタンの間隔を狭く */
}
.record-button {
position: fixed;
bottom: 30px;
width: 80px;
height: 80px;
background-color: transparent;
border-radius: 50%;
border: 4px solid white;
display: flex;
justify-content: center;
align-items: center;
cursor: pointer;
box-shadow: 0 4px 6px rgba(0, 0, 0, 0.4);
transition: all 0.2s ease;
}
.record-icon {
width: 60px;
height: 60px;
background-color: #d32f2f;
border-radius: 50%;
transition: all 0.2s ease;
}
.recording .record-icon {
width: 40px;
height: 40px;
border-radius: 10%;
}
.result-buttons {
margin-top: 5px; /* ボタン間の距離を少し縮める */
display: flex;
gap: 10px;
}
.result-button {
padding: 10px 20px;
background-color: #4caf50;
border: none;
border-radius: 5px;
color: white;
cursor: pointer;
box-shadow: 0 4px 6px rgba(0, 0, 0, 0.4);
transition: background-color 0.2s ease;
}
.result-button:hover {
background-color: #388e3c;
}
</style>
<script src="https://cdn.jsdelivr.net/npm/chart.js"></script>
</head>
<body>
<div class="chart">
<canvas id="speechChart"></canvas>
</div>
<button class="record-button" id="recordButton" onclick="toggleRecording()">
<div class="record-icon" id="recordIcon"></div>
</button>
<div class="result-buttons">
<button class="result-button" id="historyButton" onclick="showHistory()">
会話履歴を表示
</button>
<button class="result-button" id="feedbackButton" onclick="showResults()">
フィードバック画面を表示
</button>
</div>
<script>
// --- Recording state shared by toggleRecording() / sendAudioChunks() below ---
let isRecording = false; // true while the user has recording toggled on
let mediaRecorder; // MediaRecorder bound to the active microphone stream
let audioChunks = []; // buffered Blob parts for the current ~10s segment
let recordingInterval; // setInterval id that stops the recorder every 10s
let count_voice = 0; // number of analyzed segments so far (selects smoothing formula)
let before_rate = 0; // previous smoothed "own voice" rate (percent)
// Chart.js initialization: doughnut showing own vs. others' speaking share.
// Initial 30/70 split is placeholder data until the first analysis result arrives.
const ctx = document.getElementById("speechChart").getContext("2d");
const speechChart = new Chart(ctx, {
type: "doughnut",
data: {
labels: ["自分", "他の人"],
datasets: [
{
data: [30, 70],
backgroundColor: ["#4caf50", "#757575"],
},
],
},
options: {
responsive: true,
plugins: {
legend: {
display: true,
position: "bottom",
labels: { color: "white" },
},
},
},
});
/**
 * Toggles microphone recording on/off from the round record button.
 *
 * On start: requests mic access, wires a MediaRecorder that pushes data into
 * audioChunks and uploads them via sendAudioChunks() on stop, then arms a
 * 10-second interval that stops the recorder (the upload handler restarts it,
 * giving continuous 10s segments).
 * On manual stop: clears the interval, stops the recorder, releases the
 * microphone, and resets the smoothing state.
 *
 * Side effects: mutates the shared globals isRecording, mediaRecorder,
 * audioChunks, recordingInterval, count_voice, before_rate, and toggles the
 * "recording" CSS class on the button.
 */
async function toggleRecording() {
  const recordButton = document.getElementById("recordButton");
  if (!isRecording) {
    // Start recording.
    isRecording = true;
    recordButton.classList.add("recording");
    try {
      const stream = await navigator.mediaDevices.getUserMedia({
        audio: true,
      });
      mediaRecorder = new MediaRecorder(stream);
      audioChunks = [];
      mediaRecorder.ondataavailable = (event) => {
        if (event.data.size > 0) {
          audioChunks.push(event.data);
        }
      };
      mediaRecorder.onstop = () => {
        sendAudioChunks([...audioChunks]);
        audioChunks = [];
      };
      mediaRecorder.start();
      // Auto-stop every 10s; onstop uploads the segment and sendAudioChunks()
      // restarts the recorder while isRecording is still true.
      recordingInterval = setInterval(() => {
        if (mediaRecorder && mediaRecorder.state === "recording") {
          mediaRecorder.stop();
        }
      }, 10000);
    } catch (error) {
      console.error("マイクへのアクセスに失敗しました:", error);
      isRecording = false;
      recordButton.classList.remove("recording");
    }
  } else {
    // Manual stop.
    isRecording = false;
    recordButton.classList.remove("recording");
    clearInterval(recordingInterval);
    if (mediaRecorder && mediaRecorder.state === "recording") {
      mediaRecorder.stop();
    }
    // FIX: release the microphone. The original never stopped the
    // MediaStream's tracks, so the mic stayed live (browser mic indicator on)
    // after stopping. MediaRecorder exposes its source via .stream.
    if (mediaRecorder && mediaRecorder.stream) {
      mediaRecorder.stream.getTracks().forEach((track) => track.stop());
    }
    // FIX: reset smoothing state unconditionally. The original reset these
    // only when state === "recording", so stopping between 10s segments
    // (state "inactive" during an upload) leaked state into the next session.
    count_voice = 0;
    before_rate = 0;
  }
}
/**
 * Uploads one recorded audio segment to the backend and updates the chart.
 *
 * Encodes the chunks as a base64 data URL, POSTs the payload as JSON to
 * /upload_audio, and — when the response carries `rate` (percentage of "own
 * voice") — folds it into an exponentially-smoothed value shown in the
 * doughnut chart. Whether the upload succeeds or fails, restarts the recorder
 * if the user still has recording toggled on (continuous 10s capture loop).
 *
 * Reads/writes globals: isRecording, mediaRecorder, count_voice, before_rate,
 * speechChart.
 *
 * @param {Blob[]} chunks - data gathered by MediaRecorder for one segment
 */
function sendAudioChunks(chunks) {
// NOTE(review): MediaRecorder typically emits webm/ogg, not wav — this MIME
// label is likely inaccurate; confirm what the backend decoder expects.
const audioBlob = new Blob(chunks, { type: "audio/wav" });
const reader = new FileReader();
reader.onloadend = () => {
const base64String = reader.result.split(",")[1]; // strip "data:...;base64," prefix, keep the Base64 audio payload
fetch("/upload_audio", {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({ audio_data: base64String }),
})
.then((response) => response.json())
.then((data) => {
if (data.error) {
alert("エラー: " + data.error);
console.error(data.details);
} else if (data.rate !== undefined) {
// An analysis result came back: update the chart with a smoothed rate.
if (count_voice == 0) {
// First segment: use the reported rate as-is.
speechChart.data.datasets[0].data = [
data.rate,
100 - data.rate,
];
before_rate = data.rate;
} else if (count_voice == 1) {
let tmp_rate = (data.rate + before_rate) / 2; // only two data points, so take the plain average
speechChart.data.datasets[0].data = [
tmp_rate,
100 - tmp_rate,
];
console.log(before_rate, tmp_rate, 100 - tmp_rate);
before_rate = tmp_rate;
} else {
let tmp_rate = (data.rate + before_rate * 2) / 3; // weight accumulated history 2:1 against the new sample
speechChart.data.datasets[0].data = [
tmp_rate,
100 - tmp_rate,
];
console.log(before_rate, tmp_rate, 100 - tmp_rate);
before_rate = tmp_rate; // keep the smoothed rate for the next segment
}
count_voice++;
speechChart.update();
} else {
alert("音声がバックエンドに送信されました。");
}
// If recording is still toggled on, start the next 10s segment
// (the recorder is "inactive" here because the interval stopped it).
if (
isRecording &&
mediaRecorder &&
mediaRecorder.state === "inactive"
) {
mediaRecorder.start();
}
})
.catch((error) => {
console.error("エラー:", error);
// Even when an upload fails, keep the continuous capture loop alive.
if (
isRecording &&
mediaRecorder &&
mediaRecorder.state === "inactive"
) {
mediaRecorder.start();
}
});
};
reader.readAsDataURL(audioBlob);
}
/** Navigate to the conversation-history page. */
function showHistory() {
  const target = "history";
  window.location.href = target;
}
/** Navigate to the feedback page. */
function showResults() {
  const destination = "feedback";
  window.location.href = destination;
}
</script>
</body>
</html>