<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Parental Control</title>
<style>
html,
body {
margin: 0;
padding: 0;
width: 100%;
height: 100%;
overflow: hidden;
}
iframe {
width: 100%;
height: 100%;
border: none;
display: block;
}
</style>
</head>
<body>
<!-- Iframe for the Godot export -->
<iframe src="godot/index.html"></iframe>
<script>
// URL of the server to send audio chunks
const serverUrl = "./api/process"
const FETCH_TIMEOUT = 5000 // 5 seconds timeout
let token = "" // session token received from the game running in the iframe
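// The Godot export inside the iframe is expected to post the token in a message
// shaped roughly like { type: 'game_token', data: '<token>' } (see listener below)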
// Listen for messages coming from the iframe
window.addEventListener('message', function (event) {
if (event.data?.type === 'game_token') {
token = event.data.data
console.log("Token received:", token)
// Only contact the server once the token is available
checkServer()
}
})
// Check server availability first, then start audio recording
function checkServer() {
const controller = new AbortController()
const timeout = setTimeout(() => controller.abort(), FETCH_TIMEOUT)
fetch(`./api/data?token=${token}`, {
method: 'GET',
signal: controller.signal
})
.then(response => {
clearTimeout(timeout)
if (!response.ok) {
throw new Error(`Server check failed: ${response.status}`)
}
console.log('Server check successful')
setupAudioRecording()
})
.catch(error => {
clearTimeout(timeout)
const errorMessage = error.name === 'AbortError'
? 'Server request timed out. Please try again later.'
: 'Could not connect to the server. Please try again later.'
console.error(errorMessage, error)
})
}
// Move existing audio setup into a function
function setupAudioRecording() {
// Check if browser supports audio recording
if (!navigator.mediaDevices?.getUserMedia) {
console.error('Your browser does not support audio recording.')
console.error('Please try using a modern browser like Chrome, Firefox, or Edge.')
throw new Error('Audio recording not supported')
}
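// Request microphone access; recording only starts if the user grants permission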
navigator.mediaDevices.getUserMedia({ audio: true })
.then(stream => {
const audioContext = new (window.AudioContext || window.webkitAudioContext)()
const mediaRecorder = new MediaRecorder(stream)
const audioBuffer = [] // Rolling buffer of the most recent chunks (up to MAX_BUFFER_SIZE)
const MAX_BUFFER_SIZE = 12
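// ondataavailable fires each time the recorder is stopped (see the interval below);
// every event delivers one recorded chunk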
mediaRecorder.ondataavailable = event => {
// Add new chunk to buffer and maintain max size
audioBuffer.push(event.data)
if (audioBuffer.length > MAX_BUFFER_SIZE) {
audioBuffer.shift() // Remove oldest chunk
}
// Merge all blobs in buffer
const mergedBlob = new Blob(audioBuffer, { type: 'audio/webm' })
// Convert merged Blob to base64
const reader = new FileReader()
reader.readAsDataURL(mergedBlob)
reader.onloadend = () => {
const base64Audio = reader.result.split(',')[1]
// Send as JSON with base64-encoded audio
const audioController = new AbortController()
const audioTimeout = setTimeout(() => audioController.abort(), FETCH_TIMEOUT)
fetch(`${serverUrl}?token=${token}`, {
method: 'POST',
headers: {
'Content-Type': 'application/json'
},
body: JSON.stringify({
audio_chunk: base64Audio
}),
signal: audioController.signal
}).then(response => {
clearTimeout(audioTimeout)
if (!response.ok) {
console.error(`Failed to send audio chunk: server responded ${response.status}`)
return
}
console.log('Audio chunk sent successfully')
}).catch(error => {
clearTimeout(audioTimeout)
console.error('Failed to send audio chunk:', error)
})
}
}
// Start recording in intervals
const chunkInterval = 300 // Chunk duration in milliseconds
mediaRecorder.start()
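// Stopping and restarting the recorder flushes a chunk (via ondataavailable)
// every chunkInterval milliseconds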
setInterval(() => {
mediaRecorder.stop()
mediaRecorder.start()
}, chunkInterval)
})
.catch(error => {
console.error('Error accessing microphone:', error)
})
}
</script>
</body>
</html>