darkc0de committed on
Commit becdc49 · verified · 1 Parent(s): 5a559e4

Delete index2.html

Files changed (1)
  1. index2.html +0 -817
index2.html DELETED
@@ -1,817 +0,0 @@
- <!DOCTYPE html>
- <html lang="en">
- <head>
- <meta charset="UTF-8">
- <meta name="viewport" content="width=device-width, initial-scale=1.0">
- <title>XORTRON</title>
- <script src="https://cdn.tailwindcss.com"></script>
- <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.0.0-beta3/css/all.min.css">
- <style>
- /* Custom Styles */
- body {
- font-family: 'Inter', sans-serif; display: flex; justify-content: center; align-items: center;
- min-height: 100vh; /* background-color: #111827; */ /* Removed for matrix background */
- padding: 1rem;
- overflow: hidden; /* Prevent scrollbars from canvas */
- }
- #matrixCanvas {
- position: fixed;
- top: 0;
- left: 0;
- width: 100%;
- height: 100%;
- z-index: -1; /* Behind other content */
- display: block;
- }
- :root {
- --neon-cyan: #22d3ee; --neon-cyan-focus: #67e8f9; --neon-cyan-darker: #0e7490;
- }
- .main-container {
- background-color: #1f2937; border: 1px solid var(--neon-cyan);
- box-shadow: 0 0 15px rgba(34, 211, 238, 0.3); display: flex;
- flex-direction: column; height: 90vh; max-height: 800px;
- width: 100%; max-width: 768px;
- position: relative; /* Ensure z-index stacking context */
- z-index: 1; /* Above matrix canvas */
- }
- .dark-input {
- background-color: #374151; border: 1px solid #4b5563; color: #f3f4f6;
- }
- .dark-input::placeholder { color: #9ca3af; }
- .dark-input:focus {
- border-color: var(--neon-cyan); outline: none; box-shadow: 0 0 0 2px rgba(34, 211, 238, 0.4);
- }
- .dark-chatbox {
- background-color: #374151; border: 1px solid #4b5563; flex-grow: 1;
- overflow-y: auto; scroll-behavior: smooth;
- }
- .chat-bubble {
- max-width: 80%; padding: 0.75rem 1rem; border-radius: 1rem;
- margin-bottom: 0.5rem; word-wrap: break-word;
- overflow-wrap: break-word;
- line-height: 1.6;
- }
- .user-bubble {
- background-color: var(--neon-cyan); color: #1f2937; margin-left: auto;
- border-bottom-right-radius: 0.25rem;
- }
- .assistant-bubble {
- background-color: #4b5563; color: #f3f4f6; margin-right: auto;
- border-bottom-left-radius: 0.25rem;
- }
- .assistant-bubble.streaming::after {
- content: '▋'; animation: blink 1s step-end infinite;
- opacity: 0.7; margin-left: 2px; font-size: 0.9em;
- }
- @keyframes blink { 50% { opacity: 0; } }
- #recordButton.listening {
- animation: pulse 1.5s infinite; background-color: #ef4444; border-color: #ef4444;
- }
- #recordButton.listening:hover { background-color: #dc2626; border-color: #dc2626; }
- #recordButton { background-color: #4b5563; border: 1px solid #6b7280; }
- #recordButton:hover:not(.listening) {
- background-color: #374151; border-color: var(--neon-cyan);
- box-shadow: 0 0 8px rgba(34, 211, 238, 0.5);
- }
- #sendButton { background-color: var(--neon-cyan); color: #1f2937; }
- #sendButton:hover { background-color: var(--neon-cyan-focus); }
- #sendButton:disabled { background-color: #6b7280; color: #9ca3af; cursor: not-allowed; }
- @keyframes pulse {
- 0% { box-shadow: 0 0 0 0 rgba(239, 68, 68, 0.7); }
- 70% { box-shadow: 0 0 0 10px rgba(239, 68, 68, 0); }
- 100% { box-shadow: 0 0 0 0 rgba(239, 68, 68, 0); }
- }
- #chatbox::-webkit-scrollbar { width: 8px; }
- #chatbox::-webkit-scrollbar-track { background: #374151; border-radius: 10px; }
- #chatbox::-webkit-scrollbar-thumb { background: #6b7280; border-radius: 10px; }
- #chatbox::-webkit-scrollbar-thumb:hover { background: var(--neon-cyan); }
-
- /* <<< ADDED MARKDOWN STYLES START >>> */
- .chat-bubble code:not(pre code) {
- background-color: #111827;
- padding: 0.2em 0.4em;
- margin: 0 0.1em;
- font-size: 85%;
- border-radius: 6px;
- font-family: Consolas, Monaco, 'Andale Mono', 'Ubuntu Mono', monospace;
- word-wrap: break-word;
- }
- .chat-bubble pre {
- background-color: #111827;
- padding: 1em;
- border-radius: 6px;
- overflow-x: auto;
- margin: 0.8em 0;
- white-space: pre;
- color: #f3f4f6;
- }
- .chat-bubble pre code {
- background-color: transparent;
- padding: 0;
- margin: 0;
- font-size: inherit;
- border-radius: 0;
- white-space: inherit;
- color: inherit;
- }
- .chat-bubble ul, .chat-bubble ol {
- padding-left: 1.5em;
- margin-top: 0.5em;
- margin-bottom: 0.5em;
- }
- .chat-bubble li {
- margin-bottom: 0.25em;
- }
- .chat-bubble li > p {
- margin-bottom: 0;
- }
- .chat-bubble p {
- margin-bottom: 0.75em;
- }
- .chat-bubble p:last-child {
- margin-bottom: 0;
- }
- .chat-bubble strong, .chat-bubble b {
- font-weight: bold;
- }
- .chat-bubble em, .chat-bubble i {
- font-style: italic;
- }
- .chat-bubble blockquote {
- border-left: 4px solid var(--neon-cyan);
- padding-left: 1em;
- margin: 0.8em 0;
- color: #d1d5db;
- }
- .chat-bubble blockquote p {
- margin-bottom: 0.5em;
- }
- .chat-bubble a {
- color: var(--neon-cyan-focus);
- text-decoration: underline;
- }
- .chat-bubble a:hover {
- color: var(--neon-cyan);
- }
- .chat-bubble hr {
- border: none;
- border-top: 1px solid #4b5563;
- margin: 1em 0;
- }
- /* <<< ADDED MARKDOWN STYLES END >>> */
-
- </style>
- <link rel="preconnect" href="https://fonts.googleapis.com">
- <link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
- <link href="https://fonts.googleapis.com/css2?family=Inter:wght@400;500;600;700&display=swap" rel="stylesheet">
-
- <script src="https://cdn.jsdelivr.net/npm/marked/marked.min.js"></script>
- <script src="https://cdnjs.cloudflare.com/ajax/libs/dompurify/3.1.4/purify.min.js" integrity="sha512-WcCfo2F+5U1zKjjKwpPszIOxeh7o3N63FvQubHDjVAQnRBCw44fAnJsFzt7o06kEMt0h8+drQvdY9e+wOHhVKA==" crossorigin="anonymous" referrerpolicy="no-referrer"></script>
- </head>
- <body class="bg-gray-900"> <canvas id="matrixCanvas"></canvas>
- <div class="main-container p-6 md:p-8 rounded-lg shadow-xl w-full">
- <div class="text-2xl md:text-3xl font-bold mb-4 text-center text-gray-100 flex-shrink-0">
- <h1>XORTRON</h1>
- </div>
-
- <div id="chatbox" class="dark-chatbox rounded-md p-4 mb-4 flex flex-col space-y-2">
- </div>
- <div id="status" class="text-center text-sm text-gray-400 mb-2 h-5 flex-shrink-0"></div>
- <div class="flex items-center space-x-2 mb-4 flex-shrink-0">
- <input type="text" id="textInput" placeholder="Type your message..." class="dark-input w-full px-3 py-2 rounded-md shadow-sm text-sm flex-grow" disabled>
- <button id="sendButton" class="px-4 py-2 rounded-md font-semibold shadow-sm transition duration-150 ease-in-out focus:outline-none focus:ring-2 focus:ring-offset-2 focus:ring-offset-gray-800 focus:ring-[var(--neon-cyan)]" disabled>
- <i class="fas fa-paper-plane"></i> Send
- </button>
- </div>
- <div class="text-center flex-shrink-0">
- <button id="recordButton" title="Start/Stop Listening" class="text-white font-bold py-3 px-5 rounded-full shadow-md transition duration-150 ease-in-out focus:outline-none">
- <i class="fas fa-microphone text-xl"></i>
- </button>
- </div>
- <div class="text-center mt-4 flex-shrink-0"> <a href='https://ko-fi.com/Z8Z51E5TIG' target='_blank'><img height='36' style='border:0px;height:36px;' src='https://storage.ko-fi.com/cdn/kofi6.png?v=6' border='0' alt='Buy Me a Coffee at ko-fi.com' /></a>
- </div>
- </div>
-
- <script>
- // Original Chat App JavaScript
- // DOM Elements
- const recordButton = document.getElementById('recordButton');
- const statusDiv = document.getElementById('status');
- const chatbox = document.getElementById('chatbox');
- const textInput = document.getElementById('textInput');
- const sendButton = document.getElementById('sendButton');
-
- // --- API Endpoint ---
- const API_ENDPOINT_URL = "https://7896-24-125-188-125.ngrok-free.app/v1/chat/completions";
-
- // --- State Variables ---
- let recognition;
- let isListening = false;
- let isApiProcessing = false;
- let conversationHistory = [];
- let restartTimer;
- let currentAssistantMessageElement = null;
- let sentenceBuffer = "";
- let spokenTextPointer = 0;
- let recognitionWasRunning = false;
-
- // --- Speech Recognition Setup ---
- const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
- if (!SpeechRecognition) {
- statusDiv.textContent = 'Voice input not supported.';
- recordButton.disabled = true;
- recordButton.title = 'Speech Recognition not supported in this browser.';
- recordButton.classList.add('opacity-50', 'cursor-not-allowed');
- } else {
- recognition = new SpeechRecognition();
- recognition.continuous = true;
- recognition.interimResults = false;
- recognition.lang = 'en-US';
- recognition.maxAlternatives = 1;
-
- recognition.onstart = () => {
- console.log('Recognition started.');
- if (isListening) statusDiv.textContent = 'Listening...';
- };
-
- recognition.onresult = (event) => {
- let finalTranscript = '';
- for (let i = event.resultIndex; i < event.results.length; ++i) {
- if (event.results[i].isFinal) {
- finalTranscript += event.results[i][0].transcript;
- }
- }
- finalTranscript = finalTranscript.trim();
- console.log('Transcript:', finalTranscript);
- if (finalTranscript && !isApiProcessing && isListening) {
- handleUserInput(finalTranscript);
- } else if (!finalTranscript) {
- console.log('Empty transcript received.');
- }
- };
-
- recognition.onerror = (event) => {
- console.error('Speech recognition error:', event.error);
- if (event.error === 'no-speech') {
- console.warn('Recognition error: No speech detected. Restarting if still listening.');
- } else if (event.error === 'audio-capture') {
- console.warn('Recognition error: Audio capture issue.');
- statusDiv.textContent = 'Mic Issue';
- } else if (event.error === 'not-allowed') {
- statusDiv.textContent = 'Microphone access denied.';
- addMessageToChatbox('assistant', 'Error: Microphone access denied.');
- if (isListening) stopListening(true);
- } else {
- statusDiv.textContent = `Voice Error: ${event.error}`;
- if (isListening) stopListening(true);
- }
- };
-
- recognition.onend = () => {
- console.log('Recognition ended.');
- if (isListening && !isApiProcessing) {
- clearTimeout(restartTimer);
- restartTimer = setTimeout(() => {
- if (isListening) {
- console.log('Attempting to restart recognition...');
- try {
- recognition.start();
- } catch (e) {
- if (e.name !== 'InvalidStateError') {
- console.error("Error restarting recognition:", e);
- statusDiv.textContent = "Error restarting listening.";
- stopListening(true);
- }
- }
- }
- }, 250);
- } else if (!isListening) {
- updateButtonUI(false);
- if (!isApiProcessing && !isSpeaking && ttsQueue.length === 0) {
- statusDiv.textContent = '';
- }
- }
- };
- }
-
- // --- Text-to-Speech Setup ---
- const synth = window.speechSynthesis;
- let ttsQueue = [];
- let isSpeaking = false;
-
- if (!synth) {
- console.warn("Speech Synthesis not supported in this browser.");
- }
-
- function speakText(text) {
- let textToSpeak = text.replace(/```[\s\S]*?```/g, 'Code block.')
- .replace(/`([^`]+)`/g, '$1')
- .replace(/[*_~]+/g, '');
- if (!synth || !textToSpeak) return;
- ttsQueue.push(textToSpeak);
- processTTSQueue();
- }
-
- function processTTSQueue() {
- if (isSpeaking || ttsQueue.length === 0 || !synth) {
- return;
- }
- isSpeaking = true;
- const textToSpeak = ttsQueue.shift();
-
- setTimeout(() => {
- synth.cancel();
- const utterance = new SpeechSynthesisUtterance(textToSpeak);
- utterance.lang = 'en-US';
- utterance.rate = 1.2;
- utterance.pitch = 1;
- utterance.volume = 1;
-
- utterance.onstart = () => {
- console.log("Speech started for:", textToSpeak.substring(0, 30) + "...");
- statusDiv.textContent = 'Speaking...';
- };
-
- utterance.onend = () => {
- console.log("Speech finished for:", textToSpeak.substring(0, 30) + "...");
- isSpeaking = false;
- if (ttsQueue.length === 0 && !isApiProcessing) {
- enableInputs();
- statusDiv.textContent = isListening ? 'Listening...' : '';
- restartRecognitionIfNeeded(recognitionWasRunning);
- }
- processTTSQueue();
- };
-
- utterance.onerror = (event) => {
- console.error('SpeechSynthesis Utterance Error:', event.error, "for text:", textToSpeak);
- statusDiv.textContent = 'Error speaking response.';
- isSpeaking = false;
- if (ttsQueue.length === 0 && !isApiProcessing) {
- enableInputs();
- statusDiv.textContent = isListening ? 'Listening...' : '';
- restartRecognitionIfNeeded(recognitionWasRunning);
- }
- processTTSQueue();
- };
-
- console.log("Attempting to speak:", textToSpeak.substring(0, 50) + "...");
- synth.speak(utterance);
- }, 50);
- }
-
- function handleUserInput(text) {
- if (!text || isApiProcessing) return;
- isApiProcessing = true;
- statusDiv.textContent = 'Processing...';
- disableInputs();
- addMessageToChatbox('user', text);
- sendToApi(text);
- }
-
- async function sendToApi(userText) {
- const apiEndpoint = API_ENDPOINT_URL;
- conversationHistory.push({ role: "user", content: userText });
-
- statusDiv.textContent = 'Thinking...';
- currentAssistantMessageElement = null;
- sentenceBuffer = "";
- spokenTextPointer = 0;
- ttsQueue = [];
- recognitionWasRunning = false;
-
- if (isListening && recognition) {
- try {
- recognition.stop();
- recognitionWasRunning = true;
- console.log("Stopped recognition temporarily for API call.");
- } catch(e) { console.warn("Could not stop recognition before API call:", e); }
- }
- if (synth && synth.speaking) {
- synth.cancel();
- isSpeaking = false;
- }
-
- const requestBody = {
- messages: conversationHistory,
- max_tokens: 750,
- stream: true
- };
- const requestHeaders = {
- 'Content-Type': 'application/json',
- 'Accept': 'text/event-stream'
- };
-
- try {
- console.log("Sending request to:", apiEndpoint);
- const response = await fetch(apiEndpoint, { method: 'POST', headers: requestHeaders, body: JSON.stringify(requestBody) });
-
- if (!response.ok) {
- const errorText = await response.text();
- let detail = errorText;
- try {
- const errorJson = JSON.parse(errorText);
- detail = errorJson.detail || errorJson.error?.message || errorJson.message || JSON.stringify(errorJson);
- } catch (parseError) {}
- throw new Error(`API Error: ${response.status} ${response.statusText} - ${detail}`);
- }
- if (!response.body) {
- throw new Error("Response body is null, cannot process stream.");
- }
-
- const reader = response.body.getReader();
- const decoder = new TextDecoder("utf-8");
- let partialChunk = "";
- let isDoneProcessingStream = false;
-
- while (!isDoneProcessingStream) {
- const { done, value } = await reader.read();
-
- if (done) {
- console.log("Stream finished (reader signaled done).");
- isDoneProcessingStream = true;
- if (partialChunk.trim()) {
- console.warn("Stream ended by reader 'done' with unprocessed partial chunk:", partialChunk);
- }
- break;
- }
-
- const chunkText = partialChunk + decoder.decode(value, { stream: true });
- const eventStrings = chunkText.split("\n\n");
-
- if (!chunkText.endsWith("\n\n") && eventStrings.length > 0) {
- partialChunk = eventStrings.pop();
- } else {
- partialChunk = "";
- }
-
- for (const eventString of eventStrings) {
- if (!eventString.trim()) continue;
-
- let content = "";
- let isDoneSignalFound = false;
-
- const lines = eventString.split("\n");
- for (const line of lines) {
- if (line.startsWith("data:")) {
- const dataJson = line.substring(5).trim();
- if (dataJson === "[DONE]") {
- console.log("Received [DONE] signal in stream.");
- isDoneSignalFound = true;
- isDoneProcessingStream = true;
- break;
- }
- try {
- const data = JSON.parse(dataJson);
- if (data.choices && data.choices[0]?.delta?.content) {
- content += data.choices[0].delta.content;
- }
- } catch (e) {
- console.error("Error parsing stream data JSON:", e, "Data:", dataJson);
- }
- }
- }
-
- if (isDoneSignalFound) break;
-
- if (content) {
- processStreamContent(content);
- }
- }
- }
-
-
- if (sentenceBuffer.length > spokenTextPointer) {
- const remainingText = sentenceBuffer.substring(spokenTextPointer);
- console.log("Speaking remaining text after stream:", remainingText);
- speakText(remainingText);
- }
-
- if (currentAssistantMessageElement) {
- currentAssistantMessageElement.classList.remove('streaming');
- if (sentenceBuffer) {
- try {
- marked.setOptions({
- breaks: true,
- gfm: true
- });
- const unsafeHtml = marked.parse(sentenceBuffer);
- const safeHtml = DOMPurify.sanitize(unsafeHtml);
- currentAssistantMessageElement.innerHTML = safeHtml;
- console.log("Rendered final sanitized HTML for assistant message.");
- } catch (e) {
- console.error("Error processing final Markdown/HTML:", e);
- currentAssistantMessageElement.textContent = sentenceBuffer;
- }
- }
- }
-
- if (sentenceBuffer) {
- conversationHistory.push({ role: "assistant", content: sentenceBuffer });
- } else {
- console.log("API call successful but no content received. Removing last user message from history.");
- if (conversationHistory.length > 0 && conversationHistory[conversationHistory.length - 1].role === 'user') {
- conversationHistory.pop();
- }
- }
-
- } catch (error) {
- console.error('Error during API call or streaming:', error);
- if (currentAssistantMessageElement) { currentAssistantMessageElement.classList.remove('streaming'); }
-
- let userFriendlyError = `Sorry, I encountered an error: ${error.message}`;
- if (error instanceof TypeError && error.message.toLowerCase().includes('fetch')) {
- userFriendlyError = `Connection Error: Could not connect to the API at ${apiEndpoint}. Please check the URL and network connection.`;
- statusDiv.textContent = 'Connection Error';
- } else {
- statusDiv.textContent = `API Error: ${error.message.substring(0, 100)}...`;
- }
- addMessageToChatbox('assistant', userFriendlyError);
-
- if (conversationHistory.length > 0 && conversationHistory[conversationHistory.length - 1].role === 'user') {
- conversationHistory.pop();
- }
-
- } finally {
- console.log("API processing finished or errored. Entering finally block.");
- isApiProcessing = false;
-
- setTimeout(() => {
- if (ttsQueue.length === 0 && !isSpeaking) {
- console.log("Finally: TTS idle. Enabling inputs and checking recognition restart.");
- enableInputs();
- statusDiv.textContent = isListening ? 'Listening...' : '';
- restartRecognitionIfNeeded(recognitionWasRunning);
- } else {
- console.log("Finally: TTS queue active or speaking. Inputs remain disabled. TTS onend will handle enabling/restart.");
- }
- }, 100);
- }
- }
-
- function processStreamContent(content) {
- if (!currentAssistantMessageElement) {
- currentAssistantMessageElement = addMessageToChatbox('assistant', '', true);
- }
- sentenceBuffer += content;
- currentAssistantMessageElement.textContent = sentenceBuffer;
- chatbox.scrollTop = chatbox.scrollHeight;
-
- let searchStart = spokenTextPointer;
- while (searchStart < sentenceBuffer.length) {
- const sentenceEndMatch = sentenceBuffer.substring(searchStart).match(/([.?!])(?:\s|\n|$)/);
- if (sentenceEndMatch) {
- const sentenceEndIndex = searchStart + sentenceEndMatch.index + sentenceEndMatch[1].length;
- const textToSpeak = sentenceBuffer.substring(spokenTextPointer, sentenceEndIndex).trim();
- if (textToSpeak) {
- console.log("Found sentence for TTS:", textToSpeak);
- speakText(textToSpeak);
- spokenTextPointer = sentenceEndIndex;
- }
- searchStart = spokenTextPointer;
- } else {
- break;
- }
- }
- }
-
- function restartRecognitionIfNeeded(wasRunning) {
- if (wasRunning && isListening && recognition && !isApiProcessing && !isSpeaking && ttsQueue.length === 0) {
- console.log("Conditions met: Restarting recognition.");
- clearTimeout(restartTimer);
- try {
- statusDiv.textContent = 'Listening...';
- recognition.start();
- } catch (e) {
- if (e.name !== 'InvalidStateError') {
- console.error("Error restarting recognition post-API/TTS:", e);
- statusDiv.textContent = "Error restarting listening.";
- stopListening(true);
- } else {
- console.log("Recognition likely already restarting or started (InvalidStateError).");
- if(isListening) statusDiv.textContent = 'Listening...';
- }
- }
- } else if (!isListening && !isApiProcessing && !isSpeaking && ttsQueue.length === 0) {
- statusDiv.textContent = '';
- }
- else {
- console.log(`Conditions not met for restarting recognition (wasRunning: ${wasRunning}, isListening: ${isListening}, isApiProcessing: ${isApiProcessing}, isSpeaking: ${isSpeaking}, ttsQueue: ${ttsQueue.length})`);
- }
- }
-
- function addMessageToChatbox(role, text, isStreaming = false) {
- const messageDiv = document.createElement('div');
- messageDiv.classList.add('chat-bubble');
- messageDiv.textContent = text;
- messageDiv.classList.add(role === 'user' ? 'user-bubble' : 'assistant-bubble');
- if (role === 'assistant' && isStreaming) {
- messageDiv.classList.add('streaming');
- }
- chatbox.appendChild(messageDiv);
- chatbox.scrollTo({ top: chatbox.scrollHeight, behavior: 'smooth' });
- return messageDiv;
- }
-
- function updateButtonUI(listening) {
- if (!recognition) return;
- if (listening) {
- recordButton.classList.add('listening');
- recordButton.innerHTML = '<i class="fas fa-stop text-xl"></i>';
- recordButton.title = "Stop Listening";
- } else {
- recordButton.classList.remove('listening');
- recordButton.innerHTML = '<i class="fas fa-microphone text-xl"></i>';
- recordButton.title = "Start Listening";
- }
- }
-
- function disableInputs() {
- console.log("Disabling inputs.");
- textInput.disabled = true;
- sendButton.disabled = true;
- if (recognition) {
- recordButton.disabled = true;
- recordButton.classList.add('opacity-50');
- }
- }
-
- function enableInputs() {
- console.log("Enabling inputs.");
- textInput.disabled = false;
- sendButton.disabled = textInput.value.trim() === '' || isApiProcessing;
- if (recognition) {
- recordButton.disabled = false;
- recordButton.classList.remove('opacity-50');
- }
- }
-
- function stopListening(forceStop = false) {
- if (!recognition) return;
- const wasListening = isListening;
- isListening = false;
- if (wasListening) {
- console.log("Stopping listening session.");
- clearTimeout(restartTimer);
- updateButtonUI(false);
- if (!isApiProcessing && !isSpeaking && ttsQueue.length === 0) {
- statusDiv.textContent = 'Stopping...';
- setTimeout(() => {
- if (statusDiv.textContent === 'Stopping...') { statusDiv.textContent = ''; }
- }, 500);
- }
- try {
- recognition.abort();
- console.log("Recognition aborted.");
- } catch (e) {
- console.warn("Error aborting recognition (might have already stopped):", e);
- }
- }
- if (synth) {
- console.log("Cancelling any TTS on stopListening.");
- synth.cancel();
- ttsQueue = [];
- isSpeaking = false;
- }
- if (!isApiProcessing) {
- enableInputs();
- if (!isSpeaking && ttsQueue.length === 0) {
- statusDiv.textContent = '';
- }
- }
- }
-
- function startListening() {
- if (!recognition || isListening) return;
- navigator.mediaDevices.getUserMedia({ audio: true })
- .then(stream => {
- stream.getTracks().forEach(track => track.stop());
- console.log("Microphone permission granted or already available.");
- isListening = true;
- updateButtonUI(true);
- statusDiv.textContent = 'Starting...';
- try {
- recognition.start();
- } catch (e) {
- console.error("Error starting recognition:", e);
- statusDiv.textContent = "Error starting listening.";
- isListening = false;
- updateButtonUI(false);
- }
- })
- .catch(err => {
- console.error("Microphone access error:", err);
- if (err.name === 'NotAllowedError' || err.name === 'PermissionDeniedError') {
- statusDiv.textContent = 'Microphone access denied.';
- addMessageToChatbox('assistant', 'Error: Microphone access is required for voice input.');
- } else {
- statusDiv.textContent = `Mic Error: ${err.name}`;
- addMessageToChatbox('assistant', `Error accessing microphone: ${err.message}`);
- }
- isListening = false;
- updateButtonUI(false);
- });
- }
-
- recordButton.addEventListener('click', () => {
- if (!recognition) return;
- if (!isListening) {
- startListening();
- } else {
- stopListening();
- }
- });
-
- sendButton.addEventListener('click', () => {
- const text = textInput.value.trim();
- if (text && !isApiProcessing) {
- handleUserInput(text);
- textInput.value = '';
- sendButton.disabled = true;
- }
- });
-
- textInput.addEventListener('keypress', (e) => {
- if (e.key === 'Enter' && !e.shiftKey) {
- e.preventDefault();
- const text = textInput.value.trim();
- if (text && !sendButton.disabled) {
- handleUserInput(text);
- textInput.value = '';
- sendButton.disabled = true;
- }
- }
- });
-
- textInput.addEventListener('input', () => {
- sendButton.disabled = textInput.value.trim() === '' || isApiProcessing;
- });
-
- chatbox.innerHTML = '';
- addMessageToChatbox('assistant', 'Hello! Use the microphone or type a message below.');
- console.log("Voice/Text Chat App Initialized (Markdown Enabled)");
- updateButtonUI(false);
- enableInputs();
-
- </script>
-
- <script>
- // Matrix Rain Effect
- const matrixCanvas = document.getElementById('matrixCanvas');
- const matrixCtx = matrixCanvas.getContext('2d');
-
- matrixCanvas.width = window.innerWidth;
- matrixCanvas.height = window.innerHeight;
-
- const katakana = 'アァカサタナハマヤャラワガザダバパイィキシチニヒミリヰギジヂビピウゥクスツヌフムユュルグズブヅプエェケセテネヘメレヱゲゼデベペオォコソトノホモヨョロヲゴゾドボポヴッン';
- const latin = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ';
- const nums = '0123456789';
- const matrixCharacters = katakana + latin + nums;
-
- const matrixFontSize = 16;
- let matrixColumns = Math.floor(matrixCanvas.width / matrixFontSize);
- let matrixDrops = [];
-
- function initializeMatrixDrops() {
- matrixColumns = Math.floor(matrixCanvas.width / matrixFontSize);
- matrixDrops = [];
- for (let x = 0; x < matrixColumns; x++) {
- matrixDrops[x] = 1 + Math.floor(Math.random() * (matrixCanvas.height / matrixFontSize));
- }
- }
- initializeMatrixDrops();
-
- function drawMatrix() {
- matrixCtx.fillStyle = 'rgba(0, 0, 0, 0.04)'; // Slower fade for more pronounced trails
- matrixCtx.fillRect(0, 0, matrixCanvas.width, matrixCanvas.height);
-
- matrixCtx.fillStyle = '#0F0'; // Green text (classic matrix)
- matrixCtx.font = matrixFontSize + 'px monospace';
-
- for (let i = 0; i < matrixDrops.length; i++) {
- const text = matrixCharacters.charAt(Math.floor(Math.random() * matrixCharacters.length));
- matrixCtx.fillText(text, i * matrixFontSize, matrixDrops[i] * matrixFontSize);
-
- if (matrixDrops[i] * matrixFontSize > matrixCanvas.height && Math.random() > 0.975) {
- matrixDrops[i] = 0;
- }
- matrixDrops[i]++;
- }
- }
-
- let matrixInterval = setInterval(drawMatrix, 40);
-
- window.addEventListener('resize', () => {
- const oldWidth = matrixCanvas.width;
- const oldHeight = matrixCanvas.height;
-
- matrixCanvas.width = window.innerWidth;
- matrixCanvas.height = window.innerHeight;
-
- if (matrixCanvas.width !== oldWidth || matrixCanvas.height !== oldHeight) {
- initializeMatrixDrops();
- }
- });
- </script>
- </body>
- </html>