Geraldine committed on
Commit
002bcea
·
verified ·
1 Parent(s): b7c8072

Upload main.js

Browse files
Files changed (1) hide show
  1. main.js +212 -0
main.js ADDED
@@ -0,0 +1,212 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { pipeline } from "https://cdn.jsdelivr.net/npm/@huggingface/[email protected]";
2
+
3
+ // Initialize Ace Editor
4
+ const editor = ace.edit("editor");
5
+ editor.setTheme("ace/theme/monokai");
6
+ editor.session.setMode("ace/mode/xml");
7
+ //editor.setValue('<?xml version="1.0" encoding="UTF-8"?>\n<ead>\n <!-- EAD content will appear here -->\n</ead>');
8
+ //editor.setValue('<?xml version="1.0" encoding="UTF-8"?>\n');
9
+ editor.container.addEventListener('contextmenu', showContextMenu);
10
+
11
+ // Initialize Whisper pipeline
12
+ let whisperPipeline;
13
+ let mediaRecorder;
14
+ let audioChunks = [];
15
+
16
+ async function initWhisper() {
17
+ // Show the loading spinner
18
+ $('#loadingSpinner').show();
19
+ try {
20
+ whisperPipeline = await pipeline('automatic-speech-recognition', 'Xenova/whisper-small',
21
+ {
22
+ device: "webgpu",
23
+ dtype: 'fp32'
24
+ },);
25
+ // Hide the loading spinner after the model is loaded
26
+ $('#loadingSpinner').hide();
27
+ $('#status').text('Ready to record');
28
+ } catch (e) {
29
+ $('#status').text('Error initializing Whisper: ' + e.message);
30
+ $('#loadingSpinner').hide();
31
+ }
32
+ }
33
+
34
+ // Initialize recording functionality
35
+ async function startRecording() {
36
+ try {
37
+ const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
38
+ mediaRecorder = new MediaRecorder(stream);
39
+ audioChunks = [];
40
+
41
+ mediaRecorder.ondataavailable = (event) => {
42
+ audioChunks.push(event.data);
43
+ };
44
+
45
+ mediaRecorder.onstop = async () => {
46
+ const audioBlob = new Blob(audioChunks, { type: 'audio/wav' });
47
+ await processAudio(audioBlob);
48
+ };
49
+
50
+ mediaRecorder.start();
51
+ $('#startRecording').prop('disabled', true);
52
+ $('#stopRecording').prop('disabled', false);
53
+ $('#status').text('Recording...');
54
+ } catch (e) {
55
+ $('#status').text('Error starting recording: ' + e.message);
56
+ }
57
+ }
58
+
59
+ async function stopRecording() {
60
+ if (mediaRecorder && mediaRecorder.state === 'recording') {
61
+ mediaRecorder.stop();
62
+ $('#startRecording').prop('disabled', false);
63
+ $('#stopRecording').prop('disabled', true);
64
+ $('#status').text('Processing audio...');
65
+ }
66
+ }
67
+
68
+ async function processAudio(audioBlob) {
69
+ try {
70
+ // Create a URL for the audio blob
71
+ const audioUrl = URL.createObjectURL(audioBlob);
72
+
73
+ // Create a download link for the audio file (optional)
74
+ /*const downloadLink = document.createElement('a');
75
+ downloadLink.href = audioUrl;
76
+ downloadLink.download = 'recording.wav';
77
+ downloadLink.textContent = 'Download audio file';
78
+ document.body.appendChild(downloadLink);*/
79
+ // Pass the Float32Array to the whisperPipeline
80
+ const transcription = await whisperPipeline(audioUrl);
81
+ $('#transcription').val(transcription.text);
82
+
83
+ // Clean up the URL object
84
+ URL.revokeObjectURL(audioUrl);
85
+ } catch (e) {
86
+ $('#status').text('Error processing audio: ' + e.message);
87
+ }
88
+ }
89
+
90
+ // Function to send the final user prompt to the Ollama model
91
+ async function sendPrompt() {
92
+ const transcription = $('#transcription').val(); // Get transcription
93
+ const context = $('#context').text(); // Get context
94
+ const userPrompt = `${transcription}\n#Context:${context}`; // Combine both
95
+ console.log(userPrompt)
96
+
97
+ // Send transcription to Ollama server for EAD/XML generation
98
+ const response = await fetch('http://129.80.86.176:11434/api/generate', {
99
+ method: 'POST',
100
+ headers: {
101
+ 'Content-Type': 'application/json',
102
+ },
103
+ body: JSON.stringify({
104
+ model: 'hf.co/Geraldine/FineLlama-3.2-3B-Instruct-ead-GGUF:Q5_K_M',
105
+ prompt: `Generate EAD/XML for the following archival description: ${userPrompt}`,
106
+ stream: false
107
+ })
108
+ });
109
+
110
+ const data = await response.json();
111
+ editor.setValue(data.response);
112
+ $('#status').text('Ready');
113
+ }
114
+
115
+ // Add this function to handle right-click context menu
116
+ function showContextMenu(event) {
117
+ event.preventDefault(); // Prevent the default context menu
118
+
119
+ const selectedText = editor.getSelectedText();
120
+ if (selectedText) {
121
+ // Create a context menu if it doesn't exist
122
+ let $contextMenu = $('#contextMenu');
123
+ if ($contextMenu.length === 0) {
124
+ $contextMenu = $('<div>', {
125
+ id: 'contextMenu',
126
+ css: {
127
+ position: 'absolute',
128
+ backgroundColor: 'white',
129
+ border: '1px solid #ccc',
130
+ zIndex: 1000,
131
+ display: 'none' // Initially hidden
132
+ }
133
+ }).append('<button id="addToContext">Add to Context</button>').appendTo('body');
134
+ }
135
+
136
+ // Position the context menu
137
+ $contextMenu.css({
138
+ left: `${event.pageX}px`,
139
+ top: `${event.pageY}px`,
140
+ display: 'block' // Show the context menu
141
+ });
142
+
143
+ // Add event listener for the "Add to Context" button
144
+ $('#addToContext').off('click').on('click', () => {
145
+ addToContext(selectedText);
146
+ $contextMenu.hide(); // Hide the context menu
147
+ });
148
+ }
149
+ }
150
+
151
+ // Function to add selected text to the context div
152
+ function addToContext(selectedText) {
153
+ $('#context').text(selectedText); // Populate the context div with the selected text
154
+ }
155
+
156
+ function formatXmlInEditor() {
157
+ const xmlContent = editor.getValue();
158
+ const formattedXml = formatXml(xmlContent);
159
+ editor.setValue(formattedXml, 1);
160
+ }
161
+
162
+ function formatXml(xml, tab) {
163
+ let formatted = '';
164
+ let indentLevel = 0;
165
+ tab = tab || ' '; // Use two spaces for indentation
166
+
167
+ // Remove unnecessary spaces between tags and normalize newlines
168
+ xml = xml.replace(/>\s*</g, '><').trim();
169
+
170
+ // Split by tags
171
+ xml.split(/(<[^>]+>)/g).forEach(node => {
172
+ if (node.trim()) {
173
+ if (node.startsWith('</')) {
174
+ // Closing tag - decrease indentation
175
+ indentLevel--;
176
+ formatted += `${tab.repeat(indentLevel)}${node.trim()}\n`;
177
+ } else if (node.startsWith('<') && !node.endsWith('/>')) {
178
+ // Opening tag - add the tag and then increase indentation
179
+ formatted += `${tab.repeat(indentLevel)}${node.trim()}\n`;
180
+ indentLevel++;
181
+ } else if (node.startsWith('<') && node.endsWith('/>')) {
182
+ // Self-closing tag - add it at the current level
183
+ formatted += `${tab.repeat(indentLevel)}${node.trim()}\n`;
184
+ } else {
185
+ // Text content - keep it at the current indentation level
186
+ formatted += `${tab.repeat(indentLevel)}${node.trim()}\n`;
187
+ }
188
+ }
189
+ });
190
+
191
+ return formatted.trim();
192
+ }
193
+
194
+
195
+
196
+ // Event listeners
197
+ $('#startRecording').on('click', startRecording);
198
+ $('#stopRecording').on('click', stopRecording);
199
+
200
+ // Hide context menu on click elsewhere
201
+ $(document).on('click', () => {
202
+ $('#contextMenu').hide();
203
+ });
204
+
205
+ // Add event listener for the send prompt button
206
+ $('#sendPrompt').on('click', sendPrompt);
207
+
208
+ // Add event listener for the prettify button
209
+ $('#prettifyXML').on('click', formatXmlInEditor);
210
+
211
+ // Initialize Whisper on page load
212
+ initWhisper();