File size: 6,956 Bytes
002bcea a98fbe8 002bcea a98fbe8 002bcea a98fbe8 002bcea a98fbe8 002bcea 15447ff 6a6e5bf 15447ff 6a6e5bf 15447ff 6a6e5bf 002bcea 6a6e5bf 002bcea |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 |
import { pipeline } from "https://cdn.jsdelivr.net/npm/@huggingface/[email protected]";
// Initialize the Ace editor instance that hosts the generated EAD/XML document.
const editor = ace.edit("editor");
editor.setTheme("ace/theme/monokai");
editor.session.setMode("ace/mode/xml");
// Leftover alternative initial document values, kept commented out:
//editor.setValue('<?xml version="1.0" encoding="UTF-8"?>\n<ead>\n <!-- EAD content will appear here -->\n</ead>');
//editor.setValue('<?xml version="1.0" encoding="UTF-8"?>\n');
// Right-clicking inside the editor opens the custom "Add to Context" menu
// (showContextMenu is a hoisted function declaration defined further down).
editor.container.addEventListener('contextmenu', showContextMenu);
// Shared mutable state for the Whisper speech-to-text workflow.
let whisperPipeline; // transformers.js ASR pipeline; assigned by initWhisper()
let mediaRecorder; // active MediaRecorder instance; assigned by startRecording()
let audioChunks = []; // Blob chunks collected during the current recording
/**
 * Loads the Whisper automatic-speech-recognition pipeline (WebGPU, fp32)
 * into the module-level `whisperPipeline`, updating the status line and
 * loading spinner as it goes.
 *
 * Fix: the spinner was hidden separately in both the success and error
 * paths; it is now hidden once in `finally`, so it can never be left
 * spinning regardless of outcome.
 */
async function initWhisper() {
    // Show the loading spinner while the model downloads and compiles.
    $('#loadingSpinner').show();
    try {
        whisperPipeline = await pipeline('automatic-speech-recognition', 'Xenova/whisper-small',
            {
                device: "webgpu",
                dtype: 'fp32'
            });
        $('#status').text('Ready to record');
    } catch (e) {
        $('#status').text('Error initializing Whisper: ' + e.message);
    } finally {
        // Hide the spinner on both success and failure.
        $('#loadingSpinner').hide();
    }
}
// Initialize recording functionality
/**
 * Requests microphone access, starts a MediaRecorder, and wires its events
 * so the captured audio is transcribed (via processAudio) once recording
 * stops. Toggles the start/stop buttons and status text accordingly.
 *
 * Fixes:
 * - The audio Blob was hard-coded as 'audio/wav', a container MediaRecorder
 *   does not produce; the recorder's actual mimeType (typically webm/ogg)
 *   is used instead.
 * - The microphone tracks are now stopped after recording so the browser's
 *   recording indicator clears and the device is released.
 */
async function startRecording() {
    try {
        const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
        mediaRecorder = new MediaRecorder(stream);
        audioChunks = [];
        mediaRecorder.ondataavailable = (event) => {
            audioChunks.push(event.data);
        };
        mediaRecorder.onstop = async () => {
            // Use the recorder's real container type; fall back to webm.
            const audioBlob = new Blob(audioChunks, { type: mediaRecorder.mimeType || 'audio/webm' });
            // Release the microphone now that all chunks are collected.
            stream.getTracks().forEach((track) => track.stop());
            await processAudio(audioBlob);
        };
        mediaRecorder.start();
        $('#startRecording').prop('disabled', true);
        $('#stopRecording').prop('disabled', false);
        $('#status').text('Recording...');
    } catch (e) {
        $('#status').text('Error starting recording: ' + e.message);
    }
}
/**
 * Stops the in-progress recording (if any). Stopping the recorder fires its
 * `onstop` handler, which hands the collected audio to processAudio.
 */
async function stopRecording() {
    // Guard: nothing to do unless a recording is actively running.
    if (!mediaRecorder || mediaRecorder.state !== 'recording') return;
    mediaRecorder.stop();
    $('#startRecording').prop('disabled', false);
    $('#stopRecording').prop('disabled', true);
    $('#status').text('Processing audio...');
}
/**
 * Transcribes a recorded audio Blob with the Whisper pipeline and writes
 * the result into the #transcription field.
 *
 * Fixes:
 * - The object URL was only revoked on the success path, leaking it on
 *   error; revocation now happens in `finally` (spinner hiding too).
 * - Guards against the pipeline not having finished loading yet.
 *
 * @param {Blob} audioBlob - The recorded audio data.
 */
async function processAudio(audioBlob) {
    $('#loadingSpinner').show();
    // A temporary URL lets the pipeline fetch and decode the blob.
    const audioUrl = URL.createObjectURL(audioBlob);
    try {
        if (!whisperPipeline) {
            throw new Error('Whisper model is not loaded yet');
        }
        const transcription = await whisperPipeline(audioUrl);
        $('#transcription').val(transcription.text);
    } catch (e) {
        $('#status').text('Error processing audio: ' + e.message);
    } finally {
        // Always release the object URL and hide the spinner.
        URL.revokeObjectURL(audioUrl);
        $('#loadingSpinner').hide();
    }
}
// Function to send the final user prompt to the Ollama model
async function sendPrompt() {
$('#loadingSpinner').show();
const transcription = $('#transcription').val(); // Get transcription
const context = $('#context').text(); // Get context
const userPrompt = `${transcription}\n#Context:${context}`; // Combine both
// Send transcription to Ollama server for EAD/XML generation
const response = await fetch('http://129.80.86.176:11434/api/generate', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({
model: 'hf.co/Geraldine/FineLlama-3.2-3B-Instruct-ead-GGUF:Q5_K_M',
prompt: `Generate EAD/XML for the following archival description: ${userPrompt}`,
stream: false
})
});
const data = await response.json();
/ Check if context is empty
if (context.trim() === '') {
// If context is empty, set the entire editor value
editor.setValue(data.response);
} else {
// Get the current selection range in the editor
const selectionRange = editor.getSelectionRange(); // Get the selection range
// Replace only the highlighted text with the model's response
editor.session.replace(selectionRange, data.response);
}
$('#status').text('Ready');
$('#loadingSpinner').hide();
}
/**
 * Right-click handler for the editor: when text is selected, shows a small
 * floating "Add to Context" menu at the cursor position. The menu element
 * is created lazily on first use and reused afterwards.
 *
 * @param {MouseEvent} event - The contextmenu event from the editor container.
 */
function showContextMenu(event) {
    event.preventDefault(); // suppress the browser's native context menu
    const selectedText = editor.getSelectedText();
    if (!selectedText) return;
    // Lazily build the menu element the first time it is needed.
    let $contextMenu = $('#contextMenu');
    if ($contextMenu.length === 0) {
        $contextMenu = $('<div>', {
            id: 'contextMenu',
            css: {
                position: 'absolute',
                backgroundColor: 'white',
                border: '1px solid #ccc',
                zIndex: 1000,
                display: 'none' // hidden until positioned
            }
        })
            .append('<button id="addToContext">Add to Context</button>')
            .appendTo('body');
    }
    // Place the menu at the mouse position and reveal it.
    $contextMenu.css({
        left: `${event.pageX}px`,
        top: `${event.pageY}px`,
        display: 'block'
    });
    // Rebind the button so the handler captures the CURRENT selection.
    $('#addToContext').off('click').on('click', () => {
        addToContext(selectedText);
        $contextMenu.hide();
    });
}
/**
 * Copies the given selection into the #context element, replacing whatever
 * context was previously stored there.
 *
 * @param {string} selectedText - Text highlighted in the editor.
 */
function addToContext(selectedText) {
    const $context = $('#context');
    $context.text(selectedText);
}
/**
 * Pretty-prints the editor's current XML content in place. The second
 * argument to setValue (1) moves the cursor to the end of the document.
 */
function formatXmlInEditor() {
    const pretty = formatXml(editor.getValue());
    editor.setValue(pretty, 1);
}
/**
 * Pretty-prints an XML string with one node per line and depth-based
 * indentation.
 *
 * Fixes over the original:
 * - XML declarations (`<?xml ...?>`) and comments/doctypes (`<!...>`) were
 *   treated as opening tags (they start with `<` and don't end with `/>`),
 *   permanently over-indenting everything that followed; they are now
 *   depth-neutral, like self-closing tags.
 * - `tab.repeat(indentLevel)` threw a RangeError on malformed input with
 *   more closing than opening tags; the level is now clamped at zero.
 *
 * @param {string} xml - Raw XML markup.
 * @param {string} [tab='  '] - Indentation unit (default: two spaces).
 * @returns {string} The re-indented XML, trimmed of outer whitespace.
 */
function formatXml(xml, tab) {
    let formatted = '';
    let indentLevel = 0;
    tab = tab || '  '; // Use two spaces for indentation by default
    // Collapse whitespace between tags so every node is re-indented afresh.
    xml = xml.replace(/>\s*</g, '><').trim();
    // Split into alternating tag / text tokens and emit one per line.
    for (const node of xml.split(/(<[^>]+>)/g)) {
        const token = node.trim();
        if (!token) continue;
        const isClosing = token.startsWith('</');
        // Depth-neutral tokens: declarations, comments/doctypes, self-closing
        // tags, and plain text content.
        const isNeutral =
            token.startsWith('<?') ||
            token.startsWith('<!') ||
            token.endsWith('/>') ||
            !token.startsWith('<');
        if (isClosing) indentLevel = Math.max(indentLevel - 1, 0); // clamp for malformed input
        formatted += `${tab.repeat(indentLevel)}${token}\n`;
        if (!isClosing && !isNeutral) indentLevel++;
    }
    return formatted.trim();
}
// Wire up every button click handler from a single table.
const clickHandlers = {
    startRecording,
    stopRecording,
    sendPrompt,
    prettifyXML: formatXmlInEditor,
};
for (const [id, handler] of Object.entries(clickHandlers)) {
    $(`#${id}`).on('click', handler);
}
// Any click outside the context menu dismisses it.
$(document).on('click', () => {
    $('#contextMenu').hide();
});
// Kick off the Whisper model download as soon as the page loads.
initWhisper();