Dataset columns (each record below repeats these fields in order: code, docstring, func_name, language, repo, path, url, license):
code: string, lengths 24 to 2.07M
docstring: string, lengths 25 to 85.3k
func_name: string, lengths 1 to 92
language: string, 1 distinct value
repo: string, lengths 5 to 64
path: string, lengths 4 to 172
url: string, lengths 44 to 218
license: string, 7 distinct values
async isValidChatCompletionModel(_modelName = "") { return true; }
Stubbed method for compatibility with LLM interface.
isValidChatCompletionModel
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/bedrock/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/bedrock/index.js
MIT
constructPrompt({ systemPrompt = "", contextTexts = [], chatHistory = [], userPrompt = "", attachments = [], }) { const systemMessageContent = `${systemPrompt}${this.#appendContext(contextTexts)}`; let messages = []; // Handle system prompt (either real or simulated) if (this.noSystemPromptModels.includes(this.model)) { if (systemMessageContent.trim().length > 0) { this.#log( `Model ${this.model} doesn't support system prompts; simulating.` ); messages.push( { role: "user", content: this.#generateContent({ userPrompt: systemMessageContent, }), }, { role: "assistant", content: [{ text: "Okay." }] } ); } } else if (systemMessageContent.trim().length > 0) { messages.push({ role: "system", content: this.#generateContent({ userPrompt: systemMessageContent }), }); } // Add chat history messages = messages.concat( chatHistory.map((msg) => ({ role: msg.role, content: this.#generateContent({ userPrompt: msg.content, attachments: Array.isArray(msg.attachments) ? msg.attachments : [], }), })) ); // Add final user prompt messages.push({ role: "user", content: this.#generateContent({ userPrompt: userPrompt, attachments: Array.isArray(attachments) ? attachments : [], }), }); return messages; }
Constructs the complete message array in the format expected by the Bedrock Converse API. @param {object} params @param {string} params.systemPrompt - The system prompt text. @param {string[]} params.contextTexts - Array of context text snippets. @param {Array<{role: 'user' | 'assistant', content: string, attachments?: Array<{contentString: string, mime: string}>}>} params.chatHistory - Previous messages. @param {string} params.userPrompt - The latest user prompt text. @param {Array<{contentString: string, mime: string}>} params.attachments - Attachments for the latest user prompt. @returns {Array<object>} The formatted message array for the API call.
constructPrompt
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/bedrock/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/bedrock/index.js
MIT
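For reference, a minimal standalone sketch of the message array this method produces, assuming the private #generateContent helper wraps plain text as Converse-style content blocks ([{ text }]); the helper name toContent and the sample strings below are illustrative only.

// Hypothetical stand-in for #generateContent: wraps plain text as a Bedrock
// Converse content-block array. Attachment handling is omitted here.
const toContent = (text) => [{ text }];

// Shape produced for a model that supports system prompts:
const messages = [
  { role: "system", content: toContent("You are a helpful assistant.\n\nContext:\n[CONTEXT 0]: ...") },
  { role: "user", content: toContent("What is AnythingLLM?") },
];

// Shape produced for a model listed in noSystemPromptModels: the system text is
// replayed as a user turn followed by a canned assistant acknowledgement.
const simulated = [
  { role: "user", content: toContent("You are a helpful assistant.") },
  { role: "assistant", content: [{ text: "Okay." }] },
  { role: "user", content: toContent("What is AnythingLLM?") },
];

console.log(JSON.stringify(messages, null, 2));
console.log(JSON.stringify(simulated, null, 2));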
async getChatCompletion(messages = null, { temperature }) { if (!messages?.length) throw new Error( "AWSBedrock::getChatCompletion requires a non-empty messages array." ); const hasSystem = messages[0]?.role === "system"; const systemBlock = hasSystem ? messages[0].content : undefined; const history = hasSystem ? messages.slice(1) : messages; const maxTokensToSend = this.getMaxOutputTokens(); const result = await LLMPerformanceMonitor.measureAsyncFunction( this.bedrockClient .send( new ConverseCommand({ modelId: this.model, messages: history, inferenceConfig: { maxTokens: maxTokensToSend, temperature: temperature ?? this.defaultTemp, }, system: systemBlock, }) ) .catch((e) => { this.#log( `Bedrock Converse API Error (getChatCompletion): ${e.message}`, e ); if ( e.name === "ValidationException" && e.message.includes("maximum tokens") ) { throw new Error( `AWSBedrock::getChatCompletion failed. Model ${this.model} rejected maxTokens value of ${maxTokensToSend}. Check model documentation for its maximum output token limit and set AWS_BEDROCK_LLM_MAX_OUTPUT_TOKENS if needed. Original error: ${e.message}` ); } throw new Error(`AWSBedrock::getChatCompletion failed. ${e.message}`); }), messages, false ); const response = result.output; if (!response?.output?.message) { this.#log( "Bedrock response missing expected output.message structure.", response ); return null; } const latencyMs = response?.metrics?.latencyMs; const outputTokens = response?.usage?.outputTokens; const outputTps = latencyMs > 0 && outputTokens ? outputTokens / (latencyMs / 1000) : 0; return { textResponse: this.#parseReasoningFromResponse(response.output.message), metrics: { prompt_tokens: response?.usage?.inputTokens ?? 0, completion_tokens: outputTokens ?? 0, total_tokens: response?.usage?.totalTokens ?? 0, outputTps: outputTps, duration: result.duration, }, }; }
Sends a request for chat completion (non-streaming). @param {Array<object> | null} messages - Formatted message array from constructPrompt. @param {object} options - Request options. @param {number} options.temperature - Sampling temperature. @returns {Promise<object | null>} Response object with textResponse and metrics, or null. @throws {Error} If the API call fails or validation errors occur.
getChatCompletion
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/bedrock/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/bedrock/index.js
MIT
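To make the outputTps calculation concrete, here is the same arithmetic with hypothetical metric values (not taken from a real Bedrock response):

// Bedrock's Converse metrics report latency in milliseconds and usage in tokens;
// tokens per second is outputTokens divided by elapsed seconds.
const latencyMs = 2500;   // 2.5 seconds end to end (illustrative)
const outputTokens = 180; // completion tokens reported in usage (illustrative)
const outputTps =
  latencyMs > 0 && outputTokens ? outputTokens / (latencyMs / 1000) : 0;
console.log(outputTps); // 72 tokens per second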
async streamGetChatCompletion(messages = null, { temperature }) { if (!Array.isArray(messages) || messages.length === 0) { throw new Error( "AWSBedrock::streamGetChatCompletion requires a non-empty messages array." ); } const hasSystem = messages[0]?.role === "system"; const systemBlock = hasSystem ? messages[0].content : undefined; const history = hasSystem ? messages.slice(1) : messages; const maxTokensToSend = this.getMaxOutputTokens(); try { // Attempt to initiate the stream const stream = await this.bedrockClient.send( new ConverseStreamCommand({ modelId: this.model, messages: history, inferenceConfig: { maxTokens: maxTokensToSend, temperature: temperature ?? this.defaultTemp, }, system: systemBlock, }) ); // If successful, wrap the stream with performance monitoring const measuredStreamRequest = await LLMPerformanceMonitor.measureStream( stream, messages, false // Indicate it's not a function call measurement ); return measuredStreamRequest; } catch (e) { // Catch errors during the initial .send() call (e.g., validation errors) this.#log( `Bedrock Converse API Error (streamGetChatCompletion setup): ${e.message}`, e ); if ( e.name === "ValidationException" && e.message.includes("maximum tokens") ) { throw new Error( `AWSBedrock::streamGetChatCompletion failed during setup. Model ${this.model} rejected maxTokens value of ${maxTokensToSend}. Check model documentation for its maximum output token limit and set AWS_BEDROCK_LLM_MAX_OUTPUT_TOKENS if needed. Original error: ${e.message}` ); } throw new Error( `AWSBedrock::streamGetChatCompletion failed during setup. ${e.message}` ); } }
Sends a request for streaming chat completion. @param {Array<object> | null} messages - Formatted message array from constructPrompt. @param {object} options - Request options. @param {number} [options.temperature] - Sampling temperature. @returns {Promise<import('../../helpers/chat/LLMPerformanceMonitor').MonitoredStream>} The monitored stream object. @throws {Error} If the API call setup fails or validation errors occur.
streamGetChatCompletion
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/bedrock/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/bedrock/index.js
MIT
handleStream(response, stream, responseProps) { const { uuid = uuidv4(), sources = [] } = responseProps; let hasUsageMetrics = false; let usage = { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0 }; return new Promise(async (resolve) => { let fullText = ""; let reasoningText = ""; // Abort handler for client closing connection const handleAbort = () => { this.#log(`Client closed connection for stream ${uuid}. Aborting.`); stream?.endMeasurement(usage); // Finalize metrics clientAbortedHandler(resolve, fullText); // Resolve with partial text }; response.on("close", handleAbort); try { // Process stream chunks for await (const chunk of stream.stream) { if (!chunk) { this.#log("Stream returned null/undefined chunk."); continue; } const action = Object.keys(chunk)[0]; switch (action) { case "metadata": // Contains usage metrics at the end if (chunk.metadata?.usage) { hasUsageMetrics = true; usage = { // Overwrite with final metrics prompt_tokens: chunk.metadata.usage.inputTokens ?? 0, completion_tokens: chunk.metadata.usage.outputTokens ?? 0, total_tokens: chunk.metadata.usage.totalTokens ?? 0, }; } break; case "contentBlockDelta": { // Contains text or reasoning deltas const delta = chunk.contentBlockDelta?.delta; if (!delta) break; const token = delta.text; const reasoningToken = delta.reasoningContent?.text; if (reasoningToken) { // Handle reasoning text if (reasoningText.length === 0) { // Start of reasoning block const startTag = "<think>"; writeResponseChunk(response, { uuid, sources, type: "textResponseChunk", textResponse: startTag + reasoningToken, close: false, error: false, }); reasoningText += startTag + reasoningToken; } else { // Continuation of reasoning block writeResponseChunk(response, { uuid, sources, type: "textResponseChunk", textResponse: reasoningToken, close: false, error: false, }); reasoningText += reasoningToken; } } else if (token) { // Handle regular text if (reasoningText.length > 0) { // If reasoning was just output, close the tag const endTag = "</think>"; writeResponseChunk(response, { uuid, sources, type: "textResponseChunk", textResponse: endTag, close: false, error: false, }); fullText += reasoningText + endTag; // Add completed reasoning to final text reasoningText = ""; // Reset reasoning buffer } fullText += token; // Append regular text if (!hasUsageMetrics) usage.completion_tokens++; // Estimate usage if no metrics yet writeResponseChunk(response, { uuid, sources, type: "textResponseChunk", textResponse: token, close: false, error: false, }); } break; } case "messageStop": // End of message event if (chunk.messageStop?.usage) { // Check for final metrics here too hasUsageMetrics = true; usage = { // Overwrite with final metrics if available prompt_tokens: chunk.messageStop.usage.inputTokens ?? usage.prompt_tokens, completion_tokens: chunk.messageStop.usage.outputTokens ?? usage.completion_tokens, total_tokens: chunk.messageStop.usage.totalTokens ?? 
usage.total_tokens, }; } // Ensure reasoning tag is closed if message stops mid-reasoning if (reasoningText.length > 0) { const endTag = "</think>"; writeResponseChunk(response, { uuid, sources, type: "textResponseChunk", textResponse: endTag, close: false, error: false, }); fullText += reasoningText + endTag; reasoningText = ""; } break; // Ignore other event types for now case "messageStart": case "contentBlockStart": case "contentBlockStop": break; default: this.#log(`Unhandled stream action: ${action}`, chunk); } } // End for await loop // Final cleanup for reasoning tag in case stream ended abruptly if (reasoningText.length > 0 && !fullText.endsWith("</think>")) { const endTag = "</think>"; if (!response.writableEnded) { writeResponseChunk(response, { uuid, sources, type: "textResponseChunk", textResponse: endTag, close: false, error: false, }); } fullText += reasoningText + endTag; } // Send final closing chunk to signal end of stream if (!response.writableEnded) { writeResponseChunk(response, { uuid, sources, type: "textResponseChunk", textResponse: "", close: true, error: false, }); } } catch (error) { // Handle errors during stream processing this.#log( `\x1b[43m\x1b[34m[STREAMING ERROR]\x1b[0m ${error.message}`, error ); if (response && !response.writableEnded) { writeResponseChunk(response, { uuid, type: "abort", textResponse: null, sources, close: true, error: `AWSBedrock:streaming - error. ${ error?.message ?? "Unknown error" }`, }); } } finally { response.removeListener("close", handleAbort); stream?.endMeasurement(usage); resolve(fullText); // Resolve with the accumulated text } }); }
Handles the stream response from the AWS Bedrock API ConverseStreamCommand. Parses chunks, handles reasoning tags, and estimates token usage if not provided. @param {object} response - The HTTP response object to write chunks to. @param {import('../../helpers/chat/LLMPerformanceMonitor').MonitoredStream} stream - The monitored stream object from streamGetChatCompletion. @param {object} responseProps - Additional properties for the response chunks. @param {string} responseProps.uuid - Unique ID for the response. @param {Array} responseProps.sources - Source documents used (if any). @returns {Promise<string>} A promise that resolves with the complete text response when the stream ends.
handleStream
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/bedrock/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/bedrock/index.js
MIT
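The reasoning handling is easiest to see in isolation; the sketch below reproduces just the <think> tag logic over a few mock contentBlockDelta payloads (the delta values are fabricated for illustration):

// Mock contentBlockDelta payloads in the order a reasoning-capable model might
// emit them: reasoning deltas first, then regular text deltas.
const deltas = [
  { reasoningContent: { text: "The user wants" } },
  { reasoningContent: { text: " a summary." } },
  { text: "Here is the summary: ..." },
];

let fullText = "";
let reasoningText = "";
for (const delta of deltas) {
  const token = delta.text;
  const reasoningToken = delta.reasoningContent?.text;
  if (reasoningToken) {
    // Open the <think> block on the first reasoning delta only.
    reasoningText +=
      reasoningText.length === 0 ? `<think>${reasoningToken}` : reasoningToken;
  } else if (token) {
    // The first regular token after reasoning closes the block.
    if (reasoningText.length > 0) {
      fullText += `${reasoningText}</think>`;
      reasoningText = "";
    }
    fullText += token;
  }
}
console.log(fullText);
// <think>The user wants a summary.</think>Here is the summary: ...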
handleAbort = () => { this.#log(`Client closed connection for stream ${uuid}. Aborting.`); stream?.endMeasurement(usage); // Finalize metrics clientAbortedHandler(resolve, fullText); // Resolve with partial text }
Abort handler for the Bedrock stream. Finalizes usage metrics on the monitored stream and resolves the pending handleStream promise with whatever text was generated before the client closed the connection.
handleAbort
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/bedrock/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/bedrock/index.js
MIT
async embedTextInput(textInput) { return await this.embedder.embedTextInput(textInput); }
Delegates embedding of a single text input to the configured embedder. @param {string} textInput - The text to embed. @returns {Promise<Array<number>>} The embedding vector.
embedTextInput
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/bedrock/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/bedrock/index.js
MIT
async embedChunks(textChunks = []) { return await this.embedder.embedChunks(textChunks); }
Delegates embedding of multiple text chunks to the configured embedder. @param {string[]} textChunks - The text chunks to embed. @returns {Promise<Array<Array<number>>>} One embedding vector per chunk.
embedChunks
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/bedrock/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/bedrock/index.js
MIT
async compressMessages(promptArgs = {}, rawHistory = []) { const { messageArrayCompressor } = require("../../helpers/chat"); const messageArray = this.constructPrompt(promptArgs); return await messageArrayCompressor(this, messageArray, rawHistory); }
Builds the prompt via constructPrompt and runs it through messageArrayCompressor so the message array fits within the model's context window. @param {object} promptArgs - Arguments forwarded to constructPrompt. @param {Array} rawHistory - The raw chat history. @returns {Promise<Array<object>>} The compressed message array.
compressMessages
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/bedrock/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/bedrock/index.js
MIT
function getImageFormatFromMime(mimeType = "") { if (!mimeType) return null; const parts = mimeType.toLowerCase().split("/"); if (parts?.[0] !== "image") return null; let format = parts?.[1]; if (!format) return null; // Remap jpg to jpeg switch (format) { case "jpg": format = "jpeg"; break; default: break; } if (!SUPPORTED_BEDROCK_IMAGE_FORMATS.includes(format)) return null; return format; }
Parses a MIME type string (e.g., "image/jpeg") to extract and validate the image format supported by Bedrock Converse. Handles 'image/jpg' as 'jpeg'. @param {string | null | undefined} mimeType - The MIME type string. @returns {string | null} The validated image format (e.g., "jpeg") or null if invalid/unsupported.
getImageFormatFromMime
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/bedrock/utils.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/bedrock/utils.js
MIT
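A usage sketch, with the function body restated in condensed form; the SUPPORTED_BEDROCK_IMAGE_FORMATS allow-list below is an assumption included only so the snippet runs on its own (the real constant is defined in the same utils file):

// Assumed allow-list for illustration; check the utils file for the real values.
const SUPPORTED_BEDROCK_IMAGE_FORMATS = ["png", "jpeg", "gif", "webp"];

function getImageFormatFromMime(mimeType = "") {
  if (!mimeType) return null;
  const parts = mimeType.toLowerCase().split("/");
  if (parts?.[0] !== "image") return null;
  let format = parts?.[1];
  if (!format) return null;
  if (format === "jpg") format = "jpeg"; // Bedrock expects "jpeg", not "jpg"
  return SUPPORTED_BEDROCK_IMAGE_FORMATS.includes(format) ? format : null;
}

console.log(getImageFormatFromMime("image/jpg"));  // "jpeg" (remapped)
console.log(getImageFormatFromMime("image/png"));  // "png"
console.log(getImageFormatFromMime("image/tiff")); // null (unsupported format)
console.log(getImageFormatFromMime("text/plain")); // null (not an image)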
function base64ToUint8Array(base64String) { try { const binaryString = atob(base64String); const len = binaryString.length; const bytes = new Uint8Array(len); for (let i = 0; i < len; i++) bytes[i] = binaryString.charCodeAt(i); return bytes; } catch (e) { console.error( `[AWSBedrock] Error decoding base64 string with atob: ${e.message}` ); return null; } }
Decodes a pure base64 string (without data URI prefix) into a Uint8Array using the atob method. This approach matches the technique previously used by Langchain's implementation. @param {string} base64String - The pure base64 encoded data. @returns {Uint8Array | null} The resulting byte array or null on decoding error.
base64ToUint8Array
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/bedrock/utils.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/bedrock/utils.js
MIT
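A quick usage check (atob is available globally in Node 16+); the sample strings are arbitrary and the error logging is trimmed from this restatement:

function base64ToUint8Array(base64String) {
  try {
    const binaryString = atob(base64String);
    const bytes = new Uint8Array(binaryString.length);
    for (let i = 0; i < binaryString.length; i++)
      bytes[i] = binaryString.charCodeAt(i);
    return bytes;
  } catch (e) {
    return null; // invalid base64 input
  }
}

// "aGVsbG8=" is base64 for "hello".
console.log(base64ToUint8Array("aGVsbG8="));    // Uint8Array [104, 101, 108, 108, 111]
console.log(base64ToUint8Array("not base64!")); // null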
async handleStream(response, stream, responseProps) { return new Promise(async (resolve) => { const { uuid = v4(), sources = [] } = responseProps; let fullText = ""; let usage = { prompt_tokens: 0, completion_tokens: 0, }; const handleAbort = () => { writeResponseChunk(response, { uuid, sources, type: "abort", textResponse: fullText, close: true, error: false, }); response.removeListener("close", handleAbort); stream.endMeasurement(usage); resolve(fullText); }; response.on("close", handleAbort); try { for await (const chat of stream) { if (chat.eventType === "stream-end") { const usageMetrics = chat?.response?.meta?.tokens || {}; usage.prompt_tokens = usageMetrics.inputTokens || 0; usage.completion_tokens = usageMetrics.outputTokens || 0; } if (chat.eventType === "text-generation") { const text = chat.text; fullText += text; writeResponseChunk(response, { uuid, sources, type: "textResponseChunk", textResponse: text, close: false, error: false, }); } } writeResponseChunk(response, { uuid, sources, type: "textResponseChunk", textResponse: "", close: true, error: false, }); response.removeListener("close", handleAbort); stream.endMeasurement(usage); resolve(fullText); } catch (error) { writeResponseChunk(response, { uuid, sources, type: "abort", textResponse: null, close: true, error: error.message, }); response.removeListener("close", handleAbort); stream.endMeasurement(usage); resolve(fullText); } }); }
Handles the stream response from the Cohere API. @param {Object} response - the response object @param {import('../../helpers/chat/LLMPerformanceMonitor').MonitoredStream} stream - the stream response from the Cohere API w/tracking @param {Object} responseProps - the response properties @returns {Promise<string>}
handleStream
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/cohere/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/cohere/index.js
MIT
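To illustrate the two Cohere event types the loop reads, here is a stripped-down consumer over a mock async iterable; the event payloads are fabricated to match the shapes accessed above:

// Mock Cohere stream: "text-generation" events carry text, "stream-end" carries
// the token counts the handler copies into its usage object.
async function* mockCohereStream() {
  yield { eventType: "text-generation", text: "Hello" };
  yield { eventType: "text-generation", text: ", world." };
  yield {
    eventType: "stream-end",
    response: { meta: { tokens: { inputTokens: 12, outputTokens: 4 } } },
  };
}

(async () => {
  let fullText = "";
  const usage = { prompt_tokens: 0, completion_tokens: 0 };
  for await (const chat of mockCohereStream()) {
    if (chat.eventType === "text-generation") fullText += chat.text;
    if (chat.eventType === "stream-end") {
      const tokens = chat?.response?.meta?.tokens || {};
      usage.prompt_tokens = tokens.inputTokens || 0;
      usage.completion_tokens = tokens.outputTokens || 0;
    }
  }
  console.log(fullText, usage);
  // "Hello, world." { prompt_tokens: 12, completion_tokens: 4 }
})();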
handleAbort = () => { writeResponseChunk(response, { uuid, sources, type: "abort", textResponse: fullText, close: true, error: false, }); response.removeListener("close", handleAbort); stream.endMeasurement(usage); resolve(fullText); }
Abort handler for the Cohere stream. Writes an abort chunk containing the partial text, finalizes usage metrics on the monitored stream, and resolves the pending handleStream promise when the client closes the connection.
handleAbort
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/cohere/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/cohere/index.js
MIT
async embedTextInput(textInput) { return await this.embedder.embedTextInput(textInput); }
Delegates embedding of a single text input to the configured embedder. @param {string} textInput - The text to embed. @returns {Promise<Array<number>>} The embedding vector.
embedTextInput
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/cohere/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/cohere/index.js
MIT
async embedChunks(textChunks = []) { return await this.embedder.embedChunks(textChunks); }
Delegates embedding of multiple text chunks to the configured embedder. @param {string[]} textChunks - The text chunks to embed. @returns {Promise<Array<Array<number>>>} One embedding vector per chunk.
embedChunks
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/cohere/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/cohere/index.js
MIT
async compressMessages(promptArgs = {}, rawHistory = []) { const { messageArrayCompressor } = require("../../helpers/chat"); const messageArray = this.constructPrompt(promptArgs); return await messageArrayCompressor(this, messageArray, rawHistory); }
Builds the prompt via constructPrompt and runs it through messageArrayCompressor so the message array fits within the model's context window. @param {object} promptArgs - Arguments forwarded to constructPrompt. @param {Array} rawHistory - The raw chat history. @returns {Promise<Array<object>>} The compressed message array.
compressMessages
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/cohere/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/cohere/index.js
MIT
async getChatCompletion(messages = null, { temperature = 0.7 }) { if (!(await this.isValidChatCompletionModel(this.model))) throw new Error( `DeepSeek chat: ${this.model} is not valid for chat completion!` ); const result = await LLMPerformanceMonitor.measureAsyncFunction( this.openai.chat.completions .create({ model: this.model, messages, temperature, }) .catch((e) => { throw new Error(e.message); }) ); if ( !result?.output?.hasOwnProperty("choices") || result?.output?.choices?.length === 0 ) throw new Error( `Invalid response body returned from DeepSeek: ${JSON.stringify(result.output)}` ); return { textResponse: this.#parseReasoningFromResponse(result.output.choices[0]), metrics: { prompt_tokens: result.output.usage.prompt_tokens || 0, completion_tokens: result.output.usage.completion_tokens || 0, total_tokens: result.output.usage.total_tokens || 0, outputTps: result.output.usage.completion_tokens / result.duration, duration: result.duration, }, }; }
Sends a non-streaming chat completion request to DeepSeek and returns the text response (with any reasoning prepended by #parseReasoningFromResponse) along with token usage metrics. @param {Array<object>|null} messages - Formatted message array from constructPrompt. @param {object} options @param {number} [options.temperature] - Sampling temperature. @returns {Promise<object>} Response object with textResponse and metrics.
getChatCompletion
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/deepseek/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/deepseek/index.js
MIT
async streamGetChatCompletion(messages = null, { temperature = 0.7 }) { if (!(await this.isValidChatCompletionModel(this.model))) throw new Error( `DeepSeek chat: ${this.model} is not valid for chat completion!` ); const measuredStreamRequest = await LLMPerformanceMonitor.measureStream( this.openai.chat.completions.create({ model: this.model, stream: true, messages, temperature, }), messages, false ); return measuredStreamRequest; }
Starts a streaming chat completion request to DeepSeek, wrapped with LLMPerformanceMonitor.measureStream. @param {Array<object>|null} messages - Formatted message array from constructPrompt. @param {object} options @param {number} [options.temperature] - Sampling temperature. @returns {Promise<import('../../helpers/chat/LLMPerformanceMonitor').MonitoredStream>} The monitored stream object.
streamGetChatCompletion
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/deepseek/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/deepseek/index.js
MIT
handleStream(response, stream, responseProps) { const { uuid = uuidv4(), sources = [] } = responseProps; let hasUsageMetrics = false; let usage = { completion_tokens: 0, }; return new Promise(async (resolve) => { let fullText = ""; let reasoningText = ""; // Establish listener to early-abort a streaming response // in case things go sideways or the user does not like the response. // We preserve the generated text but continue as if chat was completed // to preserve previously generated content. const handleAbort = () => { stream?.endMeasurement(usage); clientAbortedHandler(resolve, fullText); }; response.on("close", handleAbort); try { for await (const chunk of stream) { const message = chunk?.choices?.[0]; const token = message?.delta?.content; const reasoningToken = message?.delta?.reasoning_content; if ( chunk.hasOwnProperty("usage") && // exists !!chunk.usage && // is not null Object.values(chunk.usage).length > 0 // has values ) { if (chunk.usage.hasOwnProperty("prompt_tokens")) { usage.prompt_tokens = Number(chunk.usage.prompt_tokens); } if (chunk.usage.hasOwnProperty("completion_tokens")) { hasUsageMetrics = true; // to stop estimating counter usage.completion_tokens = Number(chunk.usage.completion_tokens); } } // Reasoning models will always return the reasoning text before the token text. if (reasoningToken) { // If the reasoning text is empty (''), we need to initialize it // and send the first chunk of reasoning text. if (reasoningText.length === 0) { writeResponseChunk(response, { uuid, sources: [], type: "textResponseChunk", textResponse: `<think>${reasoningToken}`, close: false, error: false, }); reasoningText += `<think>${reasoningToken}`; continue; } else { writeResponseChunk(response, { uuid, sources: [], type: "textResponseChunk", textResponse: reasoningToken, close: false, error: false, }); reasoningText += reasoningToken; } } // If the reasoning text is not empty, but the reasoning token is empty // and the token text is not empty we need to close the reasoning text and begin sending the token text. if (!!reasoningText && !reasoningToken && token) { writeResponseChunk(response, { uuid, sources: [], type: "textResponseChunk", textResponse: `</think>`, close: false, error: false, }); fullText += `${reasoningText}</think>`; reasoningText = ""; } if (token) { fullText += token; // If we never saw a usage metric, we can estimate them by number of completion chunks if (!hasUsageMetrics) usage.completion_tokens++; writeResponseChunk(response, { uuid, sources: [], type: "textResponseChunk", textResponse: token, close: false, error: false, }); } // LocalAi returns '' and others return null on chunks - the last chunk is not "" or null. // Either way, the key `finish_reason` must be present to determine ending chunk. if ( message?.hasOwnProperty("finish_reason") && // Got valid message and it is an object with finish_reason message.finish_reason !== "" && message.finish_reason !== null ) { writeResponseChunk(response, { uuid, sources, type: "textResponseChunk", textResponse: "", close: true, error: false, }); response.removeListener("close", handleAbort); stream?.endMeasurement(usage); resolve(fullText); break; // Break streaming when a valid finish_reason is first encountered } } } catch (e) { console.log(`\x1b[43m\x1b[34m[STREAMING ERROR]\x1b[0m ${e.message}`); writeResponseChunk(response, { uuid, type: "abort", textResponse: null, sources: [], close: true, error: e.message, }); stream?.endMeasurement(usage); resolve(fullText); // Return what we currently have - if anything. 
} }); }
Handles the streaming response from the DeepSeek API. Wraps reasoning_content deltas in <think> tags, estimates completion tokens when no usage metrics are reported, and resolves with the full text once a finish_reason is seen. @param {Object} response - The HTTP response object to write chunks to. @param {import('../../helpers/chat/LLMPerformanceMonitor').MonitoredStream} stream - The monitored stream object. @param {Object} responseProps - Additional response properties. @returns {Promise<string>} The accumulated text response.
handleStream
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/deepseek/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/deepseek/index.js
MIT
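One detail worth isolating: when the provider never attaches a usage object to any chunk, completion_tokens is estimated by counting content-bearing chunks. A small sketch of that fallback with fabricated chunks:

// Fabricated OpenAI-style stream chunks with no usage payload, so the handler
// falls back to counting one completion token per content chunk.
const chunks = [
  { choices: [{ delta: { content: "Deep" } }] },
  { choices: [{ delta: { content: "Seek" } }] },
  { choices: [{ delta: { content: "!" }, finish_reason: "stop" }] },
];

let hasUsageMetrics = false;
const usage = { completion_tokens: 0 };
for (const chunk of chunks) {
  if (chunk.usage && Object.values(chunk.usage).length > 0) hasUsageMetrics = true;
  const token = chunk?.choices?.[0]?.delta?.content;
  if (token && !hasUsageMetrics) usage.completion_tokens++; // rough estimate only
}
console.log(usage); // { completion_tokens: 3 }, an approximation rather than a real count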
handleAbort = () => { stream?.endMeasurement(usage); clientAbortedHandler(resolve, fullText); }
Abort handler for the DeepSeek stream. Finalizes usage metrics on the monitored stream and resolves the pending handleStream promise with the text generated so far when the client closes the connection.
handleAbort
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/deepseek/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/deepseek/index.js
MIT
async embedTextInput(textInput) { return await this.embedder.embedTextInput(textInput); }
Delegates embedding of a single text input to the configured embedder. @param {string} textInput - The text to embed. @returns {Promise<Array<number>>} The embedding vector.
embedTextInput
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/deepseek/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/deepseek/index.js
MIT
async embedChunks(textChunks = []) { return await this.embedder.embedChunks(textChunks); }
Delegates embedding of multiple text chunks to the configured embedder. @param {string[]} textChunks - The text chunks to embed. @returns {Promise<Array<Array<number>>>} One embedding vector per chunk.
embedChunks
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/deepseek/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/deepseek/index.js
MIT
async compressMessages(promptArgs = {}, rawHistory = []) { const { messageArrayCompressor } = require("../../helpers/chat"); const messageArray = this.constructPrompt(promptArgs); return await messageArrayCompressor(this, messageArray, rawHistory); }
Builds the prompt via constructPrompt and runs it through messageArrayCompressor so the message array fits within the model's context window. @param {object} promptArgs - Arguments forwarded to constructPrompt. @param {Array} rawHistory - The raw chat history. @returns {Promise<Array<object>>} The compressed message array.
compressMessages
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/deepseek/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/deepseek/index.js
MIT
static parseBasePath(providedBasePath = process.env.DPAIS_LLM_BASE_PATH) { try { const baseURL = new URL(providedBasePath); const basePath = `${baseURL.origin}/v1/openai`; return basePath; } catch (e) { return null; } }
Parse the base path for the Dell Pro AI Studio API so we can use it for inference requests @param {string} providedBasePath @returns {string}
parseBasePath
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/dellProAiStudio/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/dellProAiStudio/index.js
MIT
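A usage sketch of the normalization (the localhost URLs are example values for DPAIS_LLM_BASE_PATH, and the function body is restated in condensed form):

// Whatever path or trailing segment the configured URL carries is discarded;
// only the origin is kept and "/v1/openai" is appended.
function parseBasePath(providedBasePath) {
  try {
    const baseURL = new URL(providedBasePath);
    return `${baseURL.origin}/v1/openai`;
  } catch (e) {
    return null; // not a valid URL
  }
}

console.log(parseBasePath("http://localhost:8553"));         // "http://localhost:8553/v1/openai"
console.log(parseBasePath("http://localhost:8553/api/v2/")); // "http://localhost:8553/v1/openai"
console.log(parseBasePath("not-a-url"));                     // null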
log(text, ...args) { console.log(`\x1b[36m[${this.constructor.name}]\x1b[0m ${text}`, ...args); }
Logs a message to the console, prefixed with this provider's class name. @param {string} text @param {...any} args
log
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/dellProAiStudio/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/dellProAiStudio/index.js
MIT
streamingEnabled() { return "streamGetChatCompletion" in this; }
Indicates whether this provider supports streaming chat completions by checking for a streamGetChatCompletion method. @returns {boolean}
streamingEnabled
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/dellProAiStudio/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/dellProAiStudio/index.js
MIT
static promptWindowLimit(_modelName) { const limit = process.env.DPAIS_LLM_MODEL_TOKEN_LIMIT || 4096; if (!limit || isNaN(Number(limit))) throw new Error("No Dell Pro AI Studio token context limit was set."); return Number(limit); }
Returns the token context window for Dell Pro AI Studio models from the DPAIS_LLM_MODEL_TOKEN_LIMIT environment variable (default 4096); throws if the configured value is not a number. @param {string} _modelName - Unused. @returns {number}
promptWindowLimit
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/dellProAiStudio/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/dellProAiStudio/index.js
MIT
promptWindowLimit() { const limit = process.env.DPAIS_LLM_MODEL_TOKEN_LIMIT || 4096; if (!limit || isNaN(Number(limit))) throw new Error("No Dell Pro AI Studio token context limit was set."); return Number(limit); }
Instance variant of promptWindowLimit; reads the token context window from DPAIS_LLM_MODEL_TOKEN_LIMIT (default 4096) and throws if the configured value is not a number. @returns {number}
promptWindowLimit
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/dellProAiStudio/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/dellProAiStudio/index.js
MIT
async isValidChatCompletionModel(_ = "") { return true; }
Stubbed validity check kept for compatibility with the LLM interface; always resolves to true. @returns {Promise<boolean>}
isValidChatCompletionModel
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/dellProAiStudio/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/dellProAiStudio/index.js
MIT
constructPrompt({ systemPrompt = "", contextTexts = [], chatHistory = [], userPrompt = "", _attachments = [], // not used for Dell Pro AI Studio - `attachments` passed in is ignored }) { const prompt = { role: "system", content: `${systemPrompt}${this.#appendContext(contextTexts)}`, }; return [ prompt, ...formatChatHistory(chatHistory, this.#generateContent), { role: "user", content: this.#generateContent({ userPrompt, _attachments }), }, ]; }
Construct the user prompt for this model. @param {{attachments: import("../../helpers").Attachment[]}} param0 @returns
constructPrompt
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/dellProAiStudio/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/dellProAiStudio/index.js
MIT
async getChatCompletion(messages = null, { temperature = 0.7 }) { if (!this.model) throw new Error( `Dell Pro AI Studio chat: ${this.model} is not valid or defined model for chat completion!` ); const result = await LLMPerformanceMonitor.measureAsyncFunction( this.dpais.chat.completions.create({ model: this.model, messages, temperature, }) ); if ( !result.output.hasOwnProperty("choices") || result.output.choices.length === 0 ) return null; return { textResponse: result.output.choices[0].message.content, metrics: { prompt_tokens: result.output.usage?.prompt_tokens || 0, completion_tokens: result.output.usage?.completion_tokens || 0, total_tokens: result.output.usage?.total_tokens || 0, outputTps: result.output.usage?.completion_tokens / result.duration, duration: result.duration, }, }; }
Sends a non-streaming chat completion request to the Dell Pro AI Studio endpoint and returns the text response plus usage metrics, or null if no choices are returned. @param {Array<object>|null} messages - Formatted message array from constructPrompt. @param {object} options @param {number} [options.temperature] - Sampling temperature. @returns {Promise<object|null>}
getChatCompletion
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/dellProAiStudio/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/dellProAiStudio/index.js
MIT
async streamGetChatCompletion(messages = null, { temperature = 0.7 }) { if (!this.model) throw new Error( `Dell Pro AI Studio chat: ${this.model} is not valid or defined model for chat completion!` ); const measuredStreamRequest = await LLMPerformanceMonitor.measureStream( this.dpais.chat.completions.create({ model: this.model, stream: true, messages, temperature, }), messages ); return measuredStreamRequest; }
Starts a streaming chat completion request against the Dell Pro AI Studio endpoint, wrapped with LLMPerformanceMonitor.measureStream. @param {Array<object>|null} messages - Formatted message array from constructPrompt. @param {object} options @param {number} [options.temperature] - Sampling temperature. @returns {Promise<import('../../helpers/chat/LLMPerformanceMonitor').MonitoredStream>}
streamGetChatCompletion
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/dellProAiStudio/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/dellProAiStudio/index.js
MIT
handleStream(response, stream, responseProps) { return handleDefaultStreamResponseV2(response, stream, responseProps); }
Handles the streaming response using the shared OpenAI-compatible handler handleDefaultStreamResponseV2. @param {Object} response @param {import('../../helpers/chat/LLMPerformanceMonitor').MonitoredStream} stream @param {Object} responseProps @returns {Promise<string>}
handleStream
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/dellProAiStudio/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/dellProAiStudio/index.js
MIT
async embedTextInput(textInput) { return await this.embedder.embedTextInput(textInput); }
Delegates embedding of a single text input to the configured embedder. @param {string} textInput - The text to embed. @returns {Promise<Array<number>>} The embedding vector.
embedTextInput
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/dellProAiStudio/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/dellProAiStudio/index.js
MIT
async embedChunks(textChunks = []) { return await this.embedder.embedChunks(textChunks); }
Delegates embedding of multiple text chunks to the configured embedder. @param {string[]} textChunks - The text chunks to embed. @returns {Promise<Array<Array<number>>>} One embedding vector per chunk.
embedChunks
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/dellProAiStudio/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/dellProAiStudio/index.js
MIT
async compressMessages(promptArgs = {}, rawHistory = []) { const { messageArrayCompressor } = require("../../helpers/chat"); const messageArray = this.constructPrompt(promptArgs); return await messageArrayCompressor(this, messageArray, rawHistory); }
Builds the prompt via constructPrompt and runs it through messageArrayCompressor so the message array fits within the model's context window. @param {object} promptArgs - Arguments forwarded to constructPrompt. @param {Array} rawHistory - The raw chat history. @returns {Promise<Array<object>>} The compressed message array.
compressMessages
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/dellProAiStudio/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/dellProAiStudio/index.js
MIT
get supportsSystemPrompt() { return !NO_SYSTEM_PROMPT_MODELS.includes(this.model); }
Checks if the model supports system prompts This is a static list of models that are known to not support system prompts since this information is not available in the API model response. @returns {boolean}
supportsSystemPrompt
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/gemini/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/gemini/index.js
MIT
static cacheIsStale() { const MAX_STALE = 8.64e7; // 1 day in MS if (!fs.existsSync(path.resolve(cacheFolder, ".cached_at"))) return true; const now = Number(new Date()); const timestampMs = Number( fs.readFileSync(path.resolve(cacheFolder, ".cached_at")) ); return now - timestampMs > MAX_STALE; }
Checks whether the cached Gemini model list is stale by comparing the .cached_at timestamp to the current time; anything older than one day is considered stale. @returns {boolean}
cacheIsStale
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/gemini/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/gemini/index.js
MIT
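The staleness rule reduces to a timestamp comparison; a pure-function sketch with hypothetical timestamps (the real method reads the timestamp from the .cached_at file next to the cached models.json):

// 8.64e7 ms is 24 hours.
const MAX_STALE = 8.64e7;
const cacheIsStale = (cachedAtMs, nowMs = Date.now()) => nowMs - cachedAtMs > MAX_STALE;

const now = Date.now();
console.log(cacheIsStale(now - 2 * 60 * 60 * 1000, now)); // false, cached 2 hours ago
console.log(cacheIsStale(now - 3 * 8.64e7, now));         // true, cached 3 days ago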
streamingEnabled() { return "streamGetChatCompletion" in this; }
Indicates whether this provider supports streaming chat completions by checking for a streamGetChatCompletion method. @returns {boolean}
streamingEnabled
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/gemini/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/gemini/index.js
MIT
static promptWindowLimit(modelName) { try { const cacheModelPath = path.resolve(cacheFolder, "models.json"); if (!fs.existsSync(cacheModelPath)) return MODEL_MAP.get("gemini", modelName) ?? 30_720; const models = safeJsonParse(fs.readFileSync(cacheModelPath)); const model = models.find((model) => model.id === modelName); if (!model) throw new Error( "Model not found in cache - falling back to default model." ); return model.contextWindow; } catch (e) { console.error(`GeminiLLM:promptWindowLimit`, e.message); return MODEL_MAP.get("gemini", modelName) ?? 30_720; } }
Returns the context window for the given Gemini model, preferring the cached models.json entry and falling back to MODEL_MAP (or 30,720 tokens) when the cache or model entry is missing. @param {string} modelName @returns {number}
promptWindowLimit
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/gemini/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/gemini/index.js
MIT
promptWindowLimit() { try { if (!fs.existsSync(this.cacheModelPath)) return MODEL_MAP.get("gemini", this.model) ?? 30_720; const models = safeJsonParse(fs.readFileSync(this.cacheModelPath)); const model = models.find((model) => model.id === this.model); if (!model) throw new Error( "Model not found in cache - falling back to default model." ); return model.contextWindow; } catch (e) { console.error(`GeminiLLM:promptWindowLimit`, e.message); return MODEL_MAP.get("gemini", this.model) ?? 30_720; } }
Instance variant of promptWindowLimit for the currently selected Gemini model; prefers the cached models.json entry and falls back to MODEL_MAP (or 30,720 tokens). @returns {number}
promptWindowLimit
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/gemini/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/gemini/index.js
MIT
isExperimentalModel(modelName) { if ( fs.existsSync(cacheFolder) && fs.existsSync(path.resolve(cacheFolder, "models.json")) ) { const models = safeJsonParse( fs.readFileSync(path.resolve(cacheFolder, "models.json")) ); const model = models.find((model) => model.id === modelName); if (!model) return false; return model.experimental; } return modelName.includes("exp") || v1BetaModels.includes(modelName); }
Checks if a model is experimental by reading from the cache if available, otherwise it will perform a blind check against the v1BetaModels list - which is manually maintained and updated. @param {string} modelName - The name of the model to check @returns {boolean} A boolean indicating if the model is experimental
isExperimentalModel
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/gemini/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/gemini/index.js
MIT
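When no cache exists, the check falls back to a name heuristic; a quick illustration with hypothetical model ids and an assumed v1BetaModels list (the real list is maintained in the Gemini provider module):

// Assumed beta list, for illustration only.
const v1BetaModels = ["gemini-1.5-pro-latest"];
const isExperimentalByName = (modelName) =>
  modelName.includes("exp") || v1BetaModels.includes(modelName);

console.log(isExperimentalByName("gemini-2.0-flash-exp"));  // true, name contains "exp"
console.log(isExperimentalByName("gemini-1.5-pro-latest")); // true, listed in v1BetaModels
console.log(isExperimentalByName("gemini-1.5-flash"));      // false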
async isValidChatCompletionModel(modelName = "") { const models = await this.fetchModels(process.env.GEMINI_API_KEY); return models.some((model) => model.id === modelName); }
Checks if a model is valid for chat completion (unused) @deprecated @param {string} modelName - The name of the model to check @returns {Promise<boolean>} A promise that resolves to a boolean indicating if the model is valid
isValidChatCompletionModel
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/gemini/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/gemini/index.js
MIT
constructPrompt({ systemPrompt = "", contextTexts = [], chatHistory = [], userPrompt = "", attachments = [], // This is the specific attachment for only this prompt }) { let prompt = []; if (this.supportsSystemPrompt) { prompt.push({ role: "system", content: `${systemPrompt}${this.#appendContext(contextTexts)}`, }); } else { this.#log( `${this.model} - does not support system prompts - emulating...` ); prompt.push( { role: "user", content: `${systemPrompt}${this.#appendContext(contextTexts)}`, }, { role: "assistant", content: "Okay.", } ); } return [ ...prompt, ...formatChatHistory(chatHistory, this.#generateContent), { role: "user", content: this.#generateContent({ userPrompt, attachments }), }, ]; }
Construct the user prompt for this model. @param {{attachments: import("../../helpers").Attachment[]}} param0 @returns
constructPrompt
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/gemini/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/gemini/index.js
MIT
async getChatCompletion(messages = null, { temperature = 0.7 }) { const result = await LLMPerformanceMonitor.measureAsyncFunction( this.openai.chat.completions .create({ model: this.model, messages, temperature: temperature, }) .catch((e) => { console.error(e); throw new Error(e.message); }) ); if ( !result.output.hasOwnProperty("choices") || result.output.choices.length === 0 ) return null; return { textResponse: result.output.choices[0].message.content, metrics: { prompt_tokens: result.output.usage.prompt_tokens || 0, completion_tokens: result.output.usage.completion_tokens || 0, total_tokens: result.output.usage.total_tokens || 0, outputTps: result.output.usage.completion_tokens / result.duration, duration: result.duration, }, }; }
Sends a non-streaming chat completion request through the OpenAI-compatible Gemini endpoint and returns the text response plus usage metrics, or null if no choices are returned. @param {Array<object>|null} messages @param {object} options @param {number} [options.temperature] @returns {Promise<object|null>}
getChatCompletion
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/gemini/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/gemini/index.js
MIT
async streamGetChatCompletion(messages = null, { temperature = 0.7 }) { const measuredStreamRequest = await LLMPerformanceMonitor.measureStream( this.openai.chat.completions.create({ model: this.model, stream: true, messages, temperature: temperature, }), messages, true ); return measuredStreamRequest; }
Starts a streaming chat completion request through the OpenAI-compatible Gemini endpoint, wrapped with LLMPerformanceMonitor.measureStream. @param {Array<object>|null} messages @param {object} options @param {number} [options.temperature] @returns {Promise<import('../../helpers/chat/LLMPerformanceMonitor').MonitoredStream>}
streamGetChatCompletion
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/gemini/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/gemini/index.js
MIT
handleStream(response, stream, responseProps) { return handleDefaultStreamResponseV2(response, stream, responseProps); }
Handles the streaming response using the shared OpenAI-compatible handler handleDefaultStreamResponseV2. @param {Object} response @param {import('../../helpers/chat/LLMPerformanceMonitor').MonitoredStream} stream @param {Object} responseProps @returns {Promise<string>}
handleStream
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/gemini/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/gemini/index.js
MIT
async compressMessages(promptArgs = {}, rawHistory = []) { const { messageArrayCompressor } = require("../../helpers/chat"); const messageArray = this.constructPrompt(promptArgs); return await messageArrayCompressor(this, messageArray, rawHistory); }
Builds the prompt via constructPrompt and runs it through messageArrayCompressor so the message array fits within the model's context window. @param {object} promptArgs - Arguments forwarded to constructPrompt. @param {Array} rawHistory - The raw chat history. @returns {Promise<Array<object>>} The compressed message array.
compressMessages
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/gemini/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/gemini/index.js
MIT
async embedTextInput(textInput) { return await this.embedder.embedTextInput(textInput); }
Delegates embedding of a single text input to the configured embedder. @param {string} textInput - The text to embed. @returns {Promise<Array<number>>} The embedding vector.
embedTextInput
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/gemini/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/gemini/index.js
MIT
async embedChunks(textChunks = []) { return await this.embedder.embedChunks(textChunks); }
Delegates embedding of multiple text chunks to the configured embedder. @param {string[]} textChunks - The text chunks to embed. @returns {Promise<Array<Array<number>>>} One embedding vector per chunk.
embedChunks
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/gemini/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/gemini/index.js
MIT
constructPrompt({ systemPrompt = "", contextTexts = [], chatHistory = [], userPrompt = "", attachments = [], }) { const prompt = { role: "system", content: `${systemPrompt}${this.#appendContext(contextTexts)}`, }; return [ prompt, ...formatChatHistory(chatHistory, this.#generateContent), { role: "user", content: this.#generateContent({ userPrompt, attachments }), }, ]; }
Construct the user prompt for this model. @param {{attachments: import("../../helpers").Attachment[]}} param0 @returns
constructPrompt
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/genericOpenAi/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/genericOpenAi/index.js
MIT
async getChatCompletion(messages = null, { temperature = 0.7 }) { const result = await LLMPerformanceMonitor.measureAsyncFunction( this.openai.chat.completions .create({ model: this.model, messages, temperature, max_tokens: this.maxTokens, }) .catch((e) => { throw new Error(e.message); }) ); if ( !result.output.hasOwnProperty("choices") || result.output.choices.length === 0 ) return null; return { textResponse: this.#parseReasoningFromResponse(result.output.choices[0]), metrics: { prompt_tokens: result.output?.usage?.prompt_tokens || 0, completion_tokens: result.output?.usage?.completion_tokens || 0, total_tokens: result.output?.usage?.total_tokens || 0, outputTps: (result.output?.usage?.completion_tokens || 0) / result.duration, duration: result.duration, }, }; }
Sends a non-streaming chat completion request to the configured OpenAI-compatible endpoint and returns the text response (with any reasoning prepended) plus usage metrics, or null if no choices are returned. @param {Array<object>|null} messages @param {object} options @param {number} [options.temperature] @returns {Promise<object|null>}
getChatCompletion
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/genericOpenAi/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/genericOpenAi/index.js
MIT
async streamGetChatCompletion(messages = null, { temperature = 0.7 }) { const measuredStreamRequest = await LLMPerformanceMonitor.measureStream( this.openai.chat.completions.create({ model: this.model, stream: true, messages, temperature, max_tokens: this.maxTokens, }), messages // runPromptTokenCalculation: true - There is not way to know if the generic provider connected is returning // the correct usage metrics if any at all since any provider could be connected. ); return measuredStreamRequest; }
Starts a streaming chat completion request against the configured OpenAI-compatible endpoint, wrapped with LLMPerformanceMonitor.measureStream. @param {Array<object>|null} messages @param {object} options @param {number} [options.temperature] @returns {Promise<import('../../helpers/chat/LLMPerformanceMonitor').MonitoredStream>}
streamGetChatCompletion
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/genericOpenAi/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/genericOpenAi/index.js
MIT
handleStream(response, stream, responseProps) { const { uuid = uuidv4(), sources = [] } = responseProps; let hasUsageMetrics = false; let usage = { completion_tokens: 0, }; return new Promise(async (resolve) => { let fullText = ""; let reasoningText = ""; // Establish listener to early-abort a streaming response // in case things go sideways or the user does not like the response. // We preserve the generated text but continue as if chat was completed // to preserve previously generated content. const handleAbort = () => { stream?.endMeasurement(usage); clientAbortedHandler(resolve, fullText); }; response.on("close", handleAbort); try { for await (const chunk of stream) { const message = chunk?.choices?.[0]; const token = message?.delta?.content; const reasoningToken = message?.delta?.reasoning_content; if ( chunk.hasOwnProperty("usage") && // exists !!chunk.usage && // is not null Object.values(chunk.usage).length > 0 // has values ) { if (chunk.usage.hasOwnProperty("prompt_tokens")) { usage.prompt_tokens = Number(chunk.usage.prompt_tokens); } if (chunk.usage.hasOwnProperty("completion_tokens")) { hasUsageMetrics = true; // to stop estimating counter usage.completion_tokens = Number(chunk.usage.completion_tokens); } } // Reasoning models will always return the reasoning text before the token text. if (reasoningToken) { // If the reasoning text is empty (''), we need to initialize it // and send the first chunk of reasoning text. if (reasoningText.length === 0) { writeResponseChunk(response, { uuid, sources: [], type: "textResponseChunk", textResponse: `<think>${reasoningToken}`, close: false, error: false, }); reasoningText += `<think>${reasoningToken}`; continue; } else { writeResponseChunk(response, { uuid, sources: [], type: "textResponseChunk", textResponse: reasoningToken, close: false, error: false, }); reasoningText += reasoningToken; } } // If the reasoning text is not empty, but the reasoning token is empty // and the token text is not empty we need to close the reasoning text and begin sending the token text. if (!!reasoningText && !reasoningToken && token) { writeResponseChunk(response, { uuid, sources: [], type: "textResponseChunk", textResponse: `</think>`, close: false, error: false, }); fullText += `${reasoningText}</think>`; reasoningText = ""; } if (token) { fullText += token; // If we never saw a usage metric, we can estimate them by number of completion chunks if (!hasUsageMetrics) usage.completion_tokens++; writeResponseChunk(response, { uuid, sources: [], type: "textResponseChunk", textResponse: token, close: false, error: false, }); } if ( message?.hasOwnProperty("finish_reason") && // Got valid message and it is an object with finish_reason message.finish_reason !== "" && message.finish_reason !== null ) { writeResponseChunk(response, { uuid, sources, type: "textResponseChunk", textResponse: "", close: true, error: false, }); response.removeListener("close", handleAbort); stream?.endMeasurement(usage); resolve(fullText); break; // Break streaming when a valid finish_reason is first encountered } } } catch (e) { console.log(`\x1b[43m\x1b[34m[STREAMING ERROR]\x1b[0m ${e.message}`); writeResponseChunk(response, { uuid, type: "abort", textResponse: null, sources: [], close: true, error: e.message, }); stream?.endMeasurement(usage); resolve(fullText); } }); }
Handles the streaming response from the configured OpenAI-compatible endpoint. Wraps reasoning_content deltas in <think> tags, estimates completion tokens when no usage metrics are reported, and resolves with the full text. @param {Object} response @param {import('../../helpers/chat/LLMPerformanceMonitor').MonitoredStream} stream @param {Object} responseProps @returns {Promise<string>}
handleStream
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/genericOpenAi/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/genericOpenAi/index.js
MIT
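The handleStream record above wraps streamed reasoning_content deltas in <think>...</think> markers before emitting the regular answer tokens. A minimal standalone sketch of that folding logic, assuming chunks follow the OpenAI-compatible streaming delta shape used above (the foldStreamChunks name is illustrative, not from the source):

// Sketch: accumulate a streamed response, wrapping any reasoning_content
// deltas in <think>...</think> ahead of the answer text, as done above.
function foldStreamChunks(chunks) {
  let reasoningText = "";
  let fullText = "";
  for (const chunk of chunks) {
    const delta = chunk?.choices?.[0]?.delta ?? {};
    if (delta.reasoning_content) {
      // Open the think block only on the first reasoning token.
      reasoningText +=
        reasoningText.length === 0
          ? `<think>${delta.reasoning_content}`
          : delta.reasoning_content;
      continue;
    }
    if (reasoningText && delta.content) {
      // The first answer token after reasoning closes the think block.
      fullText += `${reasoningText}</think>`;
      reasoningText = "";
    }
    if (delta.content) fullText += delta.content;
  }
  return fullText;
}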
handleAbort = () => { stream?.endMeasurement(usage); clientAbortedHandler(resolve, fullText); }
Abort handler fired when the client closes the connection mid-stream; ends the usage measurement and resolves with the text generated so far.
handleAbort
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/genericOpenAi/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/genericOpenAi/index.js
MIT
async embedTextInput(textInput) { return await this.embedder.embedTextInput(textInput); }
Embeds a single text input using the configured embedder. @param {string} textInput @returns {Promise<Array<number>>}
embedTextInput
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/genericOpenAi/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/genericOpenAi/index.js
MIT
async embedChunks(textChunks = []) { return await this.embedder.embedChunks(textChunks); }
Embeds an array of text chunks using the configured embedder. @param {string[]} textChunks @returns {Promise<Array<Array<number>>>}
embedChunks
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/genericOpenAi/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/genericOpenAi/index.js
MIT
async compressMessages(promptArgs = {}, rawHistory = []) { const { messageArrayCompressor } = require("../../helpers/chat"); const messageArray = this.constructPrompt(promptArgs); return await messageArrayCompressor(this, messageArray, rawHistory); }
Constructs the prompt and compresses the resulting message array to fit within the model's context window. @param {Object} promptArgs @param {Array} rawHistory @returns {Promise<Array>}
compressMessages
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/genericOpenAi/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/genericOpenAi/index.js
MIT
constructPrompt({ systemPrompt = "", contextTexts = [], chatHistory = [], userPrompt = "", attachments = [], // This is the specific attachment for only this prompt }) { // NOTICE: SEE GroqLLM.#conditionalPromptStruct for more information on how attachments are handled with Groq. return this.#conditionalPromptStruct({ systemPrompt, contextTexts, chatHistory, userPrompt, attachments, }); }
Construct the user prompt for this model. @param {{attachments: import("../../helpers").Attachment[]}} param0 @returns
constructPrompt
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/groq/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/groq/index.js
MIT
async getChatCompletion(messages = null, { temperature = 0.7 }) { if (!(await this.isValidChatCompletionModel(this.model))) throw new Error( `GroqAI:chatCompletion: ${this.model} is not valid for chat completion!` ); const result = await LLMPerformanceMonitor.measureAsyncFunction( this.openai.chat.completions .create({ model: this.model, messages, temperature, }) .catch((e) => { throw new Error(e.message); }) ); if ( !result.output.hasOwnProperty("choices") || result.output.choices.length === 0 ) return null; return { textResponse: result.output.choices[0].message.content, metrics: { prompt_tokens: result.output.usage.prompt_tokens || 0, completion_tokens: result.output.usage.completion_tokens || 0, total_tokens: result.output.usage.total_tokens || 0, outputTps: result.output.usage.completion_tokens / result.output.usage.completion_time, duration: result.output.usage.total_time, }, }; }
Sends a non-streaming chat completion request and returns the text response along with token usage and throughput metrics. @param {Array} messages @param {{temperature: number}} param1 @returns {Promise<{textResponse: string, metrics: Object}|null>}
getChatCompletion
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/groq/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/groq/index.js
MIT
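The Groq getChatCompletion record above derives outputTps from usage.completion_tokens divided by usage.completion_time and reports usage.total_time as the duration. A hedged sketch of that arithmetic, assuming a Groq-style usage object with those fields (the groqMetrics helper and its zero-division guard are illustrative additions):

// Sketch of the throughput math used in the record above, assuming
// Groq-style usage fields where completion_time/total_time are seconds.
function groqMetrics(usage = {}) {
  const completionTokens = usage.completion_tokens || 0;
  const completionTime = usage.completion_time || 0;
  return {
    prompt_tokens: usage.prompt_tokens || 0,
    completion_tokens: completionTokens,
    total_tokens: usage.total_tokens || 0,
    // Guard against division by zero when timing data is missing.
    outputTps: completionTime > 0 ? completionTokens / completionTime : 0,
    duration: usage.total_time || 0,
  };
}

// e.g. groqMetrics({ completion_tokens: 120, completion_time: 0.8 }).outputTps === 150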
async streamGetChatCompletion(messages = null, { temperature = 0.7 }) { if (!(await this.isValidChatCompletionModel(this.model))) throw new Error( `GroqAI:streamChatCompletion: ${this.model} is not valid for chat completion!` ); const measuredStreamRequest = await LLMPerformanceMonitor.measureStream( this.openai.chat.completions.create({ model: this.model, stream: true, messages, temperature, }), messages, false ); return measuredStreamRequest; }
Starts a streaming chat completion request wrapped in a performance-measured stream. @param {Array} messages @param {{temperature: number}} param1 @returns {Promise<Object>}
streamGetChatCompletion
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/groq/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/groq/index.js
MIT
handleStream(response, stream, responseProps) { return handleDefaultStreamResponseV2(response, stream, responseProps); }
Handles the streaming response via the shared default stream response handler. @param {Object} response @param {Object} stream @param {Object} responseProps @returns {Promise<string>}
handleStream
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/groq/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/groq/index.js
MIT
async embedTextInput(textInput) { return await this.embedder.embedTextInput(textInput); }
Embeds a single text input using the configured embedder. @param {string} textInput @returns {Promise<Array<number>>}
embedTextInput
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/groq/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/groq/index.js
MIT
async embedChunks(textChunks = []) { return await this.embedder.embedChunks(textChunks); }
Embeds an array of text chunks using the configured embedder. @param {string[]} textChunks @returns {Promise<Array<Array<number>>>}
embedChunks
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/groq/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/groq/index.js
MIT
async compressMessages(promptArgs = {}, rawHistory = []) { const { messageArrayCompressor } = require("../../helpers/chat"); const messageArray = this.constructPrompt(promptArgs); return await messageArrayCompressor(this, messageArray, rawHistory); }
Constructs the prompt and compresses the resulting message array to fit within the model's context window. @param {Object} promptArgs @param {Array} rawHistory @returns {Promise<Array>}
compressMessages
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/groq/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/groq/index.js
MIT
constructPrompt({ systemPrompt = "", contextTexts = [], chatHistory = [], userPrompt = "", attachments = [], }) { const prompt = { role: "system", content: `${systemPrompt}${this.#appendContext(contextTexts)}`, }; return [ prompt, ...formatChatHistory(chatHistory, this.#generateContent), { role: "user", content: this.#generateContent({ userPrompt, attachments }), }, ]; }
Construct the user prompt for this model. @param {{attachments: import("../../helpers").Attachment[]}} param0 @returns
constructPrompt
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/koboldCPP/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/koboldCPP/index.js
MIT
async getChatCompletion(messages = null, { temperature = 0.7 }) { const result = await LLMPerformanceMonitor.measureAsyncFunction( this.openai.chat.completions .create({ model: this.model, messages, temperature, max_tokens: this.maxTokens, }) .catch((e) => { throw new Error(e.message); }) ); if ( !result.output.hasOwnProperty("choices") || result.output.choices.length === 0 ) return null; const promptTokens = LLMPerformanceMonitor.countTokens(messages); const completionTokens = LLMPerformanceMonitor.countTokens([ { content: result.output.choices[0].message.content }, ]); return { textResponse: result.output.choices[0].message.content, metrics: { prompt_tokens: promptTokens, completion_tokens: completionTokens, total_tokens: promptTokens + completionTokens, outputTps: completionTokens / result.duration, duration: result.duration, }, }; }
Sends a non-streaming chat completion request; prompt and completion tokens are estimated locally since the API response may not include usage data. @param {Array} messages @param {{temperature: number}} param1 @returns {Promise<{textResponse: string, metrics: Object}|null>}
getChatCompletion
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/koboldCPP/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/koboldCPP/index.js
MIT
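The KoboldCPP getChatCompletion record above estimates token usage locally with LLMPerformanceMonitor.countTokens instead of relying on API-reported usage. A hedged sketch of that pattern with a stand-in word-count tokenizer (both helpers below are hypothetical, not the project's implementation):

// Hypothetical stand-in for LLMPerformanceMonitor.countTokens: a rough
// whitespace-based approximation used only to illustrate the pattern.
function countTokens(messages = []) {
  return messages.reduce(
    (sum, msg) =>
      sum + String(msg.content || "").split(/\s+/).filter(Boolean).length,
    0
  );
}

// Assemble metrics the same way the record above does, from locally
// estimated prompt/completion token counts and the measured duration.
function buildLocalMetrics(messages, responseText, durationSeconds) {
  const promptTokens = countTokens(messages);
  const completionTokens = countTokens([{ content: responseText }]);
  return {
    prompt_tokens: promptTokens,
    completion_tokens: completionTokens,
    total_tokens: promptTokens + completionTokens,
    outputTps: durationSeconds > 0 ? completionTokens / durationSeconds : 0,
    duration: durationSeconds,
  };
}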
async streamGetChatCompletion(messages = null, { temperature = 0.7 }) { const measuredStreamRequest = await LLMPerformanceMonitor.measureStream( this.openai.chat.completions.create({ model: this.model, stream: true, messages, temperature, max_tokens: this.maxTokens, }), messages ); return measuredStreamRequest; }
Starts a streaming chat completion request wrapped in a performance-measured stream. @param {Array} messages @param {{temperature: number}} param1 @returns {Promise<Object>}
streamGetChatCompletion
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/koboldCPP/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/koboldCPP/index.js
MIT
handleStream(response, stream, responseProps) { const { uuid = uuidv4(), sources = [] } = responseProps; return new Promise(async (resolve) => { let fullText = ""; let usage = { prompt_tokens: LLMPerformanceMonitor.countTokens(stream.messages || []), completion_tokens: 0, }; const handleAbort = () => { usage.completion_tokens = LLMPerformanceMonitor.countTokens([ { content: fullText }, ]); stream?.endMeasurement(usage); clientAbortedHandler(resolve, fullText); }; response.on("close", handleAbort); for await (const chunk of stream) { const message = chunk?.choices?.[0]; const token = message?.delta?.content; if (token) { fullText += token; writeResponseChunk(response, { uuid, sources: [], type: "textResponseChunk", textResponse: token, close: false, error: false, }); } // KoboldCPP finishes with "length" or "stop" if ( message.finish_reason !== "null" && (message.finish_reason === "length" || message.finish_reason === "stop") ) { writeResponseChunk(response, { uuid, sources, type: "textResponseChunk", textResponse: "", close: true, error: false, }); response.removeListener("close", handleAbort); usage.completion_tokens = LLMPerformanceMonitor.countTokens([ { content: fullText }, ]); stream?.endMeasurement(usage); resolve(fullText); } } }); }
Handles the streaming response, forwarding text chunks to the client, estimating token usage locally, and closing once a 'length' or 'stop' finish reason is received. @param {Object} response @param {Object} stream @param {Object} responseProps @returns {Promise<string>}
handleStream
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/koboldCPP/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/koboldCPP/index.js
MIT
handleAbort = () => { usage.completion_tokens = LLMPerformanceMonitor.countTokens([ { content: fullText }, ]); stream?.endMeasurement(usage); clientAbortedHandler(resolve, fullText); }
Abort handler fired when the client closes the connection mid-stream; estimates completion tokens from the partial text, ends the usage measurement, and resolves with the text generated so far.
handleAbort
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/koboldCPP/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/koboldCPP/index.js
MIT
async embedTextInput(textInput) { return await this.embedder.embedTextInput(textInput); }
Embeds a single text input using the configured embedder. @param {string} textInput @returns {Promise<Array<number>>}
embedTextInput
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/koboldCPP/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/koboldCPP/index.js
MIT
async embedChunks(textChunks = []) { return await this.embedder.embedChunks(textChunks); }
Embeds an array of text chunks using the configured embedder. @param {string[]} textChunks @returns {Promise<Array<Array<number>>>}
embedChunks
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/koboldCPP/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/koboldCPP/index.js
MIT
async compressMessages(promptArgs = {}, rawHistory = []) { const { messageArrayCompressor } = require("../../helpers/chat"); const messageArray = this.constructPrompt(promptArgs); return await messageArrayCompressor(this, messageArray, rawHistory); }
Constructs the prompt and compresses the resulting message array to fit within the model's context window. @param {Object} promptArgs @param {Array} rawHistory @returns {Promise<Array>}
compressMessages
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/koboldCPP/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/koboldCPP/index.js
MIT
constructPrompt({ systemPrompt = "", contextTexts = [], chatHistory = [], userPrompt = "", attachments = [], }) { const prompt = { role: "system", content: `${systemPrompt}${this.#appendContext(contextTexts)}`, }; return [ prompt, ...formatChatHistory(chatHistory, this.#generateContent), { role: "user", content: this.#generateContent({ userPrompt, attachments }), }, ]; }
Construct the user prompt for this model. @param {{attachments: import("../../helpers").Attachment[]}} param0 @returns
constructPrompt
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/liteLLM/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/liteLLM/index.js
MIT
async getChatCompletion(messages = null, { temperature = 0.7 }) { const result = await LLMPerformanceMonitor.measureAsyncFunction( this.openai.chat.completions .create({ model: this.model, messages, temperature, max_tokens: parseInt(this.maxTokens), // LiteLLM requires int }) .catch((e) => { throw new Error(e.message); }) ); if ( !result.output.hasOwnProperty("choices") || result.output.choices.length === 0 ) return null; return { textResponse: result.output.choices[0].message.content, metrics: { prompt_tokens: result.output.usage?.prompt_tokens || 0, completion_tokens: result.output.usage?.completion_tokens || 0, total_tokens: result.output.usage?.total_tokens || 0, outputTps: (result.output.usage?.completion_tokens || 0) / result.duration, duration: result.duration, }, }; }
Sends a non-streaming chat completion request and returns the text response along with token usage and throughput metrics. @param {Array} messages @param {{temperature: number}} param1 @returns {Promise<{textResponse: string, metrics: Object}|null>}
getChatCompletion
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/liteLLM/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/liteLLM/index.js
MIT
async streamGetChatCompletion(messages = null, { temperature = 0.7 }) { const measuredStreamRequest = await LLMPerformanceMonitor.measureStream( this.openai.chat.completions.create({ model: this.model, stream: true, messages, temperature, max_tokens: parseInt(this.maxTokens), // LiteLLM requires int }), messages // runPromptTokenCalculation: true - We manually count the tokens because they may or may not be provided in the stream // responses depending on LLM connected. If they are provided, then we counted for nothing, but better than nothing. ); return measuredStreamRequest; }
Starts a streaming chat completion request wrapped in a performance-measured stream; prompt tokens are counted manually since usage data may not be present in the stream. @param {Array} messages @param {{temperature: number}} param1 @returns {Promise<Object>}
streamGetChatCompletion
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/liteLLM/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/liteLLM/index.js
MIT
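Both LiteLLM records above pass max_tokens through parseInt because LiteLLM expects an integer rather than a numeric string. A small hedged sketch of that normalization; the fallback value of 1024 is a hypothetical default, not taken from the source:

// Normalize a possibly-string max token setting to a positive integer,
// mirroring the parseInt(this.maxTokens) calls in the records above.
function normalizeMaxTokens(rawValue, fallback = 1024) {
  const parsed = parseInt(rawValue, 10);
  return Number.isFinite(parsed) && parsed > 0 ? parsed : fallback;
}

// normalizeMaxTokens("4096") -> 4096; normalizeMaxTokens("oops") -> 1024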
handleStream(response, stream, responseProps) { return handleDefaultStreamResponseV2(response, stream, responseProps); }
Handles the streaming response via the shared default stream response handler. @param {Object} response @param {Object} stream @param {Object} responseProps @returns {Promise<string>}
handleStream
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/liteLLM/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/liteLLM/index.js
MIT
async embedTextInput(textInput) { return await this.embedder.embedTextInput(textInput); }
Embeds a single text input using the configured embedder. @param {string} textInput @returns {Promise<Array<number>>}
embedTextInput
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/liteLLM/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/liteLLM/index.js
MIT
async embedChunks(textChunks = []) { return await this.embedder.embedChunks(textChunks); }
Embeds an array of text chunks using the configured embedder. @param {string[]} textChunks @returns {Promise<Array<Array<number>>>}
embedChunks
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/liteLLM/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/liteLLM/index.js
MIT
async compressMessages(promptArgs = {}, rawHistory = []) { const { messageArrayCompressor } = require("../../helpers/chat"); const messageArray = this.constructPrompt(promptArgs); return await messageArrayCompressor(this, messageArray, rawHistory); }
Constructs the prompt and compresses the resulting message array to fit within the model's context window. @param {Object} promptArgs @param {Array} rawHistory @returns {Promise<Array>}
compressMessages
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/liteLLM/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/liteLLM/index.js
MIT
constructPrompt({ systemPrompt = "", contextTexts = [], chatHistory = [], userPrompt = "", attachments = [], }) { const prompt = { role: "system", content: `${systemPrompt}${this.#appendContext(contextTexts)}`, }; return [ prompt, ...formatChatHistory(chatHistory, this.#generateContent), { role: "user", content: this.#generateContent({ userPrompt, attachments }), }, ]; }
Construct the user prompt for this model. @param {{attachments: import("../../helpers").Attachment[]}} param0 @returns
constructPrompt
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/lmStudio/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/lmStudio/index.js
MIT
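The constructPrompt records above all return an OpenAI-style message array: a system message carrying the system prompt plus appended context, the formatted chat history, and a final user turn. A hedged example of the resulting shape when no attachments are present (the context-appending text comes from the private #appendContext helper, which is not shown here, so the content strings below are placeholders only):

// Illustrative shape of the array constructPrompt returns for a simple,
// attachment-free exchange. Content strings are placeholders.
const exampleMessages = [
  { role: "system", content: "You are a helpful assistant. <appended context snippets>" },
  { role: "user", content: "What does the attached context cover?" },
  { role: "assistant", content: "It covers the product roadmap." },
  { role: "user", content: "Summarize it in one sentence." },
];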
async getChatCompletion(messages = null, { temperature = 0.7 }) { if (!this.model) throw new Error( `LMStudio chat: ${this.model} is not valid or defined model for chat completion!` ); const result = await LLMPerformanceMonitor.measureAsyncFunction( this.lmstudio.chat.completions.create({ model: this.model, messages, temperature, }) ); if ( !result.output.hasOwnProperty("choices") || result.output.choices.length === 0 ) return null; return { textResponse: result.output.choices[0].message.content, metrics: { prompt_tokens: result.output.usage?.prompt_tokens || 0, completion_tokens: result.output.usage?.completion_tokens || 0, total_tokens: result.output.usage?.total_tokens || 0, outputTps: result.output.usage?.completion_tokens / result.duration, duration: result.duration, }, }; }
Sends a non-streaming chat completion request and returns the text response along with token usage and throughput metrics. @param {Array} messages @param {{temperature: number}} param1 @returns {Promise<{textResponse: string, metrics: Object}|null>}
getChatCompletion
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/lmStudio/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/lmStudio/index.js
MIT
async streamGetChatCompletion(messages = null, { temperature = 0.7 }) { if (!this.model) throw new Error( `LMStudio chat: ${this.model} is not valid or defined model for chat completion!` ); const measuredStreamRequest = await LLMPerformanceMonitor.measureStream( this.lmstudio.chat.completions.create({ model: this.model, stream: true, messages, temperature, }), messages ); return measuredStreamRequest; }
Starts a streaming chat completion request wrapped in a performance-measured stream. @param {Array} messages @param {{temperature: number}} param1 @returns {Promise<Object>}
streamGetChatCompletion
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/lmStudio/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/lmStudio/index.js
MIT
handleStream(response, stream, responseProps) { return handleDefaultStreamResponseV2(response, stream, responseProps); }
Handles the streaming response via the shared default stream response handler. @param {Object} response @param {Object} stream @param {Object} responseProps @returns {Promise<string>}
handleStream
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/lmStudio/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/lmStudio/index.js
MIT
async embedTextInput(textInput) { return await this.embedder.embedTextInput(textInput); }
Embeds a single text input using the configured embedder. @param {string} textInput @returns {Promise<Array<number>>}
embedTextInput
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/lmStudio/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/lmStudio/index.js
MIT
async embedChunks(textChunks = []) { return await this.embedder.embedChunks(textChunks); }
Embeds an array of text chunks using the configured embedder. @param {string[]} textChunks @returns {Promise<Array<Array<number>>>}
embedChunks
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/lmStudio/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/lmStudio/index.js
MIT
async compressMessages(promptArgs = {}, rawHistory = []) { const { messageArrayCompressor } = require("../../helpers/chat"); const messageArray = this.constructPrompt(promptArgs); return await messageArrayCompressor(this, messageArray, rawHistory); }
Constructs the prompt and compresses the resulting message array to fit within the model's context window. @param {Object} promptArgs @param {Array} rawHistory @returns {Promise<Array>}
compressMessages
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/lmStudio/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/lmStudio/index.js
MIT
function parseLMStudioBasePath(providedBasePath = "") { try { const baseURL = new URL(providedBasePath); const basePath = `${baseURL.origin}/v1`; return basePath; } catch (e) { return providedBasePath; } }
Parse the base path for the LMStudio API. Since the base path must end in /v1 and cannot have a trailing slash, and the user can possibly set it to anything and likely incorrectly due to pasting behaviors, we need to ensure it is in the correct format. @param {string} basePath @returns {string}
parseLMStudioBasePath
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/lmStudio/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/lmStudio/index.js
MIT
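The parseLMStudioBasePath record above normalizes whatever base path the user pasted down to origin + /v1, and leaves the value untouched when it is not a parseable URL. Example inputs and outputs:

// Expected behavior of parseLMStudioBasePath as defined above.
parseLMStudioBasePath("http://localhost:1234/v1/chat/completions"); // "http://localhost:1234/v1"
parseLMStudioBasePath("http://localhost:1234/");                    // "http://localhost:1234/v1"
parseLMStudioBasePath("not-a-url");                                 // "not-a-url" (returned unchanged)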
constructPrompt({ systemPrompt = "", contextTexts = [], chatHistory = [], userPrompt = "", attachments = [], }) { const prompt = { role: "system", content: `${systemPrompt}${this.#appendContext(contextTexts)}`, }; return [ prompt, ...formatChatHistory(chatHistory, this.#generateContent), { role: "user", content: this.#generateContent({ userPrompt, attachments }), }, ]; }
Construct the user prompt for this model. @param {{attachments: import("../../helpers").Attachment[]}} param0 @returns
constructPrompt
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/localAi/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/localAi/index.js
MIT
async getChatCompletion(messages = null, { temperature = 0.7 }) { if (!(await this.isValidChatCompletionModel(this.model))) throw new Error( `LocalAI chat: ${this.model} is not valid for chat completion!` ); const result = await LLMPerformanceMonitor.measureAsyncFunction( this.openai.chat.completions.create({ model: this.model, messages, temperature, }) ); if ( !result.output.hasOwnProperty("choices") || result.output.choices.length === 0 ) return null; const promptTokens = LLMPerformanceMonitor.countTokens(messages); const completionTokens = LLMPerformanceMonitor.countTokens( result.output.choices[0].message.content ); return { textResponse: result.output.choices[0].message.content, metrics: { prompt_tokens: promptTokens, completion_tokens: completionTokens, total_tokens: promptTokens + completionTokens, outputTps: completionTokens / result.duration, duration: result.duration, }, }; }
Sends a non-streaming chat completion request; prompt and completion tokens are counted locally from the messages and response text. @param {Array} messages @param {{temperature: number}} param1 @returns {Promise<{textResponse: string, metrics: Object}|null>}
getChatCompletion
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/localAi/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/localAi/index.js
MIT
async streamGetChatCompletion(messages = null, { temperature = 0.7 }) { if (!(await this.isValidChatCompletionModel(this.model))) throw new Error( `LocalAi chat: ${this.model} is not valid for chat completion!` ); const measuredStreamRequest = await LLMPerformanceMonitor.measureStream( this.openai.chat.completions.create({ model: this.model, stream: true, messages, temperature, }), messages ); return measuredStreamRequest; }
Starts a streaming chat completion request wrapped in a performance-measured stream. @param {Array} messages @param {{temperature: number}} param1 @returns {Promise<Object>}
streamGetChatCompletion
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/localAi/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/localAi/index.js
MIT
handleStream(response, stream, responseProps) { return handleDefaultStreamResponseV2(response, stream, responseProps); }
Handles the streaming response via the shared default stream response handler. @param {Object} response @param {Object} stream @param {Object} responseProps @returns {Promise<string>}
handleStream
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/localAi/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/localAi/index.js
MIT
async embedTextInput(textInput) { return await this.embedder.embedTextInput(textInput); }
Embeds a single text input using the configured embedder. @param {string} textInput @returns {Promise<Array<number>>}
embedTextInput
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/localAi/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/localAi/index.js
MIT
async embedChunks(textChunks = []) { return await this.embedder.embedChunks(textChunks); }
Embeds an array of text chunks using the configured embedder. @param {string[]} textChunks @returns {Promise<Array<Array<number>>>}
embedChunks
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/localAi/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/localAi/index.js
MIT
async compressMessages(promptArgs = {}, rawHistory = []) { const { messageArrayCompressor } = require("../../helpers/chat"); const messageArray = this.constructPrompt(promptArgs); return await messageArrayCompressor(this, messageArray, rawHistory); }
Constructs the prompt and compresses the resulting message array to fit within the model's context window. @param {Object} promptArgs @param {Array} rawHistory @returns {Promise<Array>}
compressMessages
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/localAi/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/localAi/index.js
MIT
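Every compressMessages record above follows the same pattern: build the provider's message array with constructPrompt, then delegate to the shared messageArrayCompressor to fit it into the context window. A hedged sketch of how a caller might use it; the provider instance, prompt values, and history below are hypothetical:

// Hypothetical caller-side usage of a provider's compressMessages method.
async function buildCompressedPrompt(provider, workspaceChats) {
  const messages = await provider.compressMessages(
    {
      systemPrompt: "You are a helpful assistant.",
      contextTexts: ["...retrieved snippet..."],
      chatHistory: workspaceChats, // prior turns in message form
      userPrompt: "Answer using the context above.",
      attachments: [],
    },
    workspaceChats // raw history consumed by the compressor
  );
  return messages; // ready for getChatCompletion / streamGetChatCompletion
}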