code | docstring | func_name | language | repo | path | url | license |
---|---|---|---|---|---|---|---|
async embedTextInput(textInput) {
return await this.embedder.embedTextInput(textInput);
}
|
Proxies a single text input to the attached embedder for this provider.
@param {string} textInput - The text to embed
@returns {Promise<Array<number>>} The embedding values
|
embedTextInput
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/xai/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/xai/index.js
|
MIT
|
async embedChunks(textChunks = []) {
return await this.embedder.embedChunks(textChunks);
}
|
Proxies an array of text chunks to the attached embedder for this provider.
@param {string[]} textChunks - The chunks to embed
@returns {Promise<Array<Array<number>>>} The embedding values
|
embedChunks
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/xai/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/xai/index.js
|
MIT
|
async compressMessages(promptArgs = {}, rawHistory = []) {
const { messageArrayCompressor } = require("../../helpers/chat");
const messageArray = this.constructPrompt(promptArgs);
return await messageArrayCompressor(this, messageArray, rawHistory);
}
|
Compresses the assembled prompt and raw chat history so the final message array fits within the model's context window.
@param {Object} promptArgs - The arguments used to construct the prompt
@param {Object[]} rawHistory - The raw chat history to compress against
@returns {Promise<Object[]>} The compressed message array
|
compressMessages
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/xai/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/xai/index.js
|
MIT
|
jobs() {
return [
// Job for auto-sync of documents
// https://github.com/breejs/bree
{
name: "sync-watched-documents",
interval: "1hr",
},
];
}
|
@returns {import("@mintplex-labs/bree").Job[]}
|
jobs
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/BackgroundWorkers/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/BackgroundWorkers/index.js
|
MIT
|
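The `jobs()` list above only declares the schedule; something has to hand it to Bree. A minimal sketch of that wiring, assuming the upstream `bree` package API and a hypothetical `jobs/` folder containing `sync-watched-documents.js` (the repository itself uses its `@mintplex-labs/bree` fork inside a BackgroundWorkers service):

```javascript
// Hypothetical wiring, not the repository's BackgroundWorkers implementation.
const path = require("path");
const Bree = require("bree"); // assumption: upstream bree; the repo uses the @mintplex-labs/bree fork

const bree = new Bree({
  root: path.resolve(__dirname, "jobs"), // assumed folder holding sync-watched-documents.js
  jobs: [
    {
      name: "sync-watched-documents", // matches jobs/sync-watched-documents.js
      interval: "1hr",                // re-run the worker every hour
    },
  ],
  // Roughly what onError() above does: log the error with its origin.
  errorHandler: (error) => console.error(`[bg-worker] ${error.name}: ${error.message}`),
});

(async () => {
  await bree.start(); // spawns a worker thread per job on its schedule
})();
```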
onError(error, _workerMetadata) {
this.logger.error(`${error.message}`, {
service: "bg-worker",
origin: error.name,
});
}
|
Logs errors emitted by the background worker service.
@param {Error} error - The error thrown by a worker job
|
onError
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/BackgroundWorkers/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/BackgroundWorkers/index.js
|
MIT
|
onWorkerMessageHandler(message, _workerMetadata) {
this.logger.info(`${message.message}`, {
service: "bg-worker",
origin: message.name,
});
}
|
Logs messages emitted by background worker jobs.
@param {{name: string, message: string}} message - The message payload sent from a worker
|
onWorkerMessageHandler
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/BackgroundWorkers/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/BackgroundWorkers/index.js
|
MIT
|
clearConfig() {
this.#customConfig = null;
}
|
Clears the current config so it can be refetched on the server for next render.
|
clearConfig
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/boot/MetaGenerator.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/boot/MetaGenerator.js
|
MIT
|
async generate(response, code = 200) {
if (this.#customConfig === null) await this.#fetchConfg();
response.status(code).send(`
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
${this.#assembleMeta()}
<script type="module" crossorigin src="/index.js"></script>
<link rel="stylesheet" href="/index.css">
</head>
<body>
<div id="root" class="h-screen"></div>
</body>
</html>`);
}
|
@param {import('express').Response} response
@param {number} code
|
generate
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/boot/MetaGenerator.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/boot/MetaGenerator.js
|
MIT
|
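For context, `generate` is meant to be the terminal handler of an HTML route: it fetches the custom meta config once, then writes a full document to the response. A hedged sketch of such a route, assuming the class exported from `server/utils/boot/MetaGenerator.js` is named `MetaGenerator`:

```javascript
// Hypothetical route; the exported class name and import path are assumptions.
const express = require("express");
const { MetaGenerator } = require("./utils/boot/MetaGenerator");

const app = express();
const meta = new MetaGenerator();

// Serve the SPA shell with the generated <head> metadata.
app.get("*", async (_request, response) => {
  await meta.generate(response, 200); // sends the full HTML document and ends the response
});

app.listen(3001, () => console.log("Serving index with generated meta tags."));
```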
async function chatSync({
workspace,
message = null,
mode = "chat",
user = null,
thread = null,
sessionId = null,
attachments = [],
reset = false,
}) {
const uuid = uuidv4();
const chatMode = mode ?? "chat";
// If the user wants to reset the chat history we do so pre-flight
// and continue execution. If no message is provided then the user intended
// to reset the chat history only and we can exit early with a confirmation.
if (reset) {
await WorkspaceChats.markThreadHistoryInvalidV2({
workspaceId: workspace.id,
user_id: user?.id,
thread_id: thread?.id,
api_session_id: sessionId,
});
if (!message?.length) {
return {
id: uuid,
type: "textResponse",
textResponse: "Chat history was reset!",
sources: [],
close: true,
error: null,
metrics: {},
};
}
}
// Process slash commands
// Since preset commands are not supported in API calls, we can just process the message here
const processedMessage = await grepAllSlashCommands(message);
message = processedMessage;
if (EphemeralAgentHandler.isAgentInvocation({ message })) {
await Telemetry.sendTelemetry("agent_chat_started");
// Initialize the EphemeralAgentHandler to handle non-continuous
// conversations with agents since this is over REST.
const agentHandler = new EphemeralAgentHandler({
uuid,
workspace,
prompt: message,
userId: user?.id || null,
threadId: thread?.id || null,
sessionId,
});
// Establish event listener that emulates websocket calls
// in Aibitat so that we can keep the same interface in Aibitat
// but use HTTP.
const eventListener = new EphemeralEventListener();
await agentHandler.init();
await agentHandler.createAIbitat({ handler: eventListener });
agentHandler.startAgentCluster();
// The cluster has started and now we wait for close event since
// this is a synchronous call for an agent, so we return everything at once.
// After this, we conclude the call as we normally do.
return await eventListener
.waitForClose()
.then(async ({ thoughts, textResponse }) => {
await WorkspaceChats.new({
workspaceId: workspace.id,
prompt: String(message),
response: {
text: textResponse,
sources: [],
attachments,
type: chatMode,
thoughts,
},
include: false,
apiSessionId: sessionId,
});
return {
id: uuid,
type: "textResponse",
sources: [],
close: true,
error: null,
textResponse,
thoughts,
};
});
}
const LLMConnector = getLLMProvider({
provider: workspace?.chatProvider,
model: workspace?.chatModel,
});
const VectorDb = getVectorDbClass();
const messageLimit = workspace?.openAiHistory || 20;
const hasVectorizedSpace = await VectorDb.hasNamespace(workspace.slug);
const embeddingsCount = await VectorDb.namespaceCount(workspace.slug);
// User is trying to query-mode chat a workspace that has no data in it - so
// we should exit early as no information can be found under these conditions.
if ((!hasVectorizedSpace || embeddingsCount === 0) && chatMode === "query") {
const textResponse =
workspace?.queryRefusalResponse ??
"There is no relevant information in this workspace to answer your query.";
await WorkspaceChats.new({
workspaceId: workspace.id,
prompt: String(message),
response: {
text: textResponse,
sources: [],
attachments: attachments,
type: chatMode,
metrics: {},
},
include: false,
apiSessionId: sessionId,
});
return {
id: uuid,
type: "textResponse",
sources: [],
close: true,
error: null,
textResponse,
metrics: {},
};
}
// If we are here we know that we are in a workspace that is:
// 1. Chatting in "chat" mode and may or may _not_ have embeddings
// 2. Chatting in "query" mode and has at least 1 embedding
let contextTexts = [];
let sources = [];
let pinnedDocIdentifiers = [];
const { rawHistory, chatHistory } = await recentChatHistory({
user,
workspace,
thread,
messageLimit,
apiSessionId: sessionId,
});
await new DocumentManager({
workspace,
maxTokens: LLMConnector.promptWindowLimit(),
})
.pinnedDocs()
.then((pinnedDocs) => {
pinnedDocs.forEach((doc) => {
const { pageContent, ...metadata } = doc;
pinnedDocIdentifiers.push(sourceIdentifier(doc));
contextTexts.push(doc.pageContent);
sources.push({
text:
pageContent.slice(0, 1_000) +
"...continued on in source document...",
...metadata,
});
});
});
const vectorSearchResults =
embeddingsCount !== 0
? await VectorDb.performSimilaritySearch({
namespace: workspace.slug,
input: message,
LLMConnector,
similarityThreshold: workspace?.similarityThreshold,
topN: workspace?.topN,
filterIdentifiers: pinnedDocIdentifiers,
rerank: workspace?.vectorSearchMode === "rerank",
})
: {
contextTexts: [],
sources: [],
message: null,
};
// Failed similarity search if it was run at all and failed.
if (!!vectorSearchResults.message) {
return {
id: uuid,
type: "abort",
textResponse: null,
sources: [],
close: true,
error: vectorSearchResults.message,
metrics: {},
};
}
const { fillSourceWindow } = require("../helpers/chat");
const filledSources = fillSourceWindow({
nDocs: workspace?.topN || 4,
searchResults: vectorSearchResults.sources,
history: rawHistory,
filterIdentifiers: pinnedDocIdentifiers,
});
// Why does contextTexts get all the info, but sources only get current search?
// This is to give the ability of the LLM to "comprehend" a contextual response without
// populating the Citations under a response with documents the user "thinks" are irrelevant
// due to how we manage backfilling of the context to keep chats with the LLM more correct in responses.
// If a past citation was used to answer the question - that is visible in the history so it logically makes sense
// and does not appear to the user that a new response used information that is otherwise irrelevant for a given prompt.
// TLDR; reduces GitHub issues for "LLM citing document that has no answer in it" while keeping answers highly accurate.
contextTexts = [...contextTexts, ...filledSources.contextTexts];
sources = [...sources, ...vectorSearchResults.sources];
// If in query mode and no context chunks are found from search, backfill, or pins - do not
// let the LLM try to hallucinate a response or use general knowledge and exit early
if (chatMode === "query" && contextTexts.length === 0) {
const textResponse =
workspace?.queryRefusalResponse ??
"There is no relevant information in this workspace to answer your query.";
await WorkspaceChats.new({
workspaceId: workspace.id,
prompt: message,
response: {
text: textResponse,
sources: [],
attachments: attachments,
type: chatMode,
metrics: {},
},
threadId: thread?.id || null,
include: false,
apiSessionId: sessionId,
user,
});
return {
id: uuid,
type: "textResponse",
sources: [],
close: true,
error: null,
textResponse,
metrics: {},
};
}
// Compress & Assemble message to ensure prompt passes token limit with room for response
// and build system messages based on inputs and history.
const messages = await LLMConnector.compressMessages(
{
systemPrompt: await chatPrompt(workspace, user),
userPrompt: message,
contextTexts,
chatHistory,
attachments,
},
rawHistory
);
// Send the text completion.
const { textResponse, metrics: performanceMetrics } =
await LLMConnector.getChatCompletion(messages, {
temperature: workspace?.openAiTemp ?? LLMConnector.defaultTemp,
});
if (!textResponse) {
return {
id: uuid,
type: "abort",
textResponse: null,
sources: [],
close: true,
error: "No text completion could be completed with this input.",
metrics: performanceMetrics,
};
}
const { chat } = await WorkspaceChats.new({
workspaceId: workspace.id,
prompt: message,
response: {
text: textResponse,
sources,
attachments,
type: chatMode,
metrics: performanceMetrics,
},
threadId: thread?.id || null,
apiSessionId: sessionId,
user,
});
return {
id: uuid,
type: "textResponse",
close: true,
error: null,
chatId: chat.id,
textResponse,
sources,
metrics: performanceMetrics,
};
}
|
Handle synchronous chats with your workspace via the developer API endpoint
@param {{
workspace: import("@prisma/client").workspaces,
message:string,
mode: "chat"|"query",
user: import("@prisma/client").users|null,
thread: import("@prisma/client").workspace_threads|null,
sessionId: string|null,
attachments: { name: string; mime: string; contentString: string }[],
reset: boolean,
}} parameters
@returns {Promise<ResponseObject>}
|
chatSync
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/chats/apiChatHandler.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/chats/apiChatHandler.js
|
MIT
|
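For orientation, here is a minimal sketch of how a developer-API route could hand its parameters to `chatSync` and return the resolved response object. The route path, the `Workspace` model helper, and the body-parsing setup are assumptions for illustration, not the repository's actual endpoint.

```javascript
// Hypothetical Express route; paths, names, and middleware are assumptions.
const { chatSync } = require("../utils/chats/apiChatHandler");

function registerWorkspaceChatRoute(app, { Workspace }) {
  // Assumes express.json() body parsing is already enabled on `app`.
  app.post("/v1/workspace/:slug/chat", async (request, response) => {
    const workspace = await Workspace.get({ slug: request.params.slug }); // assumed model helper
    if (!workspace) return response.status(404).end();

    const {
      message,
      mode = "chat",   // "chat" | "query"
      sessionId = null,
      attachments = [],
      reset = false,
    } = request.body;

    const result = await chatSync({
      workspace,
      message,
      mode,
      user: null,      // developer API calls here are not user-scoped
      thread: null,
      sessionId,
      attachments,
      reset,
    });
    response.status(200).json(result); // { id, type, textResponse, sources, close, error, metrics, ... }
  });
}

module.exports = { registerWorkspaceChatRoute };
```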
async function streamChat({
response,
workspace,
message = null,
mode = "chat",
user = null,
thread = null,
sessionId = null,
attachments = [],
reset = false,
}) {
const uuid = uuidv4();
const chatMode = mode ?? "chat";
// If the user wants to reset the chat history we do so pre-flight
// and continue execution. If no message is provided then the user intended
// to reset the chat history only and we can exit early with a confirmation.
if (reset) {
await WorkspaceChats.markThreadHistoryInvalidV2({
workspaceId: workspace.id,
user_id: user?.id,
thread_id: thread?.id,
api_session_id: sessionId,
});
if (!message?.length) {
writeResponseChunk(response, {
id: uuid,
type: "textResponse",
textResponse: "Chat history was reset!",
sources: [],
attachments: [],
close: true,
error: null,
metrics: {},
});
return;
}
}
// Check for and process slash commands
// Since preset commands are not supported in API calls, we can just process the message here
const processedMessage = await grepAllSlashCommands(message);
message = processedMessage;
if (EphemeralAgentHandler.isAgentInvocation({ message })) {
await Telemetry.sendTelemetry("agent_chat_started");
// Initialize the EphemeralAgentHandler to handle non-continuous
// conversations with agents since this is over REST.
const agentHandler = new EphemeralAgentHandler({
uuid,
workspace,
prompt: message,
userId: user?.id || null,
threadId: thread?.id || null,
sessionId,
});
// Establish event listener that emulates websocket calls
// in Aibitat so that we can keep the same interface in Aibitat
// but use HTTP.
const eventListener = new EphemeralEventListener();
await agentHandler.init();
await agentHandler.createAIbitat({ handler: eventListener });
agentHandler.startAgentCluster();
// The cluster has started and now we wait for the close event
// and stream back any results we get from agents as they come in.
return eventListener
.streamAgentEvents(response, uuid)
.then(async ({ thoughts, textResponse }) => {
await WorkspaceChats.new({
workspaceId: workspace.id,
prompt: String(message),
response: {
text: textResponse,
sources: [],
attachments: attachments,
type: chatMode,
thoughts,
},
include: true,
threadId: thread?.id || null,
apiSessionId: sessionId,
});
writeResponseChunk(response, {
uuid,
type: "finalizeResponseStream",
textResponse,
thoughts,
close: true,
error: false,
});
});
}
const LLMConnector = getLLMProvider({
provider: workspace?.chatProvider,
model: workspace?.chatModel,
});
const VectorDb = getVectorDbClass();
const messageLimit = workspace?.openAiHistory || 20;
const hasVectorizedSpace = await VectorDb.hasNamespace(workspace.slug);
const embeddingsCount = await VectorDb.namespaceCount(workspace.slug);
// User is trying to query-mode chat a workspace that has no data in it - so
// we should exit early as no information can be found under these conditions.
if ((!hasVectorizedSpace || embeddingsCount === 0) && chatMode === "query") {
const textResponse =
workspace?.queryRefusalResponse ??
"There is no relevant information in this workspace to answer your query.";
writeResponseChunk(response, {
id: uuid,
type: "textResponse",
textResponse,
sources: [],
attachments: [],
close: true,
error: null,
metrics: {},
});
await WorkspaceChats.new({
workspaceId: workspace.id,
prompt: message,
response: {
text: textResponse,
sources: [],
attachments: attachments,
type: chatMode,
metrics: {},
},
threadId: thread?.id || null,
apiSessionId: sessionId,
include: false,
user,
});
return;
}
// If we are here we know that we are in a workspace that is:
// 1. Chatting in "chat" mode and may or may _not_ have embeddings
// 2. Chatting in "query" mode and has at least 1 embedding
let completeText;
let metrics = {};
let contextTexts = [];
let sources = [];
let pinnedDocIdentifiers = [];
const { rawHistory, chatHistory } = await recentChatHistory({
user,
workspace,
thread,
messageLimit,
apiSessionId: sessionId,
});
// Look for pinned documents and see if the user decided to use this feature. We will also do a vector search
// as pinning is a supplemental tool but it should be used with caution since it can easily blow up a context window.
// However we limit the maximum of appended context to 80% of its overall size, mostly because if it expands beyond this
// it will undergo prompt compression anyway to make it work. If there is so much pinned that the context here is bigger than
// what the model can support - it would get compressed anyway and that really is not the point of pinning. It is really best
// suited for high-context models.
await new DocumentManager({
workspace,
maxTokens: LLMConnector.promptWindowLimit(),
})
.pinnedDocs()
.then((pinnedDocs) => {
pinnedDocs.forEach((doc) => {
const { pageContent, ...metadata } = doc;
pinnedDocIdentifiers.push(sourceIdentifier(doc));
contextTexts.push(doc.pageContent);
sources.push({
text:
pageContent.slice(0, 1_000) +
"...continued on in source document...",
...metadata,
});
});
});
const vectorSearchResults =
embeddingsCount !== 0
? await VectorDb.performSimilaritySearch({
namespace: workspace.slug,
input: message,
LLMConnector,
similarityThreshold: workspace?.similarityThreshold,
topN: workspace?.topN,
filterIdentifiers: pinnedDocIdentifiers,
rerank: workspace?.vectorSearchMode === "rerank",
})
: {
contextTexts: [],
sources: [],
message: null,
};
// Failed similarity search if it was run at all and failed.
if (!!vectorSearchResults.message) {
writeResponseChunk(response, {
id: uuid,
type: "abort",
textResponse: null,
sources: [],
close: true,
error: vectorSearchResults.message,
metrics: {},
});
return;
}
const { fillSourceWindow } = require("../helpers/chat");
const filledSources = fillSourceWindow({
nDocs: workspace?.topN || 4,
searchResults: vectorSearchResults.sources,
history: rawHistory,
filterIdentifiers: pinnedDocIdentifiers,
});
// Why does contextTexts get all the info, but sources only get current search?
// This is to give the ability of the LLM to "comprehend" a contextual response without
// populating the Citations under a response with documents the user "thinks" are irrelevant
// due to how we manage backfilling of the context to keep chats with the LLM more correct in responses.
// If a past citation was used to answer the question - that is visible in the history so it logically makes sense
// and does not appear to the user that a new response used information that is otherwise irrelevant for a given prompt.
// TLDR; reduces GitHub issues for "LLM citing document that has no answer in it" while keeping answers highly accurate.
contextTexts = [...contextTexts, ...filledSources.contextTexts];
sources = [...sources, ...vectorSearchResults.sources];
// If in query mode and no context chunks are found from search, backfill, or pins - do not
// let the LLM try to hallucinate a response or use general knowledge and exit early
if (chatMode === "query" && contextTexts.length === 0) {
const textResponse =
workspace?.queryRefusalResponse ??
"There is no relevant information in this workspace to answer your query.";
writeResponseChunk(response, {
id: uuid,
type: "textResponse",
textResponse,
sources: [],
close: true,
error: null,
metrics: {},
});
await WorkspaceChats.new({
workspaceId: workspace.id,
prompt: message,
response: {
text: textResponse,
sources: [],
attachments: attachments,
type: chatMode,
metrics: {},
},
threadId: thread?.id || null,
apiSessionId: sessionId,
include: false,
user,
});
return;
}
// Compress & Assemble message to ensure prompt passes token limit with room for response
// and build system messages based on inputs and history.
const messages = await LLMConnector.compressMessages(
{
systemPrompt: await chatPrompt(workspace, user),
userPrompt: message,
contextTexts,
chatHistory,
attachments,
},
rawHistory
);
// If streaming is not explicitly enabled for connector
// we do regular waiting of a response and send a single chunk.
if (LLMConnector.streamingEnabled() !== true) {
console.log(
`\x1b[31m[STREAMING DISABLED]\x1b[0m Streaming is not available for ${LLMConnector.constructor.name}. Will use regular chat method.`
);
const { textResponse, metrics: performanceMetrics } =
await LLMConnector.getChatCompletion(messages, {
temperature: workspace?.openAiTemp ?? LLMConnector.defaultTemp,
});
completeText = textResponse;
metrics = performanceMetrics;
writeResponseChunk(response, {
uuid,
sources,
type: "textResponseChunk",
textResponse: completeText,
close: true,
error: false,
metrics,
});
} else {
const stream = await LLMConnector.streamGetChatCompletion(messages, {
temperature: workspace?.openAiTemp ?? LLMConnector.defaultTemp,
});
completeText = await LLMConnector.handleStream(response, stream, {
uuid,
sources,
});
metrics = stream.metrics;
}
if (completeText?.length > 0) {
const { chat } = await WorkspaceChats.new({
workspaceId: workspace.id,
prompt: message,
response: {
text: completeText,
sources,
type: chatMode,
metrics,
attachments,
},
threadId: thread?.id || null,
apiSessionId: sessionId,
user,
});
writeResponseChunk(response, {
uuid,
type: "finalizeResponseStream",
close: true,
error: false,
chatId: chat.id,
metrics,
});
return;
}
writeResponseChunk(response, {
uuid,
type: "finalizeResponseStream",
close: true,
error: false,
});
return;
}
|
Handle streamable HTTP chunks for chats with your workspace via the developer API endpoint
@param {{
response: import("express").Response,
workspace: import("@prisma/client").workspaces,
message:string,
mode: "chat"|"query",
user: import("@prisma/client").users|null,
thread: import("@prisma/client").workspace_threads|null,
sessionId: string|null,
attachments: { name: string; mime: string; contentString: string }[],
reset: boolean,
}} parameters
@returns {Promise<VoidFunction>}
|
streamChat
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/chats/apiChatHandler.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/chats/apiChatHandler.js
|
MIT
|
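`streamChat`, by contrast, writes chunks directly to the Express response, so the caller is responsible for putting the connection into a streaming-friendly state before invoking it. A rough sketch under those assumptions; the header choices, route path, and `Workspace` helper are illustrative rather than taken from the repository.

```javascript
// Hypothetical streaming route; headers, paths, and names are assumptions.
const { streamChat } = require("../utils/chats/apiChatHandler");

function registerStreamChatRoute(app, { Workspace }) {
  app.post("/v1/workspace/:slug/stream-chat", async (request, response) => {
    const workspace = await Workspace.get({ slug: request.params.slug }); // assumed model helper
    if (!workspace) return response.status(404).end();

    // Keep the connection open so each written chunk reaches the client immediately.
    response.setHeader("Content-Type", "text/event-stream");
    response.setHeader("Cache-Control", "no-cache");
    response.setHeader("Connection", "keep-alive");

    await streamChat({
      response,
      workspace,
      message: request.body.message,
      mode: request.body.mode ?? "chat",
      user: null,
      thread: null,
      sessionId: request.body.sessionId ?? null,
      attachments: request.body.attachments ?? [],
      reset: request.body.reset ?? false,
    });
    response.end(); // streamChat has written its final chunk by the time it resolves
  });
}

module.exports = { registerStreamChatRoute };
```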
async function grepAllSlashCommands(message) {
const allPresets = await SlashCommandPresets.where({});
// Replace all preset commands with their corresponding prompts
// Allows multiple commands in one message
let updatedMessage = message;
for (const preset of allPresets) {
const regex = new RegExp(
`(?:\\b\\s|^)(${preset.command})(?:\\b\\s|$)`,
"g"
);
updatedMessage = updatedMessage.replace(regex, preset.prompt);
}
return updatedMessage;
}
|
@description Replaces every stored slash-command preset found in the message with its corresponding prompt; multiple commands in one message are supported.
@notice This function is used for API calls and is not user-scoped, so presets are matched across all stored commands.
@returns {Promise<string>}
|
grepAllSlashCommands
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/chats/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/chats/index.js
|
MIT
|
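The regex in `grepAllSlashCommands` only replaces a stored command when it appears as a standalone token. A small illustration of that substitution step with a made-up preset (the command and prompt values are hypothetical, and `SlashCommandPresets` is not involved here):

```javascript
// Illustration of the replacement step only; the preset values are invented.
const preset = {
  command: "/summarize",
  prompt: "Summarize the following text in three bullet points:",
};

const message = "/summarize The quarterly report shows revenue grew 12%...";

// Same pattern construction as grepAllSlashCommands above.
const regex = new RegExp(`(?:\\b\\s|^)(${preset.command})(?:\\b\\s|$)`, "g");
const updated = message.replace(regex, preset.prompt);

console.log(updated);
// -> "Summarize the following text in three bullet points:The quarterly report shows revenue grew 12%..."
// Note the space after the command is part of the match, so it is consumed by the replacement.
```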
async function recentChatHistory({
user = null,
workspace,
thread = null,
messageLimit = 20,
apiSessionId = null,
}) {
const rawHistory = (
await WorkspaceChats.where(
{
workspaceId: workspace.id,
user_id: user?.id || null,
thread_id: thread?.id || null,
api_session_id: apiSessionId || null,
include: true,
},
messageLimit,
{ id: "desc" }
)
).reverse();
return { rawHistory, chatHistory: convertToPromptHistory(rawHistory) };
}
|
@description Fetches the most recent chat history for a workspace, scoped to the given user, thread, or API session, in both raw and prompt-ready forms.
@returns {Promise<{rawHistory: Object[], chatHistory: Object[]}>}
|
recentChatHistory
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/chats/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/chats/index.js
|
MIT
|
async function chatPrompt(workspace, user = null) {
const basePrompt =
workspace?.openAiPrompt ??
"Given the following conversation, relevant context, and a follow up question, reply with an answer to the current question the user is asking. Return only your response to the question given the above information following the users instructions as needed.";
return await SystemPromptVariables.expandSystemPromptVariables(
basePrompt,
user?.id
);
}
|
Returns the base prompt for the chat. This method will also do variable
substitution on the prompt if there are any defined variables in the prompt.
@param {Object|null} workspace - the workspace object
@param {Object|null} user - the user object
@returns {Promise<string>} - the base prompt
|
chatPrompt
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/chats/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/chats/index.js
|
MIT
|
function sourceIdentifier(sourceDocument) {
if (!sourceDocument?.title || !sourceDocument?.published) return uuidv4();
return `title:${sourceDocument.title}-timestamp:${sourceDocument.published}`;
}
|
Builds a stable identifier for a source document from its title and published timestamp.
Falls back to a random UUID when either field is missing.
@param {Object|null} sourceDocument - the source document object
@returns {string} - the identifier for the source document
|
sourceIdentifier
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/chats/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/chats/index.js
|
MIT
|
constructor() {
const { CommunicationKey } = require("../comKey");
this.comkey = new CommunicationKey();
this.endpoint = `http://0.0.0.0:${process.env.COLLECTOR_PORT || 8888}`;
}
|
@typedef {Object} CollectorOptions
@property {string} whisperProvider - The provider to use for whisper, defaults to "local"
@property {string} WhisperModelPref - The model to use for whisper if set.
@property {string} openAiKey - The API key to use for OpenAI interfacing, mostly passed to OAI Whisper provider.
@property {Object} ocr - The OCR options
@property {{allowAnyIp: "true"|null|undefined}} runtimeSettings - The runtime settings that are passed to the collector. Persisted across requests.
|
constructor
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/collectorApi/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/collectorApi/index.js
|
MIT
|
log(text, ...args) {
console.log(`\x1b[36m[CollectorApi]\x1b[0m ${text}`, ...args);
}
|
Logs a message to the console with the CollectorApi prefix.
@param {string} text - The message to log
|
log
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/collectorApi/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/collectorApi/index.js
|
MIT
|
async online() {
return await fetch(this.endpoint)
.then((res) => res.ok)
.catch(() => false);
}
|
Checks if the collector service is reachable.
@returns {Promise<boolean>} - true if the collector responded OK, false otherwise
|
online
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/collectorApi/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/collectorApi/index.js
|
MIT
|
async acceptedFileTypes() {
return await fetch(`${this.endpoint}/accepts`)
.then((res) => {
if (!res.ok) throw new Error("failed to GET /accepts");
return res.json();
})
.then((res) => res)
.catch((e) => {
this.log(e.message);
return null;
});
}
|
Fetches the file types accepted by the collector API.
@returns {Promise<Object|null>} - The accepted file types, or null on failure
|
acceptedFileTypes
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/collectorApi/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/collectorApi/index.js
|
MIT
|
async processDocument(filename = "") {
if (!filename) return false;
const data = JSON.stringify({
filename,
options: this.#attachOptions(),
});
return await fetch(`${this.endpoint}/process`, {
method: "POST",
headers: {
"Content-Type": "application/json",
"X-Integrity": this.comkey.sign(data),
"X-Payload-Signer": this.comkey.encrypt(
new EncryptionManager().xPayload
),
},
body: data,
})
.then((res) => {
if (!res.ok) throw new Error("Response could not be completed");
return res.json();
})
.then((res) => res)
.catch((e) => {
this.log(e.message);
return { success: false, reason: e.message, documents: [] };
});
}
|
Process a document
- Will append the options to the request body
@param {string} filename - The filename of the document to process
@returns {Promise<Object>} - The response from the collector API
|
processDocument
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/collectorApi/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/collectorApi/index.js
|
MIT
|
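Taken together, the collector snippets above form a small HTTP client: check `online()`, optionally inspect `acceptedFileTypes()`, then submit work. A hedged usage sketch, assuming the class exported from `server/utils/collectorApi` is `CollectorApi` and that a successful `processDocument` response mirrors the `{ success, reason, documents }` shape of its error fallback:

```javascript
// Hypothetical caller; the exported class name and import path are assumptions.
const { CollectorApi } = require("./utils/collectorApi");

async function ingestUploadedFile(filename) {
  const collector = new CollectorApi();

  // Bail out early if the collector service is not reachable.
  if (!(await collector.online())) {
    throw new Error("Collector service is offline.");
  }

  // Assumed response shape: { success, reason, documents } as in the catch fallback above.
  const result = await collector.processDocument(filename);
  if (!result.success) {
    console.error(`Failed to process ${filename}: ${result.reason}`);
    return [];
  }
  return result.documents;
}

module.exports = { ingestUploadedFile };
```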
async processLink(link = "", scraperHeaders = {}) {
if (!link) return false;
const data = JSON.stringify({
link,
scraperHeaders,
options: this.#attachOptions(),
});
return await fetch(`${this.endpoint}/process-link`, {
method: "POST",
headers: {
"Content-Type": "application/json",
"X-Integrity": this.comkey.sign(data),
"X-Payload-Signer": this.comkey.encrypt(
new EncryptionManager().xPayload
),
},
body: data,
})
.then((res) => {
if (!res.ok) throw new Error("Response could not be completed");
return res.json();
})
.then((res) => res)
.catch((e) => {
this.log(e.message);
return { success: false, reason: e.message, documents: [] };
});
}
|
Process a link
- Will append the options to the request body
@param {string} link - The link to process
@param {{[key: string]: string}} scraperHeaders - Custom headers to apply to the web-scraping request URL
@returns {Promise<Object>} - The response from the collector API
|
processLink
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/collectorApi/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/collectorApi/index.js
|
MIT
|
async processRawText(textContent = "", metadata = {}) {
const data = JSON.stringify({
textContent,
metadata,
options: this.#attachOptions(),
});
return await fetch(`${this.endpoint}/process-raw-text`, {
method: "POST",
headers: {
"Content-Type": "application/json",
"X-Integrity": this.comkey.sign(data),
"X-Payload-Signer": this.comkey.encrypt(
new EncryptionManager().xPayload
),
},
body: data,
})
.then((res) => {
if (!res.ok) throw new Error("Response could not be completed");
return res.json();
})
.then((res) => res)
.catch((e) => {
this.log(e.message);
return { success: false, reason: e.message, documents: [] };
});
}
|
Process raw text as a document for the collector
- Will append the options to the request body
@param {string} textContent - The text to process
@param {Object} metadata - The metadata to process
@returns {Promise<Object>} - The response from the collector API
|
processRawText
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/collectorApi/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/collectorApi/index.js
|
MIT
|
async forwardExtensionRequest({ endpoint, method, body }) {
return await fetch(`${this.endpoint}${endpoint}`, {
method,
body, // Stringified JSON!
headers: {
"Content-Type": "application/json",
"X-Integrity": this.comkey.sign(body),
"X-Payload-Signer": this.comkey.encrypt(
new EncryptionManager().xPayload
),
},
})
.then((res) => {
if (!res.ok) throw new Error("Response could not be completed");
return res.json();
})
.then((res) => res)
.catch((e) => {
this.log(e.message);
return { success: false, data: {}, reason: e.message };
});
}
|
Forwards a request to an arbitrary collector endpoint, signing the stringified JSON body.
@param {{endpoint: string, method: string, body: string}} request - The request to forward. The body must already be stringified JSON.
@returns {Promise<Object>} - The response from the collector API
|
forwardExtensionRequest
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/collectorApi/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/collectorApi/index.js
|
MIT
|
async getLinkContent(link = "", captureAs = "text") {
if (!link) return false;
const data = JSON.stringify({
link,
captureAs,
options: this.#attachOptions(),
});
return await fetch(`${this.endpoint}/util/get-link`, {
method: "POST",
headers: {
"Content-Type": "application/json",
"X-Integrity": this.comkey.sign(data),
"X-Payload-Signer": this.comkey.encrypt(
new EncryptionManager().xPayload
),
},
body: data,
})
.then((res) => {
if (!res.ok) throw new Error("Response could not be completed");
return res.json();
})
.then((res) => res)
.catch((e) => {
this.log(e.message);
return { success: false, content: null };
});
}
|
Get the content of a link only in a specific format
- Will append the options to the request body
@param {string} link - The link to get the content of
@param {"text"|"html"} captureAs - The format to capture the content as
@returns {Promise<Object>} - The response from the collector API
|
getLinkContent
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/collectorApi/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/collectorApi/index.js
|
MIT
|
async embedTextInput(textInput) {
const result = await this.embedChunks(
Array.isArray(textInput) ? textInput : [textInput]
);
return result?.[0] || [];
}
|
Embeds a single text input
@param {string|string[]} textInput - The text to embed
@returns {Promise<Array<number>>} The embedding values
|
embedTextInput
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/EmbeddingEngines/gemini/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/EmbeddingEngines/gemini/index.js
|
MIT
|
async embedChunks(textChunks = []) {
this.log(`Embedding ${textChunks.length} chunks...`);
// Because there is a hard POST limit on how many chunks can be sent at once to OpenAI (~8mb)
// we concurrently execute each max batch of text chunks possible.
// Refer to constructor maxConcurrentChunks for more info.
const embeddingRequests = [];
for (const chunk of toChunks(textChunks, this.maxConcurrentChunks)) {
embeddingRequests.push(
new Promise((resolve) => {
this.openai.embeddings
.create({
model: this.model,
input: chunk,
})
.then((result) => {
resolve({ data: result?.data, error: null });
})
.catch((e) => {
e.type =
e?.response?.data?.error?.code ||
e?.response?.status ||
"failed_to_embed";
e.message = e?.response?.data?.error?.message || e.message;
resolve({ data: [], error: e });
});
})
);
}
const { data = [], error = null } = await Promise.all(
embeddingRequests
).then((results) => {
// If any errors were returned from OpenAI abort the entire sequence because the embeddings
// will be incomplete.
const errors = results
.filter((res) => !!res.error)
.map((res) => res.error)
.flat();
if (errors.length > 0) {
let uniqueErrors = new Set();
errors.map((error) =>
uniqueErrors.add(`[${error.type}]: ${error.message}`)
);
return {
data: [],
error: Array.from(uniqueErrors).join(", "),
};
}
return {
data: results.map((res) => res?.data || []).flat(),
error: null,
};
});
if (!!error) throw new Error(`Gemini Failed to embed: ${error}`);
return data.length > 0 &&
data.every((embd) => embd.hasOwnProperty("embedding"))
? data.map((embd) => embd.embedding)
: null;
}
|
Embeds a list of text inputs
@param {string[]} textChunks - The list of text to embed
@returns {Promise<Array<Array<number>>>} The embedding values
|
embedChunks
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/EmbeddingEngines/gemini/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/EmbeddingEngines/gemini/index.js
|
MIT
|
get maxConcurrentChunks() {
if (!process.env.GENERIC_OPEN_AI_EMBEDDING_MAX_CONCURRENT_CHUNKS)
return 500;
if (
isNaN(Number(process.env.GENERIC_OPEN_AI_EMBEDDING_MAX_CONCURRENT_CHUNKS))
)
return 500;
return Number(process.env.GENERIC_OPEN_AI_EMBEDDING_MAX_CONCURRENT_CHUNKS);
}
|
returns the `GENERIC_OPEN_AI_EMBEDDING_MAX_CONCURRENT_CHUNKS` env variable as a number
or 500 if the env variable is not set or is not a number.
@returns {number}
|
maxConcurrentChunks
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/EmbeddingEngines/genericOpenAi/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/EmbeddingEngines/genericOpenAi/index.js
|
MIT
|
async embedTextInput(textInput) {
const result = await this.embedChunks(
Array.isArray(textInput) ? textInput : [textInput]
);
return result?.[0] || [];
}
|
Embeds a single text input.
@param {string|string[]} textInput - The text to embed
@returns {Promise<Array<number>>} The embedding values
|
embedTextInput
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/EmbeddingEngines/genericOpenAi/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/EmbeddingEngines/genericOpenAi/index.js
|
MIT
|
async embedChunks(textChunks = []) {
// Because there is a hard POST limit on how many chunks can be sent at once to OpenAI (~8mb)
// we concurrently execute each max batch of text chunks possible.
// Refer to constructor maxConcurrentChunks for more info.
const embeddingRequests = [];
for (const chunk of toChunks(textChunks, this.maxConcurrentChunks)) {
embeddingRequests.push(
new Promise((resolve) => {
this.openai.embeddings
.create({
model: this.model,
input: chunk,
})
.then((result) => {
resolve({ data: result?.data, error: null });
})
.catch((e) => {
e.type =
e?.response?.data?.error?.code ||
e?.response?.status ||
"failed_to_embed";
e.message = e?.response?.data?.error?.message || e.message;
resolve({ data: [], error: e });
});
})
);
}
const { data = [], error = null } = await Promise.all(
embeddingRequests
).then((results) => {
// If any errors were returned from OpenAI abort the entire sequence because the embeddings
// will be incomplete.
const errors = results
.filter((res) => !!res.error)
.map((res) => res.error)
.flat();
if (errors.length > 0) {
let uniqueErrors = new Set();
errors.map((error) =>
uniqueErrors.add(`[${error.type}]: ${error.message}`)
);
return {
data: [],
error: Array.from(uniqueErrors).join(", "),
};
}
return {
data: results.map((res) => res?.data || []).flat(),
error: null,
};
});
if (!!error) throw new Error(`GenericOpenAI Failed to embed: ${error}`);
return data.length > 0 &&
data.every((embd) => embd.hasOwnProperty("embedding"))
? data.map((embd) => embd.embedding)
: null;
}
|
Embeds a list of text inputs.
@param {string[]} textChunks - The list of text to embed
@returns {Promise<Array<Array<number>>>} The embedding values
|
embedChunks
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/EmbeddingEngines/genericOpenAi/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/EmbeddingEngines/genericOpenAi/index.js
|
MIT
|
async embedTextInput(textInput) {
const result = await this.embedChunks(
Array.isArray(textInput) ? textInput : [textInput]
);
return result?.[0] || [];
}
|
Embeds a single text input by delegating to embedChunks and returning the first result.
@param {string|string[]} textInput - The text to embed
@returns {Promise<Array<number>>} - The embedding values
|
embedTextInput
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/EmbeddingEngines/ollama/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/EmbeddingEngines/ollama/index.js
|
MIT
|
async embedChunks(textChunks = []) {
if (!(await this.#isAlive()))
throw new Error(
`Ollama service could not be reached. Is Ollama running?`
);
this.log(
`Embedding ${textChunks.length} chunks of text with ${this.model}.`
);
let data = [];
let error = null;
for (const chunk of textChunks) {
try {
const res = await this.client.embeddings({
model: this.model,
prompt: chunk,
options: {
// Always set the num_ctx to the max chunk length defined by the user in the settings
// so that the maximum context window is used and content is not truncated.
num_ctx: this.embeddingMaxChunkLength,
},
});
const { embedding } = res;
if (!Array.isArray(embedding) || embedding.length === 0)
throw new Error("Ollama returned an empty embedding for chunk!");
data.push(embedding);
} catch (err) {
this.log(err.message);
error = err.message;
data = [];
break;
}
}
if (!!error) throw new Error(`Ollama Failed to embed: ${error}`);
return data.length > 0 ? data : null;
}
|
This function takes an array of text chunks and embeds them using the Ollama API.
Chunks are processed sequentially to avoid overwhelming the API with too many requests
or running out of resources on the endpoint running the ollama instance.
We will use the num_ctx option to set the maximum context window to the max chunk length defined by the user in the settings
so that the maximum context window is used and content is not truncated.
We also assume the default keep alive option. This could cause issues with models being unloaded and reloaded
on low-memory machines, but that is simply a user-end issue we cannot control. If the LLM and embedder are
constantly being loaded and unloaded, the user should use another LLM or Embedder to avoid this issue.
@param {string[]} textChunks - An array of text chunks to embed.
@returns {Promise<Array<number[]>>} - A promise that resolves to an array of embeddings.
|
embedChunks
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/EmbeddingEngines/ollama/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/EmbeddingEngines/ollama/index.js
|
MIT
|
get host() {
if (!NativeEmbeddingReranker.#transformers) return "https://huggingface.co";
try {
return new URL(NativeEmbeddingReranker.#transformers.env.remoteHost).host;
} catch (e) {
return this.#fallbackHost;
}
}
|
This function will return the host of the current reranker suite.
If the reranker suite is not initialized, it will return the default HF host.
@returns {string} The host of the current reranker suite.
|
host
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/EmbeddingRerankers/native/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/EmbeddingRerankers/native/index.js
|
MIT
|
async preload() {
try {
this.log(`Preloading reranker suite...`);
await this.initClient();
this.log(
`Preloaded reranker suite. Reranking is available as a service now.`
);
return;
} catch (e) {
console.error(e);
this.log(
`Failed to preload reranker suite. Reranking will be available on the first rerank call.`
);
return;
}
}
|
This function will preload the reranker suite and tokenizer.
This is useful for reducing the latency of the first rerank call and pre-downloading the models and such
to avoid having to wait for the models to download on the first rerank call.
|
preload
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/EmbeddingRerankers/native/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/EmbeddingRerankers/native/index.js
|
MIT
|
async initClient() {
if (NativeEmbeddingReranker.#transformers) {
this.log(`Reranker suite already initialized - reusing.`);
return;
}
await import("@xenova/transformers").then(
async ({ AutoModelForSequenceClassification, AutoTokenizer, env }) => {
this.log(`Loading reranker suite...`);
NativeEmbeddingReranker.#transformers = {
AutoModelForSequenceClassification,
AutoTokenizer,
env,
};
// Attempt to load the model and tokenizer in this order:
// 1. From local file system cache
// 2. Download and cache from remote host (hf.co)
// 3. Download and cache from fallback host (cdn.anythingllm.com)
await this.#getPreTrainedModel();
await this.#getPreTrainedTokenizer();
}
);
return;
}
|
This function will preload the reranker suite and tokenizer.
This is useful for reducing the latency of the first rerank call and pre-downloading the models and such
to avoid having to wait for the models to download on the first rerank call.
|
initClient
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/EmbeddingRerankers/native/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/EmbeddingRerankers/native/index.js
|
MIT
|
async rerank(query, documents, options = { topK: 4 }) {
await this.initClient();
const model = NativeEmbeddingReranker.#model;
const tokenizer = NativeEmbeddingReranker.#tokenizer;
const start = Date.now();
this.log(`Reranking ${documents.length} documents...`);
const inputs = tokenizer(new Array(documents.length).fill(query), {
text_pair: documents.map((doc) => doc.text),
padding: true,
truncation: true,
});
const { logits } = await model(inputs);
const reranked = logits
.sigmoid()
.tolist()
.map(([score], i) => ({
rerank_corpus_id: i,
rerank_score: score,
...documents[i],
}))
.sort((a, b) => b.rerank_score - a.rerank_score)
.slice(0, options.topK);
this.log(
`Reranking ${documents.length} documents to top ${options.topK} took ${Date.now() - start}ms`
);
return reranked;
}
|
Reranks a list of documents based on the query.
@param {string} query - The query to rerank the documents against.
@param {{text: string}[]} documents - The list of document text snippets to rerank. Should be output from a vector search.
@param {Object} options - The options for the reranking.
@param {number} options.topK - The number of top documents to return.
@returns {Promise<any[]>} - The reranked list of documents.
|
rerank
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/EmbeddingRerankers/native/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/EmbeddingRerankers/native/index.js
|
MIT
|
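A sketch of how `rerank` could be applied to the output of a vector search. The `NativeEmbeddingReranker` class name comes from the snippets above, but the export style, constructor, and document shapes are assumptions:

```javascript
// Hypothetical usage; export style and input shapes are assumptions.
const { NativeEmbeddingReranker } = require("./utils/EmbeddingRerankers/native");

async function rerankSearchResults(query, searchResults) {
  // searchResults is assumed to be [{ text, ...metadata }, ...] straight from a vector search.
  const reranker = new NativeEmbeddingReranker();
  const topDocs = await reranker.rerank(query, searchResults, { topK: 4 });

  // Each entry now carries rerank_score (sigmoid of the model logit) and
  // rerank_corpus_id (its index in the original list), per the implementation above.
  return topDocs.map(({ rerank_score, text }) => ({ rerank_score, text }));
}

module.exports = { rerankSearchResults };
```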
async function cachedVectorInformation(filename = null, checkOnly = false) {
if (!filename) return checkOnly ? false : { exists: false, chunks: [] };
const digest = uuidv5(filename, uuidv5.URL);
const file = path.resolve(vectorCachePath, `${digest}.json`);
const exists = fs.existsSync(file);
if (checkOnly) return exists;
if (!exists) return { exists, chunks: [] };
console.log(
`Cached vectorized results of ${filename} found! Using cached data to save on embed costs.`
);
const rawData = fs.readFileSync(file, "utf8");
return { exists: true, chunks: JSON.parse(rawData) };
}
|
Searches the vector-cache folder for existing information so we don't have to re-embed a
document and can instead push directly to vector db.
@param {string} filename - the filename to check for cached vector information
@param {boolean} checkOnly - if true, only check if the file exists, do not return the cached data
@returns {Promise<{exists: boolean, chunks: any[]}>} - a promise that resolves to an object containing the existence of the file and its cached chunks
|
cachedVectorInformation
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/files/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/files/index.js
|
MIT
|
async function storeVectorResult(vectorData = [], filename = null) {
if (!filename) return;
console.log(
`Caching vectorized results of ${filename} to prevent duplicated embedding.`
);
if (!fs.existsSync(vectorCachePath)) fs.mkdirSync(vectorCachePath);
const digest = uuidv5(filename, uuidv5.URL);
const writeTo = path.resolve(vectorCachePath, `${digest}.json`);
fs.writeFileSync(writeTo, JSON.stringify(vectorData), "utf8");
return;
}
|
Caches the vectorized result of a file in the vector-cache folder so it does not need to be re-embedded later.
@param {any[]} vectorData - the vectorized chunks to cache
@param {string} filename - the filename the cache entry belongs to
@returns {Promise<void>}
|
storeVectorResult
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/files/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/files/index.js
|
MIT
|
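These two cache helpers pair naturally: `cachedVectorInformation` checks for an earlier embedding run and `storeVectorResult` persists a new one. A minimal sketch of that check-then-reuse flow, where `embedAllChunks` is a stand-in for whatever embedder is configured (a hypothetical helper) and the import path is assumed:

```javascript
// Hypothetical flow; embedAllChunks and the import path are assumptions.
const { cachedVectorInformation, storeVectorResult } = require("./utils/files");

async function vectorsForDocument(filename, embedAllChunks) {
  // Cheap existence check first (checkOnly = true returns a boolean).
  const alreadyCached = await cachedVectorInformation(filename, true);

  if (alreadyCached) {
    const { chunks } = await cachedVectorInformation(filename);
    return chunks; // reuse the prior embeddings and skip the embed cost entirely
  }

  const chunks = await embedAllChunks(filename); // assumed to return the vector payload to cache
  await storeVectorResult(chunks, filename);     // cache for the next time this file is embedded
  return chunks;
}

module.exports = { vectorsForDocument };
```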
async function purgeSourceDocument(filename = null) {
if (!filename) return;
const filePath = path.resolve(documentsPath, normalizePath(filename));
if (
!fs.existsSync(filePath) ||
!isWithin(documentsPath, filePath) ||
!fs.lstatSync(filePath).isFile()
)
return;
console.log(`Purging source document of ${filename}.`);
fs.rmSync(filePath);
return;
}
|
Removes the source document file from the documents folder, guarding against paths that resolve outside of it.
@param {string} filename - the filename of the source document to remove
@returns {Promise<void>}
|
purgeSourceDocument
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/files/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/files/index.js
|
MIT
|
async function purgeVectorCache(filename = null) {
if (!filename) return;
const digest = uuidv5(filename, uuidv5.URL);
const filePath = path.resolve(vectorCachePath, `${digest}.json`);
if (!fs.existsSync(filePath) || !fs.lstatSync(filePath).isFile()) return;
console.log(`Purging vector-cache of ${filename}.`);
fs.rmSync(filePath);
return;
}
|
Removes the cached vector information for a file from the vector-cache folder.
@param {string} filename - the filename whose cache entry should be removed
@returns {Promise<void>}
|
purgeVectorCache
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/files/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/files/index.js
|
MIT
|
async function findDocumentInDocuments(documentName = null) {
if (!documentName) return null;
for (const folder of fs.readdirSync(documentsPath)) {
const isFolder = fs
.lstatSync(path.join(documentsPath, folder))
.isDirectory();
if (!isFolder) continue;
const targetFilename = normalizePath(documentName);
const targetFileLocation = path.join(documentsPath, folder, targetFilename);
if (
!fs.existsSync(targetFileLocation) ||
!isWithin(documentsPath, targetFileLocation)
)
continue;
const fileData = fs.readFileSync(targetFileLocation, "utf8");
const cachefilename = `${folder}/${targetFilename}`;
const { pageContent, ...metadata } = JSON.parse(fileData);
return {
name: targetFilename,
type: "file",
...metadata,
cached: await cachedVectorInformation(cachefilename, true),
};
}
return null;
}
|
Searches every folder in the documents directory for a document by name and returns its metadata (without page content) along with its vector-cache status.
@param {string} documentName - the filename of the document to find
@returns {Promise<Object|null>} - the document metadata, or null if it was not found
|
findDocumentInDocuments
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/files/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/files/index.js
|
MIT
|
function isWithin(outer, inner) {
if (outer === inner) return false;
const rel = path.relative(outer, inner);
return !rel.startsWith("../") && rel !== "..";
}
|
Checks if a given path is within another path.
@param {string} outer - The outer path (should be resolved).
@param {string} inner - The inner path (should be resolved).
@returns {boolean} - Returns true if the inner path is within the outer path, false otherwise.
|
isWithin
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/files/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/files/index.js
|
MIT
|
function normalizePath(filepath = "") {
const result = path
.normalize(filepath.trim())
.replace(/^(\.\.(\/|\\|$))+/, "")
.trim();
if (["..", ".", "/"].includes(result)) throw new Error("Invalid path.");
return result;
}
|
Normalizes a file path and strips leading directory-traversal sequences.
Throws if the result is '..', '.' or '/'.
@param {string} filepath - The path to normalize.
@returns {string} - The normalized path.
|
normalizePath
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/files/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/files/index.js
|
MIT
|
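`normalizePath` and `isWithin` are used together throughout this file (see `purgeSourceDocument` and `findDocumentInDocuments` above) to keep user-supplied filenames inside the storage directory. A small sketch of that guard; the import path and the `documentsPath` value are assumptions:

```javascript
// Illustration of the traversal guard; the import path and documentsPath are placeholders.
const path = require("path");
const { normalizePath, isWithin } = require("./utils/files"); // assumed export location

const documentsPath = path.resolve(__dirname, "storage", "documents");

function safeDocumentPath(userSuppliedName) {
  // Strips leading "../" sequences and rejects "..", "." and "/" outright.
  const cleaned = normalizePath(userSuppliedName);
  const resolved = path.resolve(documentsPath, cleaned);

  // Reject anything that still resolves outside the documents folder.
  if (!isWithin(documentsPath, resolved)) throw new Error("Invalid path.");
  return resolved;
}

// safeDocumentPath("folder/file.json") -> <documentsPath>/folder/file.json
// safeDocumentPath("/etc/passwd")      -> throws, since the resolved path escapes documentsPath
```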
function hasVectorCachedFiles() {
try {
return (
fs.readdirSync(vectorCachePath)?.filter((name) => name.endsWith(".json"))
.length !== 0
);
} catch {}
return false;
}
|
Checks if the vector-cache folder contains any cached (.json) vector files.
@returns {boolean} - true if at least one cached vector file exists.
|
hasVectorCachedFiles
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/files/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/files/index.js
|
MIT
|
async function getPinnedWorkspacesByDocument(filenames = []) {
return (
await Document.where(
{
docpath: {
in: Object.keys(filenames),
},
pinned: true,
},
null,
null,
null,
{
workspaceId: true,
docpath: true,
}
)
).reduce((result, { workspaceId, docpath }) => {
const filename = filenames[docpath];
if (!result[filename]) result[filename] = [];
if (!result[filename].includes(workspaceId))
result[filename].push(workspaceId);
return result;
}, {});
}
|
@param {Record<string, string>} filenames - record of docpath to filename to check for pinned workspaces
@returns {Promise<Record<string, string[]>>} - a record of filenames and their corresponding workspaceIds
|
getPinnedWorkspacesByDocument
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/files/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/files/index.js
|
MIT
|
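Note that `getPinnedWorkspacesByDocument` reads its argument as a record keyed by docpath (see the `Object.keys(filenames)` and `filenames[docpath]` lookups above), despite the array default. A sketch of the assumed input and output shapes with invented values:

```javascript
// Hypothetical shapes; docpaths, filenames, and workspace ids are invented.
const { getPinnedWorkspacesByDocument } = require("./utils/files"); // assumed export location

async function example() {
  // Keys are the stored docpaths, values are the display filenames.
  const docpathToFilename = {
    "custom-documents/report-q1.json": "report-q1.json",
    "custom-documents/handbook.json": "handbook.json",
  };

  const pinned = await getPinnedWorkspacesByDocument(docpathToFilename);
  // Expected shape: { "report-q1.json": [1, 4], "handbook.json": [2] }
  // i.e. each filename maps to the workspaceIds where that document is pinned.
  return pinned;
}

module.exports = { example };
```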
async function getWatchedDocumentFilenames(filenames = []) {
return (
await Document.where(
{
docpath: { in: Object.keys(filenames) },
watched: true,
},
null,
null,
null,
{ workspaceId: true, docpath: true }
)
).reduce((result, { workspaceId, docpath }) => {
const filename = filenames[docpath];
result[filename] = workspaceId;
return result;
}, {});
}
|
Get a record of filenames and their corresponding workspaceIds that have watched a document
that will be used to determine if a document should be displayed in the watched documents sidebar
@param {Record<string, string>} filenames - record of docpath to filename to check for watched workspaces
@returns {Promise<Record<string, number>>} - a record of filenames and the workspaceId watching each document
|
getWatchedDocumentFilenames
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/files/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/files/index.js
|
MIT
|
function purgeEntireVectorCache() {
fs.rmSync(vectorCachePath, { recursive: true, force: true });
fs.mkdirSync(vectorCachePath);
return;
}
|
Purges the entire vector-cache folder and recreates it.
@returns {void}
|
purgeEntireVectorCache
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/files/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/files/index.js
|
MIT
|
async function fileToPickerData({
pathToFile,
liveSyncAvailable = false,
cachefilename = null,
}) {
let metadata = {};
const filename = path.basename(pathToFile);
const fileStats = fs.statSync(pathToFile);
const cachedStatus = await cachedVectorInformation(cachefilename, true);
if (fileStats.size < FILE_READ_SIZE_THRESHOLD) {
const rawData = fs.readFileSync(pathToFile, "utf8");
try {
metadata = JSON.parse(rawData);
// Remove the pageContent field from the metadata - it is large and not needed for the picker
delete metadata.pageContent;
} catch (err) {
console.error("Error parsing file", err);
return null;
}
return {
name: filename,
type: "file",
...metadata,
cached: cachedStatus,
canWatch: liveSyncAvailable
? DocumentSyncQueue.canWatch(metadata)
: false,
// pinnedWorkspaces: [], // This is the list of workspaceIds that have pinned this document
// watched: false, // boolean to indicate if this document is watched in ANY workspace
};
}
console.log(
`Stream-parsing ${path.basename(pathToFile)} because it exceeds the ${FILE_READ_SIZE_THRESHOLD} byte limit.`
);
const stream = fs.createReadStream(pathToFile, { encoding: "utf8" });
try {
let fileContent = "";
metadata = await new Promise((resolve, reject) => {
stream
.on("data", (chunk) => {
fileContent += chunk;
})
.on("end", () => {
metadata = JSON.parse(fileContent);
// Remove the pageContent field from the metadata - it is large and not needed for the picker
delete metadata.pageContent;
resolve(metadata);
})
.on("error", (err) => {
console.error("Error parsing file", err);
reject(null);
});
}).catch((err) => {
console.error("Error parsing file", err);
});
} catch (err) {
console.error("Error parsing file", err);
metadata = null;
} finally {
stream.destroy();
}
// If the metadata is empty or something went wrong, return null
if (!metadata || !Object.keys(metadata)?.length) {
console.log(`Stream-parsing failed for ${path.basename(pathToFile)}`);
return null;
}
return {
name: filename,
type: "file",
...metadata,
cached: cachedStatus,
canWatch: liveSyncAvailable ? DocumentSyncQueue.canWatch(metadata) : false,
};
}
|
Converts a file to picker data
@param {string} pathToFile - The path to the file to convert
@param {boolean} liveSyncAvailable - Whether live sync is available
@param {string|null} cachefilename - The cache filename used to look up the document's vector-cache status
@returns {Promise<{name: string, type: string, cached: boolean, canWatch: boolean, [key: string]: any}>} - The picker data, or null if the file could not be parsed
|
fileToPickerData
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/files/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/files/index.js
|
MIT
|
function hasRequiredMetadata(metadata = {}) {
return REQUIRED_FILE_OBJECT_FIELDS.every((field) =>
metadata.hasOwnProperty(field)
);
}
|
Checks if a given metadata object has all the required fields
@param {{name: string, type: string, url: string, title: string, docAuthor: string, description: string, docSource: string, chunkSource: string, published: string, wordCount: number, token_count_estimate: number}} metadata - The metadata object to check (fileToPickerData)
@returns {boolean} - Returns true if the metadata object has all the required fields, false otherwise
|
hasRequiredMetadata
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/files/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/files/index.js
|
MIT
|
function isDefaultFilename(filename) {
return [LOGO_FILENAME, LOGO_FILENAME_DARK].includes(filename);
}
|
Checks if the filename is the default logo filename for dark or light mode.
@param {string} filename - The filename to check.
@returns {boolean} Whether the filename is the default logo filename.
|
isDefaultFilename
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/files/logo.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/files/logo.js
|
MIT
|
function validFilename(newFilename = "") {
return !isDefaultFilename(newFilename);
}
|
Checks if the filename can be used as a custom logo filename (i.e. it is not
one of the default logo filenames).
@param {string} newFilename - The filename to check.
@returns {boolean} Whether the filename is a valid custom logo filename.
|
validFilename
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/files/logo.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/files/logo.js
|
MIT
|
function getDefaultFilename(darkMode = true) {
return darkMode ? LOGO_FILENAME : LOGO_FILENAME_DARK;
}
|
Shows the logo for the current theme. In dark mode, it shows the light logo
and vice versa.
@param {boolean} darkMode - Whether the logo should be for dark mode.
@returns {string} The filename of the logo.
|
getDefaultFilename
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/files/logo.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/files/logo.js
|
MIT
|
async function determineLogoFilepath(defaultFilename = LOGO_FILENAME) {
const currentLogoFilename = await SystemSettings.currentLogoFilename();
const basePath = process.env.STORAGE_DIR
? path.join(process.env.STORAGE_DIR, "assets")
: path.join(__dirname, "../../storage/assets");
const defaultFilepath = path.join(basePath, defaultFilename);
if (currentLogoFilename && validFilename(currentLogoFilename)) {
const customLogoPath = path.join(basePath, normalizePath(currentLogoFilename));
if (!isWithin(path.resolve(basePath), path.resolve(customLogoPath)))
return defaultFilepath;
return fs.existsSync(customLogoPath) ? customLogoPath : defaultFilepath;
}
return defaultFilepath;
}
|
Determines the absolute filepath of the logo to serve, preferring a valid
custom logo and falling back to the provided default filename.
@param {string} defaultFilename - The default logo filename to fall back to.
@returns {Promise<string>} The filepath of the logo.
|
determineLogoFilepath
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/files/logo.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/files/logo.js
|
MIT
|
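determineLogoFilepath guards against path traversal by normalizing the custom filename and checking it stays inside the assets directory. A minimal sketch of that containment check using only Node's path module (the repo's isWithin/normalizePath helpers are assumed to behave similarly):
const path = require("path");

// Returns true when `child` resolves to a location inside `parent`.
// This mirrors the kind of check isWithin is assumed to perform.
function isInsideDirectory(parent, child) {
  const relative = path.relative(path.resolve(parent), path.resolve(child));
  return (
    relative.length > 0 &&
    !relative.startsWith("..") &&
    !path.isAbsolute(relative)
  );
}

const assetsDir = "/storage/assets"; // hypothetical base directory
console.log(isInsideDirectory(assetsDir, "/storage/assets/logo.png")); // true
console.log(isInsideDirectory(assetsDir, "/storage/assets/../../etc/passwd")); // false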
function fetchLogo(logoPath) {
if (!fs.existsSync(logoPath)) {
return {
found: false,
buffer: null,
size: 0,
mime: "none/none",
};
}
const mime = getType(logoPath);
const buffer = fs.readFileSync(logoPath);
return {
found: true,
buffer,
size: buffer.length,
mime,
};
}
|
Reads the logo file at the given path and returns its contents and metadata.
@param {string} logoPath - The filepath of the logo to read.
@returns {{found: boolean, buffer: Buffer|null, size: number, mime: string}} The logo file contents and metadata.
|
fetchLogo
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/files/logo.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/files/logo.js
|
MIT
|
async function renameLogoFile(originalFilename = null) {
const extname = path.extname(originalFilename) || ".png";
const newFilename = `${v4()}${extname}`;
const assetsDirectory = process.env.STORAGE_DIR
? path.join(process.env.STORAGE_DIR, "assets")
: path.join(__dirname, `../../storage/assets`);
const originalFilepath = path.join(
assetsDirectory,
normalizePath(originalFilename)
);
if (!isWithin(path.resolve(assetsDirectory), path.resolve(originalFilepath)))
throw new Error("Invalid file path.");
// The output always uses a random filename.
const outputFilepath = process.env.STORAGE_DIR
? path.join(process.env.STORAGE_DIR, "assets", normalizePath(newFilename))
: path.join(__dirname, `../../storage/assets`, normalizePath(newFilename));
fs.renameSync(originalFilepath, outputFilepath);
return newFilename;
}
|
Renames an uploaded logo file to a randomized (UUID-based) filename inside the
assets directory, guarding against path traversal.
@param {string|null} originalFilename - The original uploaded filename.
@returns {Promise<string>} The new randomized filename.
|
renameLogoFile
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/files/logo.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/files/logo.js
|
MIT
|
async function removeCustomLogo(logoFilename = LOGO_FILENAME) {
if (!logoFilename || !validFilename(logoFilename)) return false;
const assetsDirectory = process.env.STORAGE_DIR
? path.join(process.env.STORAGE_DIR, "assets")
: path.join(__dirname, `../../storage/assets`);
const logoPath = path.join(assetsDirectory, normalizePath(logoFilename));
if (!isWithin(path.resolve(assetsDirectory), path.resolve(logoPath)))
throw new Error("Invalid file path.");
if (fs.existsSync(logoPath)) fs.unlinkSync(logoPath);
return true;
}
|
Removes a custom logo file from the assets directory. Default logo filenames
are never removed.
@param {string} logoFilename - The custom logo filename to remove.
@returns {Promise<boolean>} Whether the removal was processed.
|
removeCustomLogo
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/files/logo.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/files/logo.js
|
MIT
|
function handleFileUpload(request, response, next) {
const upload = multer({ storage: fileUploadStorage }).single("file");
upload(request, response, function (err) {
if (err) {
response
.status(500)
.json({
success: false,
error: `Invalid file upload. ${err.message}`,
})
.end();
return;
}
next();
});
}
|
Handle Generic file upload as documents from the GUI
@param {Request} request
@param {Response} response
@param {NextFunction} next
|
handleFileUpload
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/files/multer.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/files/multer.js
|
MIT
|
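handleFileUpload wires multer's single-file upload into an Express middleware chain. A hedged sketch of the same shape with an in-memory storage engine (fileUploadStorage in the repo is a custom disk storage; the route path and field name here are illustrative):
const express = require("express");
const multer = require("multer");

const app = express();
const upload = multer({ storage: multer.memoryStorage() }).single("file");

// Same pattern as handleFileUpload: run the upload, surface errors as JSON,
// otherwise continue to the actual route handler.
app.post(
  "/upload", // illustrative route
  (request, response, next) => {
    upload(request, response, function (err) {
      if (err) {
        response
          .status(500)
          .json({ success: false, error: `Invalid file upload. ${err.message}` })
          .end();
        return;
      }
      next();
    });
  },
  (request, response) => {
    response.json({ success: true, filename: request.file?.originalname });
  }
);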
function handleAPIFileUpload(request, response, next) {
const upload = multer({ storage: fileAPIUploadStorage }).single("file");
upload(request, response, function (err) {
if (err) {
response
.status(500)
.json({
success: false,
error: `Invalid file upload. ${err.message}`,
})
.end();
return;
}
next();
});
}
|
Handle API file upload as documents - this does not manipulate the filename
at all for encoding/charset reasons.
@param {Request} request
@param {Response} response
@param {NextFunction} next
|
handleAPIFileUpload
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/files/multer.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/files/multer.js
|
MIT
|
function handlePfpUpload(request, response, next) {
const upload = multer({ storage: pfpUploadStorage }).single("file");
upload(request, response, function (err) {
if (err) {
response
.status(500)
.json({
success: false,
error: `Invalid file upload. ${err.message}`,
})
.end();
return;
}
next();
});
}
|
Handle profile picture (PFP) file uploads from the GUI
@param {Request} request
@param {Response} response
@param {NextFunction} next
|
handlePfpUpload
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/files/multer.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/files/multer.js
|
MIT
|
async function purgeFolder(folderName = null) {
if (!folderName) return;
const subFolder = normalizePath(folderName);
const subFolderPath = path.resolve(documentsPath, subFolder);
const validRemovableSubFolders = fs
.readdirSync(documentsPath)
.map((folder) => {
// Filter out any results which are not folders or
// are the protected custom-documents folder.
if (folder === "custom-documents") return null;
const subfolderPath = path.resolve(documentsPath, folder);
if (!fs.lstatSync(subfolderPath).isDirectory()) return null;
return folder;
})
.filter((subFolder) => !!subFolder);
if (
!validRemovableSubFolders.includes(subFolder) ||
!fs.existsSync(subFolderPath) ||
!isWithin(documentsPath, subFolderPath)
)
return;
const filenames = fs
.readdirSync(subFolderPath)
.map((file) =>
path.join(subFolderPath, file).replace(documentsPath + "/", "")
);
const workspaces = await Workspace.where();
const purgePromises = [];
// Remove associated Vector-cache files
for (const filename of filenames) {
const rmVectorCache = () =>
new Promise((resolve) =>
purgeVectorCache(filename).then(() => resolve(true))
);
purgePromises.push(rmVectorCache);
}
// Remove workspace document associations
for (const workspace of workspaces) {
const rmWorkspaceDoc = () =>
new Promise((resolve) =>
Document.removeDocuments(workspace, filenames).then(() => resolve(true))
);
purgePromises.push(rmWorkspaceDoc);
}
await Promise.all(purgePromises.flat().map((f) => f()));
fs.rmSync(subFolderPath, { recursive: true }); // Delete target document-folder and source files.
return;
}
|
Purge a folder and all its contents. This will also remove all vector-cache files and workspace document associations
for the documents within the folder.
@notice This function is not recursive. It only purges the contents of the specified folder.
@notice You cannot purge the `custom-documents` folder.
@param {string} folderName - The name/path of the folder to purge.
@returns {Promise<void>}
|
purgeFolder
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/files/purgeDocument.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/files/purgeDocument.js
|
MIT
|
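purgeFolder collects promise-returning thunks and only executes them together at the end. A small self-contained sketch of that defer-then-run pattern (the cleanup tasks here are placeholders):
// Collect work as functions that *return* promises so nothing starts
// until Promise.all maps over them and invokes each thunk.
const purgePromises = [];
for (const filename of ["a.json", "b.json"]) { // placeholder filenames
  const task = () =>
    new Promise((resolve) => {
      console.log(`purging ${filename}`);
      resolve(true);
    });
  purgePromises.push(task);
}
Promise.all(purgePromises.map((f) => f())).then((results) =>
  console.log(`completed ${results.length} purge tasks`)
);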
rmVectorCache = () =>
new Promise((resolve) =>
purgeVectorCache(filename).then(() => resolve(true))
)
|
Purge a folder and all its contents. This will also remove all vector-cache files and workspace document associations
for the documents within the folder.
@notice This function is not recursive. It only purges the contents of the specified folder.
@notice You cannot purge the `custom-documents` folder.
@param {string} folderName - The name/path of the folder to purge.
@returns {Promise<void>}
|
rmVectorCache
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/files/purgeDocument.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/files/purgeDocument.js
|
MIT
|
rmWorkspaceDoc = () =>
new Promise((resolve) =>
Document.removeDocuments(workspace, filenames).then(() => resolve(true))
)
|
Purge a folder and all its contents. This will also remove all vector-cache files and workspace document associations
for the documents within the folder.
@notice This function is not recursive. It only purges the contents of the specified folder.
@notice You cannot purge the `custom-documents` folder.
@param {string} folderName - The name/path of the folder to purge.
@returns {Promise<void>}
|
rmWorkspaceDoc
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/files/purgeDocument.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/files/purgeDocument.js
|
MIT
|
function getVectorDbClass(getExactly = null) {
const vectorSelection = getExactly ?? process.env.VECTOR_DB ?? "lancedb";
switch (vectorSelection) {
case "pinecone":
const { Pinecone } = require("../vectorDbProviders/pinecone");
return Pinecone;
case "chroma":
const { Chroma } = require("../vectorDbProviders/chroma");
return Chroma;
case "lancedb":
const { LanceDb } = require("../vectorDbProviders/lance");
return LanceDb;
case "weaviate":
const { Weaviate } = require("../vectorDbProviders/weaviate");
return Weaviate;
case "qdrant":
const { QDrant } = require("../vectorDbProviders/qdrant");
return QDrant;
case "milvus":
const { Milvus } = require("../vectorDbProviders/milvus");
return Milvus;
case "zilliz":
const { Zilliz } = require("../vectorDbProviders/zilliz");
return Zilliz;
case "astra":
const { AstraDB } = require("../vectorDbProviders/astra");
return AstraDB;
case "pgvector":
const { PGVector } = require("../vectorDbProviders/pgvector");
return PGVector;
default:
throw new Error("ENV: No VECTOR_DB value found in environment!");
}
}
|
Gets the system's current vector database provider.
@param {('pinecone' | 'chroma' | 'lancedb' | 'weaviate' | 'qdrant' | 'milvus' | 'zilliz' | 'astra' | 'pgvector') | null} getExactly - If provided, this will return an explicit provider.
@returns {BaseVectorDatabaseProvider}
|
getVectorDbClass
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/helpers/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/helpers/index.js
|
MIT
|
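getVectorDbClass (and the LLM helpers below) follow a lazy-require factory pattern: the module behind each case is only required when that case is selected, keeping startup light. A generic, self-contained sketch of the pattern using built-in Node modules so it runs on its own (the selection names here are illustrative, not the repo's providers):
// Lazy-require factory: each case loads its module only when selected.
function getLib(selection = "zlib") {
  switch (selection) {
    case "zlib":
      return require("zlib");
    case "crypto":
      return require("crypto");
    default:
      throw new Error(`Unknown selection: ${selection}`);
  }
}
console.log(typeof getLib("zlib").gzipSync); // "function"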
function getLLMProvider({ provider = null, model = null } = {}) {
const LLMSelection = provider ?? process.env.LLM_PROVIDER ?? "openai";
const embedder = getEmbeddingEngineSelection();
switch (LLMSelection) {
case "openai":
const { OpenAiLLM } = require("../AiProviders/openAi");
return new OpenAiLLM(embedder, model);
case "azure":
const { AzureOpenAiLLM } = require("../AiProviders/azureOpenAi");
return new AzureOpenAiLLM(embedder, model);
case "anthropic":
const { AnthropicLLM } = require("../AiProviders/anthropic");
return new AnthropicLLM(embedder, model);
case "gemini":
const { GeminiLLM } = require("../AiProviders/gemini");
return new GeminiLLM(embedder, model);
case "lmstudio":
const { LMStudioLLM } = require("../AiProviders/lmStudio");
return new LMStudioLLM(embedder, model);
case "localai":
const { LocalAiLLM } = require("../AiProviders/localAi");
return new LocalAiLLM(embedder, model);
case "ollama":
const { OllamaAILLM } = require("../AiProviders/ollama");
return new OllamaAILLM(embedder, model);
case "togetherai":
const { TogetherAiLLM } = require("../AiProviders/togetherAi");
return new TogetherAiLLM(embedder, model);
case "fireworksai":
const { FireworksAiLLM } = require("../AiProviders/fireworksAi");
return new FireworksAiLLM(embedder, model);
case "perplexity":
const { PerplexityLLM } = require("../AiProviders/perplexity");
return new PerplexityLLM(embedder, model);
case "openrouter":
const { OpenRouterLLM } = require("../AiProviders/openRouter");
return new OpenRouterLLM(embedder, model);
case "mistral":
const { MistralLLM } = require("../AiProviders/mistral");
return new MistralLLM(embedder, model);
case "huggingface":
const { HuggingFaceLLM } = require("../AiProviders/huggingface");
return new HuggingFaceLLM(embedder, model);
case "groq":
const { GroqLLM } = require("../AiProviders/groq");
return new GroqLLM(embedder, model);
case "koboldcpp":
const { KoboldCPPLLM } = require("../AiProviders/koboldCPP");
return new KoboldCPPLLM(embedder, model);
case "textgenwebui":
const { TextGenWebUILLM } = require("../AiProviders/textGenWebUI");
return new TextGenWebUILLM(embedder, model);
case "cohere":
const { CohereLLM } = require("../AiProviders/cohere");
return new CohereLLM(embedder, model);
case "litellm":
const { LiteLLM } = require("../AiProviders/liteLLM");
return new LiteLLM(embedder, model);
case "generic-openai":
const { GenericOpenAiLLM } = require("../AiProviders/genericOpenAi");
return new GenericOpenAiLLM(embedder, model);
case "bedrock":
const { AWSBedrockLLM } = require("../AiProviders/bedrock");
return new AWSBedrockLLM(embedder, model);
case "deepseek":
const { DeepSeekLLM } = require("../AiProviders/deepseek");
return new DeepSeekLLM(embedder, model);
case "apipie":
const { ApiPieLLM } = require("../AiProviders/apipie");
return new ApiPieLLM(embedder, model);
case "novita":
const { NovitaLLM } = require("../AiProviders/novita");
return new NovitaLLM(embedder, model);
case "xai":
const { XAiLLM } = require("../AiProviders/xai");
return new XAiLLM(embedder, model);
case "nvidia-nim":
const { NvidiaNimLLM } = require("../AiProviders/nvidiaNim");
return new NvidiaNimLLM(embedder, model);
case "ppio":
const { PPIOLLM } = require("../AiProviders/ppio");
return new PPIOLLM(embedder, model);
case "dpais":
const { DellProAiStudioLLM } = require("../AiProviders/dellProAiStudio");
return new DellProAiStudioLLM(embedder, model);
default:
throw new Error(
`ENV: No valid LLM_PROVIDER value found in environment! Using ${process.env.LLM_PROVIDER}`
);
}
}
|
Returns the LLMProvider with its embedder attached via system or via defined provider.
@param {{provider: string | null, model: string | null} | null} params - Initialize params for LLMs provider
@returns {BaseLLMProvider}
|
getLLMProvider
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/helpers/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/helpers/index.js
|
MIT
|
function getEmbeddingEngineSelection() {
const { NativeEmbedder } = require("../EmbeddingEngines/native");
const engineSelection = process.env.EMBEDDING_ENGINE;
switch (engineSelection) {
case "openai":
const { OpenAiEmbedder } = require("../EmbeddingEngines/openAi");
return new OpenAiEmbedder();
case "azure":
const {
AzureOpenAiEmbedder,
} = require("../EmbeddingEngines/azureOpenAi");
return new AzureOpenAiEmbedder();
case "localai":
const { LocalAiEmbedder } = require("../EmbeddingEngines/localAi");
return new LocalAiEmbedder();
case "ollama":
const { OllamaEmbedder } = require("../EmbeddingEngines/ollama");
return new OllamaEmbedder();
case "native":
return new NativeEmbedder();
case "lmstudio":
const { LMStudioEmbedder } = require("../EmbeddingEngines/lmstudio");
return new LMStudioEmbedder();
case "cohere":
const { CohereEmbedder } = require("../EmbeddingEngines/cohere");
return new CohereEmbedder();
case "voyageai":
const { VoyageAiEmbedder } = require("../EmbeddingEngines/voyageAi");
return new VoyageAiEmbedder();
case "litellm":
const { LiteLLMEmbedder } = require("../EmbeddingEngines/liteLLM");
return new LiteLLMEmbedder();
case "mistral":
const { MistralEmbedder } = require("../EmbeddingEngines/mistral");
return new MistralEmbedder();
case "generic-openai":
const {
GenericOpenAiEmbedder,
} = require("../EmbeddingEngines/genericOpenAi");
return new GenericOpenAiEmbedder();
case "gemini":
const { GeminiEmbedder } = require("../EmbeddingEngines/gemini");
return new GeminiEmbedder();
default:
return new NativeEmbedder();
}
}
|
Returns the EmbedderProvider configured by the EMBEDDING_ENGINE setting, falling back to the native embedder.
@returns {BaseEmbedderProvider}
|
getEmbeddingEngineSelection
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/helpers/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/helpers/index.js
|
MIT
|
function getLLMProviderClass({ provider = null } = {}) {
switch (provider) {
case "openai":
const { OpenAiLLM } = require("../AiProviders/openAi");
return OpenAiLLM;
case "azure":
const { AzureOpenAiLLM } = require("../AiProviders/azureOpenAi");
return AzureOpenAiLLM;
case "anthropic":
const { AnthropicLLM } = require("../AiProviders/anthropic");
return AnthropicLLM;
case "gemini":
const { GeminiLLM } = require("../AiProviders/gemini");
return GeminiLLM;
case "lmstudio":
const { LMStudioLLM } = require("../AiProviders/lmStudio");
return LMStudioLLM;
case "localai":
const { LocalAiLLM } = require("../AiProviders/localAi");
return LocalAiLLM;
case "ollama":
const { OllamaAILLM } = require("../AiProviders/ollama");
return OllamaAILLM;
case "togetherai":
const { TogetherAiLLM } = require("../AiProviders/togetherAi");
return TogetherAiLLM;
case "fireworksai":
const { FireworksAiLLM } = require("../AiProviders/fireworksAi");
return FireworksAiLLM;
case "perplexity":
const { PerplexityLLM } = require("../AiProviders/perplexity");
return PerplexityLLM;
case "openrouter":
const { OpenRouterLLM } = require("../AiProviders/openRouter");
return OpenRouterLLM;
case "mistral":
const { MistralLLM } = require("../AiProviders/mistral");
return MistralLLM;
case "huggingface":
const { HuggingFaceLLM } = require("../AiProviders/huggingface");
return HuggingFaceLLM;
case "groq":
const { GroqLLM } = require("../AiProviders/groq");
return GroqLLM;
case "koboldcpp":
const { KoboldCPPLLM } = require("../AiProviders/koboldCPP");
return KoboldCPPLLM;
case "textgenwebui":
const { TextGenWebUILLM } = require("../AiProviders/textGenWebUI");
return TextGenWebUILLM;
case "cohere":
const { CohereLLM } = require("../AiProviders/cohere");
return CohereLLM;
case "litellm":
const { LiteLLM } = require("../AiProviders/liteLLM");
return LiteLLM;
case "generic-openai":
const { GenericOpenAiLLM } = require("../AiProviders/genericOpenAi");
return GenericOpenAiLLM;
case "bedrock":
const { AWSBedrockLLM } = require("../AiProviders/bedrock");
return AWSBedrockLLM;
case "deepseek":
const { DeepSeekLLM } = require("../AiProviders/deepseek");
return DeepSeekLLM;
case "apipie":
const { ApiPieLLM } = require("../AiProviders/apipie");
return ApiPieLLM;
case "novita":
const { NovitaLLM } = require("../AiProviders/novita");
return NovitaLLM;
case "xai":
const { XAiLLM } = require("../AiProviders/xai");
return XAiLLM;
case "nvidia-nim":
const { NvidiaNimLLM } = require("../AiProviders/nvidiaNim");
return NvidiaNimLLM;
case "ppio":
const { PPIOLLM } = require("../AiProviders/ppio");
return PPIOLLM;
case "dpais":
const { DellProAiStudioLLM } = require("../AiProviders/dellProAiStudio");
return DellProAiStudioLLM;
default:
return null;
}
}
|
Returns the LLMProviderClass - this is a helper method to access static methods on a class
@param {{provider: string | null} | null} params - Initialize params for LLMs provider
@returns {BaseLLMProviderClass}
|
getLLMProviderClass
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/helpers/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/helpers/index.js
|
MIT
|
function getBaseLLMProviderModel({ provider = null } = {}) {
switch (provider) {
case "openai":
return process.env.OPEN_MODEL_PREF;
case "azure":
return process.env.OPEN_MODEL_PREF;
case "anthropic":
return process.env.ANTHROPIC_MODEL_PREF;
case "gemini":
return process.env.GEMINI_LLM_MODEL_PREF;
case "lmstudio":
return process.env.LMSTUDIO_MODEL_PREF;
case "localai":
return process.env.LOCAL_AI_MODEL_PREF;
case "ollama":
return process.env.OLLAMA_MODEL_PREF;
case "togetherai":
return process.env.TOGETHER_AI_MODEL_PREF;
case "fireworksai":
return process.env.FIREWORKS_AI_LLM_MODEL_PREF;
case "perplexity":
return process.env.PERPLEXITY_MODEL_PREF;
case "openrouter":
return process.env.OPENROUTER_MODEL_PREF;
case "mistral":
return process.env.MISTRAL_MODEL_PREF;
case "huggingface":
return null;
case "groq":
return process.env.GROQ_MODEL_PREF;
case "koboldcpp":
return process.env.KOBOLD_CPP_MODEL_PREF;
case "textgenwebui":
return process.env.TEXT_GEN_WEB_UI_API_KEY;
case "cohere":
return process.env.COHERE_MODEL_PREF;
case "litellm":
return process.env.LITE_LLM_MODEL_PREF;
case "generic-openai":
return process.env.GENERIC_OPEN_AI_EMBEDDING_API_KEY;
case "bedrock":
return process.env.AWS_BEDROCK_LLM_MODEL_PREFERENCE;
case "deepseek":
return process.env.DEEPSEEK_MODEL_PREF;
case "apipie":
return process.env.APIPIE_LLM_API_KEY;
case "novita":
return process.env.NOVITA_LLM_MODEL_PREF;
case "xai":
return process.env.XAI_LLM_MODEL_PREF;
case "nvidia-nim":
return process.env.NVIDIA_NIM_LLM_MODEL_PREF;
case "ppio":
return process.env.PPIO_API_KEY;
case "dpais":
return process.env.DPAIS_LLM_MODEL_PREF;
default:
return null;
}
}
|
Returns the defined model (if available) for the given provider.
@param {{provider: string | null} | null} params - Initialize params for LLMs provider
@returns {string | null}
|
getBaseLLMProviderModel
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/helpers/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/helpers/index.js
|
MIT
|
function maximumChunkLength() {
if (
!!process.env.EMBEDDING_MODEL_MAX_CHUNK_LENGTH &&
!isNaN(process.env.EMBEDDING_MODEL_MAX_CHUNK_LENGTH) &&
Number(process.env.EMBEDDING_MODEL_MAX_CHUNK_LENGTH) > 1
)
return Number(process.env.EMBEDDING_MODEL_MAX_CHUNK_LENGTH);
return 1_000;
}
|
Returns the maximum chunk length for the embedding engine, read from
EMBEDDING_MODEL_MAX_CHUNK_LENGTH and defaulting to 1,000.
@returns {number}
|
maximumChunkLength
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/helpers/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/helpers/index.js
|
MIT
|
function toChunks(arr, size) {
return Array.from({ length: Math.ceil(arr.length / size) }, (_v, i) =>
arr.slice(i * size, i * size + size)
);
}
|
Splits an array into chunks of at most the given size.
@param {Array} arr - The array to split.
@param {number} size - The maximum size of each chunk.
@returns {Array[]} The array split into chunks.
|
toChunks
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/helpers/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/helpers/index.js
|
MIT
|
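A quick usage example of toChunks showing the trailing partial chunk (the function is copied here so the snippet runs on its own):
function toChunks(arr, size) {
  return Array.from({ length: Math.ceil(arr.length / size) }, (_v, i) =>
    arr.slice(i * size, i * size + size)
  );
}
console.log(toChunks([1, 2, 3, 4, 5], 2)); // [[1, 2], [3, 4], [5]]
console.log(toChunks([], 2)); // []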
constructor(model = "gpt-3.5-turbo") {
if (TokenManager.instance && TokenManager.currentModel === model) {
this.log("Returning existing instance for model:", model);
return TokenManager.instance;
}
this.model = model;
this.encoderName = this.#getEncodingFromModel(model);
this.encoder = getEncoding(this.encoderName);
TokenManager.instance = this;
TokenManager.currentModel = model;
this.log("Initialized new TokenManager instance for model:", model);
return this;
}
|
@class TokenManager
@notice
We cannot do estimation of tokens here like we do in the collector
because we need to know the model to do it.
Other issues are we also do reverse tokenization here for the chat history during cannonballing.
So here we are stuck doing the actual tokenization and encoding until we figure out what to do with prompt overflows.
|
constructor
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/helpers/tiktoken.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/helpers/tiktoken.js
|
MIT
|
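The constructor above memoizes a single instance per model. A minimal sketch of that constructor-level singleton pattern, independent of tiktoken so it runs on its own (class and field names are illustrative):
class CachedEncoder {
  static instance = null;
  static currentModel = null;

  constructor(model = "gpt-3.5-turbo") {
    // Reuse the existing instance when the requested model matches.
    if (CachedEncoder.instance && CachedEncoder.currentModel === model) {
      return CachedEncoder.instance;
    }
    this.model = model;
    CachedEncoder.instance = this;
    CachedEncoder.currentModel = model;
  }
}

const a = new CachedEncoder("gpt-3.5-turbo");
const b = new CachedEncoder("gpt-3.5-turbo");
const c = new CachedEncoder("gpt-4");
console.log(a === b); // true  - same model reuses the instance
console.log(a === c); // false - a new model replaces the cached instance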
log(text, ...args) {
console.log(`\x1b[35m[TokenManager]\x1b[0m ${text}`, ...args);
}
|
@class TokenManager
@notice
We cannot do estimation of tokens here like we do in the collector
because we need to know the model to do it.
Other issues are we also do reverse tokenization here for the chat history during cannonballing.
So here we are stuck doing the actual tokenization and encoding until we figure out what to do with prompt overflows.
|
log
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/helpers/tiktoken.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/helpers/tiktoken.js
|
MIT
|
tokensFromString(input = "") {
try {
const tokens = this.encoder.encode(String(input), undefined, []);
return tokens;
} catch (e) {
console.error(e);
return [];
}
}
|
Tokenizes a string. An empty array of disallowedSpecials is passed so all special
tokens are treated as plain text and tokenized.
@param {string} input
@returns {number[]}
|
tokensFromString
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/helpers/tiktoken.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/helpers/tiktoken.js
|
MIT
|
bytesFromTokens(tokens = []) {
const bytes = this.encoder.decode(tokens);
return bytes;
}
|
Converts an array of tokens back to a string.
@param {number[]} tokens
@returns {string}
|
bytesFromTokens
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/helpers/tiktoken.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/helpers/tiktoken.js
|
MIT
|
countFromString(input = "") {
const tokens = this.tokensFromString(input);
return tokens.length;
}
|
Counts the number of tokens in a string.
@param {string} input
@returns {number}
|
countFromString
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/helpers/tiktoken.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/helpers/tiktoken.js
|
MIT
|
statsFrom(input) {
if (typeof input === "string") return this.countFromString(input);
// What is going on here?
// https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb Item 6.
// The only option is to estimate. From repeated testing using the static values in the code we are always 2 off,
// which means as of Nov 1, 2023 the additional factor on ln: 476 changed from 3 to 5.
if (Array.isArray(input)) {
const perMessageFactorTokens = input.length * 3;
const tokensFromContent = input.reduce(
(a, b) => a + this.countFromString(b.content),
0
);
const diffCoefficient = 5;
return perMessageFactorTokens + tokensFromContent + diffCoefficient;
}
throw new Error("Not a supported tokenized format.");
}
|
Estimates the number of tokens in a string or an array of chat messages.
@param {string | {content: string}[]} input
@returns {number}
|
statsFrom
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/helpers/tiktoken.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/helpers/tiktoken.js
|
MIT
|
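A worked example of the array-input estimation in statsFrom: with 2 messages whose contents tokenize to, say, 10 and 7 tokens, the estimate is 2 * 3 (per-message factor) + 17 (content tokens) + 5 (diff coefficient) = 28. The per-message token counts below are hypothetical; a real run depends on the encoder.
// Hypothetical per-message token counts stand in for countFromString results.
const contentTokenCounts = [10, 7];
const perMessageFactorTokens = contentTokenCounts.length * 3; // 6
const tokensFromContent = contentTokenCounts.reduce((a, b) => a + b, 0); // 17
const diffCoefficient = 5;
console.log(perMessageFactorTokens + tokensFromContent + diffCoefficient); // 28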
async function looksLikePostgresConnectionString(connectionString = null) {
if (!connectionString || !connectionString.startsWith("postgresql://"))
return "Invalid Postgres connection string. Must start with postgresql://";
if (connectionString.includes(" "))
return "Invalid Postgres connection string. Must not contain spaces.";
return null;
}
|
Validates the Postgres connection string for the PGVector options.
@param {string} connectionString - The Postgres connection string to validate.
@returns {string|null} - An error message if the connection string is invalid, otherwise null.
|
looksLikePostgresConnectionString
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/helpers/updateENV.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/helpers/updateENV.js
|
MIT
|
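Example inputs and the messages they produce (null meaning the string looks acceptable); the function is copied here and the DSN is hypothetical:
async function looksLikePostgresConnectionString(connectionString = null) {
  if (!connectionString || !connectionString.startsWith("postgresql://"))
    return "Invalid Postgres connection string. Must start with postgresql://";
  if (connectionString.includes(" "))
    return "Invalid Postgres connection string. Must not contain spaces.";
  return null;
}

looksLikePostgresConnectionString("postgresql://user:pass@localhost:5432/db") // hypothetical DSN
  .then(console.log); // null
looksLikePostgresConnectionString("mysql://localhost")
  .then(console.log); // "Invalid Postgres connection string. Must start with postgresql://"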
async function validatePGVectorConnectionString(key, prevValue, nextValue) {
const envKey = KEY_MAPPING[key].envKey;
if (prevValue === nextValue) return; // If the value is the same as the previous value, don't validate it.
if (!nextValue) return; // If the value is not set, don't validate it.
if (nextValue === process.env[envKey]) return; // If the value is the same as the current connection string, don't validate it.
const { PGVector } = require("../vectorDbProviders/pgvector");
const { error, success } = await PGVector.validateConnection({
connectionString: nextValue,
});
if (!success) return error;
// Set the ENV variable for the PGVector connection string early so we can use it in the table check.
process.env[envKey] = nextValue;
return null;
}
|
Validates the Postgres connection string for the PGVector options.
@param {string} key - The ENV key we are validating.
@param {string} prevValue - The previous value of the key.
@param {string} nextValue - The next value of the key.
@returns {string} - An error message if the connection string is invalid, otherwise null.
|
validatePGVectorConnectionString
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/helpers/updateENV.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/helpers/updateENV.js
|
MIT
|
async function validatePGVectorTableName(key, prevValue, nextValue) {
const envKey = KEY_MAPPING[key].envKey;
if (prevValue === nextValue) return; // If the value is the same as the previous value, don't validate it.
if (!nextValue) return; // If the value is not set, don't validate it.
if (nextValue === process.env[envKey]) return; // If the value is the same as the current table name, don't validate it.
if (!process.env.PGVECTOR_CONNECTION_STRING) return; // if connection string is not set, don't validate it since it will fail.
const { PGVector } = require("../vectorDbProviders/pgvector");
const { error, success } = await PGVector.validateConnection({
connectionString: process.env.PGVECTOR_CONNECTION_STRING,
tableName: nextValue,
});
if (!success) return error;
return null;
}
|
Validates the Postgres table name for the PGVector options.
- Table should not already exist in the database.
@param {string} key - The ENV key we are validating.
@param {string} prevValue - The previous value of the key.
@param {string} nextValue - The next value of the key.
@returns {string} - An error message if the table name is invalid, otherwise null.
|
validatePGVectorTableName
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/helpers/updateENV.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/helpers/updateENV.js
|
MIT
|
async function updateENV(newENVs = {}, force = false, userId = null) {
let error = "";
const validKeys = Object.keys(KEY_MAPPING);
const ENV_KEYS = Object.keys(newENVs).filter(
(key) => validKeys.includes(key) && !newENVs[key].includes("******") // strip out answers where the value is all asterisks
);
const newValues = {};
for (const key of ENV_KEYS) {
const {
envKey,
checks,
preUpdate = [],
postUpdate = [],
} = KEY_MAPPING[key];
const prevValue = process.env[envKey];
const nextValue = newENVs[key];
let errors = await executeValidationChecks(checks, nextValue, force);
// If there are any errors from regular simple validation checks
// exit early.
if (errors.length > 0) {
error += errors.join("\n");
break;
}
// Accumulate errors from preUpdate functions
errors = [];
for (const preUpdateFunc of preUpdate) {
const errorMsg = await preUpdateFunc(key, prevValue, nextValue);
if (!!errorMsg && typeof errorMsg === "string") errors.push(errorMsg);
}
// If there are any errors from preUpdate functions
// exit early.
if (errors.length > 0) {
error += errors.join("\n");
break;
}
newValues[key] = nextValue;
process.env[envKey] = nextValue;
for (const postUpdateFunc of postUpdate)
await postUpdateFunc(key, prevValue, nextValue);
}
await logChangesToEventLog(newValues, userId);
if (process.env.NODE_ENV === "production") dumpENV();
return { newValues, error: error?.length > 0 ? error : false };
}
|
Updates the process ENV with the provided key/value pairs after running validation
checks and any pre/post update hooks for each key.
@param {Object} newENVs - Map of setting keys to their new values.
@param {boolean} force - Whether to force certain validation checks.
@param {string|null} userId - The id of the user making the change (for event logging).
@returns {Promise<{newValues: Object, error: string|false}>} - The applied values and any accumulated error message.
|
updateENV
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/helpers/updateENV.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/helpers/updateENV.js
|
MIT
|
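updateENV destructures each KEY_MAPPING entry into envKey, checks, preUpdate, and postUpdate. A hedged sketch of what one entry could look like based only on that destructuring; the key name and validator below are illustrative, not the repo's actual mapping:
// Illustrative entry shape only - the real KEY_MAPPING lives in updateENV.js.
const KEY_MAPPING_EXAMPLE = {
  ExampleApiKey: {
    envKey: "EXAMPLE_API_KEY", // hypothetical ENV variable
    checks: [
      // Simple validators return an error string or null.
      (value) => (!value ? "API key cannot be empty." : null),
    ],
    preUpdate: [], // async (key, prevValue, nextValue) => error string | null
    postUpdate: [], // async (key, prevValue, nextValue) => void
  },
};
console.log(Object.keys(KEY_MAPPING_EXAMPLE.ExampleApiKey)); // ["envKey", "checks", "preUpdate", "postUpdate"]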
async function executeValidationChecks(checks, value, force) {
const results = await Promise.all(
checks.map((validator) => validator(value, force))
);
return results.filter((err) => typeof err === "string");
}
|
Runs each validation check against the value and collects any error messages.
@param {Function[]} checks - The validator functions to run.
@param {string} value - The value being validated.
@param {boolean} force - Whether to force validation.
@returns {Promise<string[]>} - The error messages returned by the validators.
|
executeValidationChecks
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/helpers/updateENV.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/helpers/updateENV.js
|
MIT
|
async function logChangesToEventLog(newValues = {}, userId = null) {
const { EventLogs } = require("../../models/eventLogs");
const eventMapping = {
LLMProvider: "update_llm_provider",
EmbeddingEngine: "update_embedding_engine",
VectorDB: "update_vector_db",
};
for (const [key, eventName] of Object.entries(eventMapping)) {
if (!newValues.hasOwnProperty(key)) continue;
await EventLogs.logEvent(eventName, {}, userId);
}
return;
}
|
Logs LLM provider, embedding engine, and vector database changes to the event log.
@param {Object} newValues - The updated setting values.
@param {string|null} userId - The id of the user making the change.
@returns {Promise<void>}
|
logChangesToEventLog
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/helpers/updateENV.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/helpers/updateENV.js
|
MIT
|
function dumpENV() {
const fs = require("fs");
const path = require("path");
const frozenEnvs = {};
const protectedKeys = [
...Object.values(KEY_MAPPING).map((values) => values.envKey),
// Manually Add Keys here which are not already defined in KEY_MAPPING
// and are either managed or manually set ENV key:values.
"STORAGE_DIR",
"SERVER_PORT",
// For persistent data encryption
"SIG_KEY",
"SIG_SALT",
// Password Schema Keys if present.
"PASSWORDMINCHAR",
"PASSWORDMAXCHAR",
"PASSWORDLOWERCASE",
"PASSWORDUPPERCASE",
"PASSWORDNUMERIC",
"PASSWORDSYMBOL",
"PASSWORDREQUIREMENTS",
// HTTPS SETUP KEYS
"ENABLE_HTTPS",
"HTTPS_CERT_PATH",
"HTTPS_KEY_PATH",
// Other Configuration Keys
"DISABLE_VIEW_CHAT_HISTORY",
// Simple SSO
"SIMPLE_SSO_ENABLED",
"SIMPLE_SSO_NO_LOGIN",
// Community Hub
"COMMUNITY_HUB_BUNDLE_DOWNLOADS_ENABLED",
// Nvidia NIM Keys that are automatically managed
"NVIDIA_NIM_LLM_MODEL_TOKEN_LIMIT",
// OCR Language Support
"TARGET_OCR_LANG",
// Collector API common ENV - allows bypassing URL validation checks
"COLLECTOR_ALLOW_ANY_IP",
];
// Simple sanitization of each value to prevent ENV injection via newline or quote escaping.
function sanitizeValue(value) {
const offendingChars =
/[\n\r\t\v\f\u0085\u00a0\u1680\u180e\u2000-\u200a\u2028\u2029\u202f\u205f\u3000"'`#]/;
const firstOffendingCharIndex = value.search(offendingChars);
if (firstOffendingCharIndex === -1) return value;
return value.substring(0, firstOffendingCharIndex);
}
for (const key of protectedKeys) {
const envValue = process.env?.[key] || null;
if (!envValue) continue;
frozenEnvs[key] = process.env?.[key] || null;
}
var envResult = `# Auto-dump ENV from system call on ${new Date().toTimeString()}\n`;
envResult += Object.entries(frozenEnvs)
.map(([key, value]) => `${key}='${sanitizeValue(value)}'`)
.join("\n");
const envPath = path.join(__dirname, "../../.env");
fs.writeFileSync(envPath, envResult, { encoding: "utf8", flag: "w" });
return true;
}
|
Dumps the protected and managed ENV keys to the .env file so that changes made at
runtime persist across restarts (production only). Values are sanitized to prevent
ENV injection.
@returns {boolean}
|
dumpENV
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/helpers/updateENV.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/helpers/updateENV.js
|
MIT
|
function sanitizeValue(value) {
const offendingChars =
/[\n\r\t\v\f\u0085\u00a0\u1680\u180e\u2000-\u200a\u2028\u2029\u202f\u205f\u3000"'`#]/;
const firstOffendingCharIndex = value.search(offendingChars);
if (firstOffendingCharIndex === -1) return value;
return value.substring(0, firstOffendingCharIndex);
}
|
Truncates an ENV value at the first character that could enable ENV injection
(newlines, quotes, comments, or unusual whitespace).
@param {string} value - The ENV value to sanitize.
@returns {string} - The sanitized value.
|
sanitizeValue
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/helpers/updateENV.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/helpers/updateENV.js
|
MIT
|
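Examples of how sanitizeValue truncates at the first offending character (the function is copied here so the snippet runs on its own):
function sanitizeValue(value) {
  const offendingChars =
    /[\n\r\t\v\f\u0085\u00a0\u1680\u180e\u2000-\u200a\u2028\u2029\u202f\u205f\u3000"'`#]/;
  const firstOffendingCharIndex = value.search(offendingChars);
  if (firstOffendingCharIndex === -1) return value;
  return value.substring(0, firstOffendingCharIndex);
}

console.log(sanitizeValue("sk-abc123")); // "sk-abc123" (unchanged)
console.log(sanitizeValue("value'\nMALICIOUS=1")); // "value" - cut at the quote
console.log(sanitizeValue("plain # trailing comment")); // "plain " - cut at the #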
function fillSourceWindow({
nDocs = 4, // Number of documents
searchResults = [], // Sources from similarity search
history = [], // Raw history
filterIdentifiers = [], // pinned document sources
} = config) {
const sources = [...searchResults];
if (sources.length >= nDocs || history.length === 0) {
return {
sources,
contextTexts: sources.map((src) => src.text),
};
}
const log = (text, ...args) => {
console.log(`\x1b[36m[fillSourceWindow]\x1b[0m ${text}`, ...args);
};
log(
`Need to backfill ${nDocs - searchResults.length} chunks to fill in the source window for RAG!`
);
const seenChunks = new Set(searchResults.map((source) => source.id));
// We need to reverse again because we need to iterate from bottom of array (most recent chats)
// Looking at this function by itself you may think that this loop could be extreme for long history chats,
// but this was already handled where `history` we derived. This comes from `recentChatHistory` which
// includes a limit for history (default: 20). So this loop does not look as extreme as on first glance.
for (const chat of history.reverse()) {
if (sources.length >= nDocs) {
log(
`Citations backfilled to ${nDocs} references from ${searchResults.length} original citations.`
);
break;
}
const chatSources =
safeJsonParse(chat.response, { sources: [] })?.sources || [];
if (!chatSources?.length || !Array.isArray(chatSources)) continue;
const validSources = chatSources.filter((source) => {
return (
filterIdentifiers.includes(sourceIdentifier(source)) == false && // source cannot be in current pins
source.hasOwnProperty("score") && // source cannot have come from a pinned document that was previously pinned
source.hasOwnProperty("text") && // source has a valid text property we can use
seenChunks.has(source.id) == false // is unique
);
});
for (const validSource of validSources) {
if (sources.length >= nDocs) break;
sources.push(validSource);
seenChunks.add(validSource.id);
}
}
return {
sources,
contextTexts: sources.map((src) => src.text),
};
}
|
Fill the sources window with the priority of
1. Pinned documents (handled prior to function)
2. VectorSearch results
3. prevSources in chat history - starting from most recent.
Ensuring the window always has the desired amount of sources so that followup questions
in any chat mode have relevant sources, but not infinite sources. This function is used during chatting
and allows follow-up questions within a query chat that otherwise would have zero sources and would fail.
The added benefit is that during regular RAG chat, we have better coherence of citations that otherwise would
also yield no results with no need for a ReRanker to run and take much longer to return a response.
The side effect is that unrelated follow-up questions may now show citations that look irrelevant; however,
we would rather optimize for the correctness of a response than avoid showing extraneous sources. Because search
results always take priority, an unrelated question that produces its own RAG results still functions as desired, and because the
backfill comes from previous history, "changing context" mid-chat is handled appropriately.
example:
---previous implementation---
prompt 1: "What is anythingllm?" -> possibly get 4 good sources
prompt 2: "Tell me some features" -> possible get 0 - 1 maybe relevant source + previous answer response -> bad response due to bad context mgmt
---next implementation---
prompt 1: "What is anythingllm?" -> possibly get 4 good sources
prompt 2: "Tell me some features" -> possible get 0 - 1 maybe relevant source + previous answer response -> backfill with 3 good sources from previous -> much better response
@param {Object} config - params to call
@param {number} config.nDocs - fill size of the window
@param {object[]} config.searchResults - vector `similarityResponse` results for .sources
@param {object[]} config.history - rawHistory of chat containing sources
@param {string[]} config.filterIdentifiers - Pinned document identifiers to prevent duplicate context
@returns {{
contextTexts: string[],
sources: object[],
}} - Array of sources that should be added to window
|
fillSourceWindow
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/helpers/chat/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/helpers/chat/index.js
|
MIT
|
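To make the backfill concrete, here is the shape of a call under hypothetical data: one fresh search result plus history whose stored responses carry reusable sources. Field names mirror what the function reads; the values are invented, and the call itself is only described because fillSourceWindow depends on safeJsonParse/sourceIdentifier from the repo.
// Hypothetical inputs for fillSourceWindow.
const searchResults = [
  { id: "chunk-1", text: "AnythingLLM is a private RAG app.", score: 0.82 },
];
const history = [
  {
    prompt: "What is anythingllm?",
    // Stored responses are JSON strings; their sources can be reused.
    response: JSON.stringify({
      text: "AnythingLLM is ...",
      sources: [
        { id: "chunk-2", text: "It supports many vector databases.", score: 0.78 },
        { id: "chunk-3", text: "It supports many LLM providers.", score: 0.74 },
      ],
    }),
  },
];
// fillSourceWindow({ nDocs: 4, searchResults, history, filterIdentifiers: [] })
// would return 3 sources: chunk-1 from search plus chunk-2/chunk-3 backfilled.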
log = (text, ...args) => {
console.log(`\x1b[36m[fillSourceWindow]\x1b[0m ${text}`, ...args);
}
|
Fill the sources window with the priority of
1. Pinned documents (handled prior to function)
2. VectorSearch results
3. prevSources in chat history - starting from most recent.
Ensuring the window always has the desired amount of sources so that followup questions
in any chat mode have relevant sources, but not infinite sources. This function is used during chatting
and allows follow-up questions within a query chat that otherwise would have zero sources and would fail.
The added benefit is that during regular RAG chat, we have better coherence of citations that otherwise would
also yield no results with no need for a ReRanker to run and take much longer to return a response.
The side effect is that unrelated follow-up questions may now show citations that look irrelevant; however,
we would rather optimize for the correctness of a response than avoid showing extraneous sources. Because search
results always take priority, an unrelated question that produces its own RAG results still functions as desired, and because the
backfill comes from previous history, "changing context" mid-chat is handled appropriately.
example:
---previous implementation---
prompt 1: "What is anythingllm?" -> possibly get 4 good sources
prompt 2: "Tell me some features" -> possible get 0 - 1 maybe relevant source + previous answer response -> bad response due to bad context mgmt
---next implementation---
prompt 1: "What is anythingllm?" -> possibly get 4 good sources
prompt 2: "Tell me some features" -> possible get 0 - 1 maybe relevant source + previous answer response -> backfill with 3 good sources from previous -> much better response
@param {Object} config - params to call
@param {number} config.nDocs - fill size of the window
@param {object[]} config.searchResults - vector `similarityResponse` results for .sources
@param {object[]} config.history - rawHistory of chat containing sources
@param {string[]} config.filterIdentifiers - Pinned document identifiers to prevent duplicate context
@returns {{
contextTexts: string[],
sources: object[],
}} - Array of sources that should be added to window
|
log
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/helpers/chat/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/helpers/chat/index.js
|
MIT
|
static countTokens(messages = []) {
try {
return this.tokenManager.statsFrom(messages);
} catch (e) {
return 0;
}
}
|
Counts the tokens in the messages.
@param {Array<{content: string}>} messages - the messages sent to the LLM so we can calculate the prompt tokens since most providers do not return this on stream
@returns {number}
|
countTokens
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/helpers/chat/LLMPerformanceMonitor.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/helpers/chat/LLMPerformanceMonitor.js
|
MIT
|
static measureAsyncFunction(func) {
return (async () => {
const start = Date.now();
const output = await func; // is a promise
const end = Date.now();
return { output, duration: (end - start) / 1000 };
})();
}
|
Awaits a promise and returns its output along with the duration (in seconds) of the call.
@param {Promise<any>} func - The pending promise to measure.
@returns {Promise<{output: any, duration: number}>}
|
measureAsyncFunction
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/helpers/chat/LLMPerformanceMonitor.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/helpers/chat/LLMPerformanceMonitor.js
|
MIT
|
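Usage sketch: pass the pending promise itself (not a function) and read the output plus the elapsed seconds. The method is re-declared on a sketch class here so the snippet is self-contained; timing values are approximate.
class LLMPerformanceMonitorSketch {
  // Same shape as measureAsyncFunction above: accepts a pending promise.
  static measureAsyncFunction(func) {
    return (async () => {
      const start = Date.now();
      const output = await func;
      const end = Date.now();
      return { output, duration: (end - start) / 1000 };
    })();
  }
}

const pending = new Promise((resolve) =>
  setTimeout(() => resolve("hello"), 250)
);
LLMPerformanceMonitorSketch.measureAsyncFunction(pending).then(
  ({ output, duration }) => console.log(output, `${duration.toFixed(2)}s`) // "hello" ~0.25s
);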
static async measureStream(
func,
messages = [],
runPromptTokenCalculation = true
) {
const stream = await func;
stream.start = Date.now();
stream.duration = 0;
stream.metrics = {
completion_tokens: 0,
prompt_tokens: runPromptTokenCalculation ? this.countTokens(messages) : 0,
total_tokens: 0,
outputTps: 0,
duration: 0,
};
stream.endMeasurement = (reportedUsage = {}) => {
const end = Date.now();
const duration = (end - stream.start) / 1000;
// Merge the reported usage with the existing metrics
// so the math in the metrics object is correct when calculating
stream.metrics = {
...stream.metrics,
...reportedUsage,
};
stream.metrics.total_tokens =
stream.metrics.prompt_tokens + (stream.metrics.completion_tokens || 0);
stream.metrics.outputTps = stream.metrics.completion_tokens / duration;
stream.metrics.duration = duration;
return stream.metrics;
};
return stream;
}
|
Wraps a completion stream and attaches a start time and duration property to the stream.
Also attaches an `endMeasurement` method to the stream that will calculate the duration of the stream and metrics.
@param {Promise<OpenAICompatibleStream>} func
@param {Messages} messages - the messages sent to the LLM so we can calculate the prompt tokens since most providers do not return this on stream
@param {boolean} runPromptTokenCalculation - whether to run the prompt token calculation to estimate the `prompt_tokens` metric. This is useful for providers that do not return this on stream.
@returns {Promise<MonitoredStream>}
|
measureStream
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/helpers/chat/LLMPerformanceMonitor.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/helpers/chat/LLMPerformanceMonitor.js
|
MIT
|
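A self-contained simulation of the measureStream/endMeasurement contract, using a fake async iterable in place of a real provider stream; the prompt token count and delays are illustrative.
// A fake async iterable stands in for an OpenAI-compatible stream.
async function* fakeStream() {
  for (const token of ["Hello", " ", "world"]) {
    await new Promise((r) => setTimeout(r, 20)); // simulate network latency
    yield { choices: [{ delta: { content: token } }] };
  }
}

async function demo() {
  const stream = fakeStream();
  stream.start = Date.now();
  stream.metrics = { completion_tokens: 0, prompt_tokens: 12 }; // hypothetical prompt estimate
  stream.endMeasurement = (reportedUsage = {}) => {
    const duration = (Date.now() - stream.start) / 1000;
    stream.metrics = { ...stream.metrics, ...reportedUsage };
    stream.metrics.total_tokens =
      stream.metrics.prompt_tokens + (stream.metrics.completion_tokens || 0);
    stream.metrics.outputTps = stream.metrics.completion_tokens / duration;
    stream.metrics.duration = duration;
    return stream.metrics;
  };

  let fullText = "";
  for await (const chunk of stream) {
    fullText += chunk.choices[0].delta.content;
    stream.metrics.completion_tokens++; // estimate when no usage is reported
  }
  console.log(fullText); // "Hello world"
  console.log(stream.endMeasurement()); // prompt/completion/total tokens + tps + duration
}
demo();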
function handleDefaultStreamResponseV2(response, stream, responseProps) {
const { uuid = uuidv4(), sources = [] } = responseProps;
// Why are we doing this?
// OpenAI does enable usage metrics in the stream response, but:
// 1. This parameter is not available in our current API version (TODO: update)
// 2. The usage metrics are not available in _every_ provider that uses this function
// 3. We need to track the usage metrics for every provider that uses this function - not just OpenAI
// Other keys are added by the LLMPerformanceMonitor.measureStream method
let hasUsageMetrics = false;
let usage = {
// prompt_tokens can be in this object if the provider supports it - otherwise we manually count it
// When the stream is created in the LLMProviders `streamGetChatCompletion` `LLMPerformanceMonitor.measureStream` call.
completion_tokens: 0,
};
return new Promise(async (resolve) => {
let fullText = "";
// Establish listener to early-abort a streaming response
// in case things go sideways or the user does not like the response.
// We preserve the generated text but continue as if chat was completed
// to preserve previously generated content.
const handleAbort = () => {
stream?.endMeasurement(usage);
clientAbortedHandler(resolve, fullText);
};
response.on("close", handleAbort);
// Now handle the chunks from the streamed response and append to fullText.
try {
for await (const chunk of stream) {
const message = chunk?.choices?.[0];
const token = message?.delta?.content;
// If we see usage metrics in the chunk, we can use them directly
// instead of estimating them, but we only want to assign values if
// the response object is the exact same key:value pair we expect.
if (
chunk.hasOwnProperty("usage") && // exists
!!chunk.usage && // is not null
Object.values(chunk.usage).length > 0 // has values
) {
if (chunk.usage.hasOwnProperty("prompt_tokens")) {
usage.prompt_tokens = Number(chunk.usage.prompt_tokens);
}
if (chunk.usage.hasOwnProperty("completion_tokens")) {
hasUsageMetrics = true; // to stop estimating counter
usage.completion_tokens = Number(chunk.usage.completion_tokens);
}
}
if (token) {
fullText += token;
// If we never saw a usage metric, we can estimate them by number of completion chunks
if (!hasUsageMetrics) usage.completion_tokens++;
writeResponseChunk(response, {
uuid,
sources: [],
type: "textResponseChunk",
textResponse: token,
close: false,
error: false,
});
}
// LocalAi returns '' and others return null on chunks - the last chunk is not "" or null.
// Either way, the key `finish_reason` must be present to determine ending chunk.
if (
message?.hasOwnProperty("finish_reason") && // Got valid message and it is an object with finish_reason
message.finish_reason !== "" &&
message.finish_reason !== null
) {
writeResponseChunk(response, {
uuid,
sources,
type: "textResponseChunk",
textResponse: "",
close: true,
error: false,
});
response.removeListener("close", handleAbort);
stream?.endMeasurement(usage);
resolve(fullText);
break; // Break streaming when a valid finish_reason is first encountered
}
}
} catch (e) {
console.log(`\x1b[43m\x1b[34m[STREAMING ERROR]\x1b[0m ${e.message}`);
writeResponseChunk(response, {
uuid,
type: "abort",
textResponse: null,
sources: [],
close: true,
error: e.message,
});
stream?.endMeasurement(usage);
resolve(fullText); // Return what we currently have - if anything.
}
});
}
|
Handles the default stream response for a chat.
@param {import("express").Response} response
@param {import('./LLMPerformanceMonitor').MonitoredStream} stream
@param {Object} responseProps
@returns {Promise<string>}
|
handleDefaultStreamResponseV2
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/helpers/chat/responses.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/helpers/chat/responses.js
|
MIT
|
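For context, a hedged sketch of how an endpoint might hand a measured stream to handleDefaultStreamResponseV2; the route shape, header setup, provider wiring, and require path are assumptions.

// Illustrative only: an Express handler that streams a chat response.
// The provider object, prompt construction, and require path are assumptions.
const { handleDefaultStreamResponseV2 } = require("./responses");

async function streamChatEndpoint(request, response, LLMConnector) {
  response.setHeader("Content-Type", "text/event-stream");
  response.setHeader("Cache-Control", "no-cache");
  const messages = [{ role: "user", content: request.body.message }];
  const stream = await LLMConnector.streamGetChatCompletion(messages);
  const fullText = await handleDefaultStreamResponseV2(response, stream, {
    uuid: request.body.uuid,
    sources: [],
  });
  response.end();
  return fullText;
}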
handleAbort = () => {
stream?.endMeasurement(usage);
clientAbortedHandler(resolve, fullText);
}
|
Handles a client-side abort of the streaming response by ending the
performance measurement and resolving the stream promise with the text
generated so far.
@returns {void}
|
handleAbort
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/helpers/chat/responses.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/helpers/chat/responses.js
|
MIT
|
function convertToChatHistory(history = []) {
const formattedHistory = [];
for (const record of history) {
const { prompt, response, createdAt, feedbackScore = null, id } = record;
const data = JSON.parse(response);
// In the event that a bad response was stored - we should skip its entire record
// because it was likely an error and cannot be used in chats and will fail to render on UI.
if (typeof prompt !== "string") {
console.log(
`[convertToChatHistory] ChatHistory #${record.id} prompt property is not a string - skipping record.`
);
continue;
} else if (typeof data.text !== "string") {
console.log(
`[convertToChatHistory] ChatHistory #${record.id} response.text property is not a string - skipping record.`
);
continue;
}
formattedHistory.push([
{
role: "user",
content: prompt,
sentAt: moment(createdAt).unix(),
attachments: data?.attachments ?? [],
chatId: id,
},
{
type: data?.type || "chart",
role: "assistant",
content: data.text,
sources: data.sources || [],
chatId: id,
sentAt: moment(createdAt).unix(),
feedbackScore,
metrics: data?.metrics || {},
},
]);
}
return formattedHistory.flat();
}
|
Converts stored workspace chat records into the paired user/assistant
history format rendered by the frontend, skipping malformed records.
@param {Object[]} history - The stored chat records to convert
@returns {Object[]} Flattened array of user and assistant history messages
|
convertToChatHistory
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/helpers/chat/responses.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/helpers/chat/responses.js
|
MIT
|
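A hedged example of the record shape convertToChatHistory consumes; the sample row below is fabricated for illustration only.

// Illustrative input/output for convertToChatHistory (sample data, assumed require path).
const { convertToChatHistory } = require("./responses");

const sampleRecords = [
  {
    id: 1,
    prompt: "What is AnythingLLM?",
    response: JSON.stringify({
      text: "An all-in-one AI application.",
      sources: [],
      type: "chat",
      attachments: [],
      metrics: { completion_tokens: 8 },
    }),
    createdAt: new Date().toISOString(),
    feedbackScore: null,
  },
];

const history = convertToChatHistory(sampleRecords);
// => [ { role: "user", content, sentAt, chatId, attachments },
//      { role: "assistant", content, sources, metrics, feedbackScore, ... } ]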
function convertToPromptHistory(history = []) {
const formattedHistory = [];
for (const record of history) {
const { prompt, response } = record;
const data = JSON.parse(response);
// In the event that a bad response was stored - we should skip its entire record
// because it was likely an error and cannot be used in chats and will fail to render on UI.
if (typeof prompt !== "string") {
console.log(
`[convertToPromptHistory] ChatHistory #${record.id} prompt property is not a string - skipping record.`
);
continue;
} else if (typeof data.text !== "string") {
console.log(
`[convertToPromptHistory] ChatHistory #${record.id} response.text property is not a string - skipping record.`
);
continue;
}
formattedHistory.push([
{
role: "user",
content: prompt,
// if there are attachments, add them as a property to the user message so we can reuse them in chat history later if supported by the llm.
...(data?.attachments?.length > 0
? { attachments: data?.attachments }
: {}),
},
{
role: "assistant",
content: data.text,
},
]);
}
return formattedHistory.flat();
}
|
Converts a chat history to a prompt history.
@param {Object[]} history - The chat history to convert
@returns {{role: string, content: string, attachments?: import("..").Attachment}[]}
|
convertToPromptHistory
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/helpers/chat/responses.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/helpers/chat/responses.js
|
MIT
|
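Similarly, a small hedged example of feeding a stored record through convertToPromptHistory to build LLM-ready messages; the sample record is fabricated.

// Illustrative: building LLM-ready messages from a stored record (sample data only).
const { convertToPromptHistory } = require("./responses");

const priorMessages = convertToPromptHistory([
  {
    id: 2,
    prompt: "Summarize my last upload.",
    response: JSON.stringify({ text: "Here is the summary...", attachments: [] }),
  },
]);
// => [ { role: "user", content: "Summarize my last upload." },
//      { role: "assistant", content: "Here is the summary..." } ]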
function writeResponseChunk(response, data) {
response.write(`data: ${JSON.stringify(data)}\n\n`);
return;
}
|
Writes a single server-sent event (SSE) data chunk to the HTTP response.
@param {import("express").Response} response
@param {Object} data - The chunk payload to serialize and send
@returns {void}
|
writeResponseChunk
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/helpers/chat/responses.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/helpers/chat/responses.js
|
MIT
|
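A hedged sketch of writeResponseChunk in use; the surrounding SSE setup and require path are assumptions about the calling endpoint.

// Illustrative SSE usage of writeResponseChunk; require path is an assumption.
const { writeResponseChunk } = require("./responses");

function sendTextChunk(response, uuid, token) {
  // Emits one `data: {...}\n\n` event in the shape the streaming frontend expects.
  writeResponseChunk(response, {
    uuid,
    sources: [],
    type: "textResponseChunk",
    textResponse: token,
    close: false,
    error: false,
  });
}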
function formatChatHistory(
chatHistory = [],
formatterFunction,
mode = "asProperty"
) {
return chatHistory.map((historicalMessage) => {
if (
historicalMessage?.role !== "user" || // Only user messages can have attachments
!historicalMessage?.attachments || // If there are no attachments, we can skip this
!historicalMessage.attachments.length // If there is an array but it is empty, we can skip this
)
return historicalMessage;
// Some providers, like Ollama, expect the content to be embedded in the message object.
if (mode === "spread") {
return {
role: historicalMessage.role,
...formatterFunction({
userPrompt: historicalMessage.content,
attachments: historicalMessage.attachments,
}),
};
}
// Most providers expect the content to be a property of the message object formatted like OpenAI models.
return {
role: historicalMessage.role,
content: formatterFunction({
userPrompt: historicalMessage.content,
attachments: historicalMessage.attachments,
}),
};
});
}
|
Formats the chat history to re-use attachments in the chat history
that might have existed in the conversation earlier.
@param {{role:string, content:string, attachments?: Object[]}[]} chatHistory
@param {function} formatterFunction - The function to format the chat history from the llm provider
@param {('asProperty'|'spread')} mode - "asProperty" or "spread". Determines how the content is formatted in the message object.
@returns {object[]}
|
formatChatHistory
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/helpers/chat/responses.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/helpers/chat/responses.js
|
MIT
|
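A hedged example of both formatChatHistory modes; the formatters below are simplified stand-ins, and the attachment field names are assumptions.

// Illustrative formatters; real providers build richer multi-part content.
const { formatChatHistory } = require("./responses");

function toOpenAiContent({ userPrompt, attachments = [] }) {
  return [
    { type: "text", text: userPrompt },
    ...attachments.map((att) => ({
      type: "image_url",
      image_url: { url: att.contentString }, // field name assumed
    })),
  ];
}

const history = [
  {
    role: "user",
    content: "Describe this image",
    attachments: [{ contentString: "data:image/png;base64,..." }],
  },
  { role: "assistant", content: "It appears to be a bar chart." },
];

// Default "asProperty" mode: content becomes the formatted value.
const asProperty = formatChatHistory(history, toOpenAiContent);

// "spread" mode (Ollama-style): the formatter's keys merge into the message object.
const spread = formatChatHistory(
  history,
  ({ userPrompt, attachments }) => ({
    content: userPrompt,
    images: attachments.map((att) => att.contentString),
  }),
  "spread"
);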
function setLogger() {
return new Logger().logger;
}
|
Sets and overrides Console methods for logging when called.
This is a singleton method and will not create multiple loggers.
@returns {winston.Logger | console} - instantiated logger interface.
|
setLogger
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/logger/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/logger/index.js
|
MIT
|
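A minimal usage note, assuming setLogger is invoked once at server boot; the require path and export shape are assumptions.

// Assumed boot-time usage: call once; later console calls route through the returned logger.
const { setLogger } = require("./utils/logger"); // path and export shape assumed
setLogger();
console.log("Server booted"); // now emitted via the winston-backed logger where configured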
async activeMCPServers() {
await this.bootMCPServers();
return Object.keys(this.mcps).flatMap((name) => `@@mcp_${name}`);
}
|
Get all of the active MCP servers as plugins we can load into agents.
This will also boot all MCP servers if they have not been started yet.
@returns {Promise<string[]>} Array of MCP server names in @@mcp_{name} format
|
activeMCPServers
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/MCP/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/MCP/index.js
|
MIT
|
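A hedged sketch of consuming the @@mcp_{name} identifiers when assembling an agent's function list; the class name, require path, and surrounding agent wiring are assumptions.

// Illustrative: merging MCP server identifiers into an agent's function list.
// Class name, require path, and constructor usage are assumptions for this sketch.
const MCPCompatibilityLayer = require("./utils/MCP");

async function collectAgentFunctions(baseFunctions = []) {
  const mcp = new MCPCompatibilityLayer();
  const mcpIdentifiers = await mcp.activeMCPServers(); // e.g. ["@@mcp_docker-mcp"]
  return [...baseFunctions, ...mcpIdentifiers];
}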
async convertServerToolsToPlugins(name, _aibitat = null) {
const mcp = this.mcps[name];
if (!mcp) return null;
const tools = (await mcp.listTools()).tools;
if (!tools.length) return null;
const plugins = [];
for (const tool of tools) {
plugins.push({
name: `${name}-${tool.name}`,
description: tool.description,
plugin: function () {
return {
name: `${name}-${tool.name}`,
setup: (aibitat) => {
aibitat.function({
super: aibitat,
name: `${name}-${tool.name}`,
controller: new AbortController(),
description: tool.description,
examples: [],
parameters: {
$schema: "http://json-schema.org/draft-07/schema#",
...tool.inputSchema,
},
handler: async function (args = {}) {
try {
aibitat.handlerProps.log(
`Executing MCP server: ${name}:${tool.name} with args:`,
args
);
aibitat.introspect(
`Executing MCP server: ${name} with ${JSON.stringify(args, null, 2)}`
);
const result = await mcp.callTool({
name: tool.name,
arguments: args,
});
aibitat.handlerProps.log(
`MCP server: ${name}:${tool.name} completed successfully`,
result
);
aibitat.introspect(
`MCP server: ${name}:${tool.name} completed successfully`
);
return typeof result === "object"
? JSON.stringify(result)
: String(result);
} catch (error) {
aibitat.handlerProps.log(
`MCP server: ${name}:${tool.name} failed with error:`,
error
);
aibitat.introspect(
`MCP server: ${name}:${tool.name} failed with error:`,
error
);
return `The tool ${name}:${tool.name} failed with error: ${error?.message || "An unknown error occurred"}`;
}
},
});
},
};
},
toolName: `${name}:${tool.name}`,
});
}
return plugins;
}
|
Convert an MCP server name to an AnythingLLM Agent plugin
@param {string} name - The base name of the MCP server to convert - not the tool name. eg: `docker-mcp` not `docker-mcp:list-containers`
@param {Object} aibitat - The aibitat object to pass to the plugin
@returns {Promise<{name: string, description: string, plugin: Function}[]|null>} Array of plugin configurations or null if not found
|
convertServerToolsToPlugins
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/MCP/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/MCP/index.js
|
MIT
|
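A hedged sketch of registering the returned plugins with an agent, assuming the aibitat instance exposes a use() method that invokes each plugin's setup().

// Illustrative: attaching converted MCP tool plugins to an existing agent.
// Assumes `aibitat.use()` exists and calls the plugin's setup(); construction of
// the aibitat instance and mcpLayer is out of scope for this sketch.
async function attachMcpTools(mcpLayer, aibitat, serverName) {
  const plugins = await mcpLayer.convertServerToolsToPlugins(serverName, aibitat);
  if (!plugins) return aibitat; // server missing or exposes no tools
  for (const definition of plugins) {
    // Each entry wraps a single MCP tool; plugin() returns a { name, setup } object.
    aibitat.use(definition.plugin());
  }
  return aibitat;
}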
async servers() {
await this.bootMCPServers();
const servers = [];
for (const [name, result] of Object.entries(this.mcpLoadingResults)) {
const config = this.mcpServerConfigs.find((s) => s.name === name);
if (result.status === "failed") {
servers.push({
name,
config: config?.server || null,
running: false,
tools: [],
error: result.message,
process: null,
});
continue;
}
const mcp = this.mcps[name];
if (!mcp) {
delete this.mcpLoadingResults[name];
delete this.mcps[name];
continue;
}
const online = !!(await mcp.ping());
const tools = online ? (await mcp.listTools()).tools : [];
servers.push({
name,
config: config?.server || null,
running: online,
tools,
error: null,
process: {
pid: mcp.transport?.process?.pid || null,
},
});
}
return servers;
}
|
Returns the MCP servers that were loaded or attempted to be loaded
so that we can display them in the frontend for review or error logging.
@returns {Promise<{
name: string,
running: boolean,
tools: {name: string, description: string, inputSchema: Object}[],
process: {pid: number, cmd: string}|null,
error: string|null
}[]>} - The active MCP servers
|
servers
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/MCP/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/MCP/index.js
|
MIT
|
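A small hedged example of surfacing the servers() output, e.g. for logging or an admin endpoint; the summary shape is arbitrary.

// Illustrative: summarizing MCP server status for logging or an admin API.
async function summarizeMcpServers(mcpLayer) {
  const servers = await mcpLayer.servers();
  return servers.map((server) => ({
    name: server.name,
    status: server.running ? "running" : "stopped",
    toolCount: server.tools.length,
    pid: server.process?.pid ?? null,
    error: server.error,
  }));
}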