Dataset columns: code (string, 24 to 2.07M chars), docstring (string, 25 to 85.3k chars), func_name (string, 1 to 92 chars), language (1 class), repo (string, 5 to 64 chars), path (string, 4 to 172 chars), url (string, 44 to 218 chars), license (7 classes).
constructor(config = {}) { const { model = "llama3-8b-8192" } = config; super(); const client = new OpenAI({ baseURL: "https://api.groq.com/openai/v1", apiKey: process.env.GROQ_API_KEY, maxRetries: 3, }); this._client = client; this.model = model; this.verbose = true; }
The agent provider for the GroqAI provider. We wrap Groq in UnTooled because its built-in tool-calling is quite bad and wasteful.
constructor
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/groq.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/groq.js
MIT
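A minimal instantiation sketch for the record above. The class name GroqProvider and its export path are assumptions; the dump shows only the constructor body, and GROQ_API_KEY must be set before construction.

const GroqProvider = require("./server/utils/agents/aibitat/providers/groq"); // assumed export
process.env.GROQ_API_KEY = "gsk_..."; // placeholder key, read inside the constructor
const provider = new GroqProvider({ model: "llama3-8b-8192" }); // this model is also the default
console.log(provider.model); // "llama3-8b-8192"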
get client() { return this._client; }
The agent provider for the GroqAI provider. We wrap Groq in UnTooled because its built-in tool-calling is quite bad and wasteful.
client
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/groq.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/groq.js
MIT
async complete(messages, functions = []) { try { let completion; if (functions.length > 0) { const { toolCall, text } = await this.functionCall( messages, functions, this.#handleFunctionCallChat.bind(this) ); if (toolCall !== null) { this.providerLog(`Valid tool call found - running ${toolCall.name}.`); this.deduplicator.trackRun(toolCall.name, toolCall.arguments); return { result: null, functionCall: { name: toolCall.name, arguments: toolCall.arguments, }, cost: 0, }; } completion = { content: text }; } if (!completion?.content) { this.providerLog( "Will assume chat completion without tool call inputs." ); const response = await this.client.chat.completions.create({ model: this.model, messages: this.cleanMsgs(messages), }); completion = response.choices[0].message; } // The UnTooled class inherited Deduplicator is mostly useful to prevent the agent // from calling the exact same function over and over in a loop within a single chat exchange // _but_ we should enable it to call previously used tools in a new chat interaction. this.deduplicator.reset("runs"); return { result: completion.content, cost: 0, }; } catch (error) { throw error; } }
Create a completion based on the received messages. @param messages A list of messages to send to the API. @param functions @returns The completion.
complete
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/groq.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/groq.js
MIT
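A hedged sketch of how a caller consumes complete(): when a tool call is detected the method returns result: null plus a functionCall to execute; otherwise it returns plain chat text. The function-definition shape mirrors what validFuncCall (further below) reads; provider is an instance like the one sketched above, and the snippet assumes an async context.

const functions = [
  { name: "web-scraper", parameters: { properties: { url: { type: "string" } } } },
];
const messages = [{ role: "user", content: "Scrape https://example.com" }];
const { result, functionCall, cost } = await provider.complete(messages, functions);
if (functionCall) {
  // Run the named tool with functionCall.arguments, append its output
  // to messages, then call provider.complete() again with the new history.
} else {
  console.log(result, cost); // plain chat text; cost is always 0 for UnTooled providers
}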
getCost(_usage) { return 0; }
Get the cost of the completion. @param _usage The completion to get the cost for. @returns The cost of the completion. Stubbed since Groq has no cost basis.
getCost
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/groq.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/groq.js
MIT
constructor(_config = {}) { super(); const model = process.env.KOBOLD_CPP_MODEL_PREF ?? null; const client = new OpenAI({ baseURL: process.env.KOBOLD_CPP_BASE_PATH?.replace(/\/+$/, ""), apiKey: null, maxRetries: 3, }); this._client = client; this.model = model; this.verbose = true; }
The agent provider for the KoboldCPP provider.
constructor
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/koboldcpp.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/koboldcpp.js
MIT
get client() { return this._client; }
The agent provider for the KoboldCPP provider.
client
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/koboldcpp.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/koboldcpp.js
MIT
async complete(messages, functions = []) { try { let completion; if (functions.length > 0) { const { toolCall, text } = await this.functionCall( messages, functions, this.#handleFunctionCallChat.bind(this) ); if (toolCall !== null) { this.providerLog(`Valid tool call found - running ${toolCall.name}.`); this.deduplicator.trackRun(toolCall.name, toolCall.arguments); return { result: null, functionCall: { name: toolCall.name, arguments: toolCall.arguments, }, cost: 0, }; } completion = { content: text }; } if (!completion?.content) { this.providerLog( "Will assume chat completion without tool call inputs." ); const response = await this.client.chat.completions.create({ model: this.model, messages: this.cleanMsgs(messages), }); completion = response.choices[0].message; } // The UnTooled class inherited Deduplicator is mostly useful to prevent the agent // from calling the exact same function over and over in a loop within a single chat exchange // _but_ we should enable it to call previously used tools in a new chat interaction. this.deduplicator.reset("runs"); return { result: completion.content, cost: 0, }; } catch (error) { throw error; } }
Create a completion based on the received messages. @param messages A list of messages to send to the API. @param functions @returns The completion.
complete
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/koboldcpp.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/koboldcpp.js
MIT
getCost(_usage) { return 0; }
Get the cost of the completion. @param _usage The completion to get the cost for. @returns The cost of the completion. Stubbed since KoboldCPP has no cost basis.
getCost
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/koboldcpp.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/koboldcpp.js
MIT
async complete(messages, functions = []) { try { let completion; if (functions.length > 0) { const { toolCall, text } = await this.functionCall( messages, functions, this.#handleFunctionCallChat.bind(this) ); if (toolCall !== null) { this.providerLog(`Valid tool call found - running ${toolCall.name}.`); this.deduplicator.trackRun(toolCall.name, toolCall.arguments); return { result: null, functionCall: { name: toolCall.name, arguments: toolCall.arguments, }, cost: 0, }; } completion = { content: text }; } if (!completion?.content) { this.providerLog( "Will assume chat completion without tool call inputs." ); const response = await this.client.chat.completions.create({ model: this.model, messages: this.cleanMsgs(messages), }); completion = response.choices[0].message; } // The UnTooled class inherited Deduplicator is mostly useful to prevent the agent // from calling the exact same function over and over in a loop within a single chat exchange // _but_ we should enable it to call previously used tools in a new chat interaction. this.deduplicator.reset("runs"); return { result: completion.content, cost: 0, }; } catch (error) { throw error; } }
Create a completion based on the received messages. @param messages A list of messages to send to the API. @param functions @returns The completion.
complete
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/litellm.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/litellm.js
MIT
getCost(_usage) { return 0; }
Get the cost of the completion. @param _usage The completion to get the cost for. @returns The cost of the completion. Stubbed since LiteLLM has no cost basis.
getCost
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/litellm.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/litellm.js
MIT
async complete(messages, functions = []) { try { let completion; if (functions.length > 0) { const { toolCall, text } = await this.functionCall( messages, functions, this.#handleFunctionCallChat.bind(this) ); if (toolCall !== null) { this.providerLog(`Valid tool call found - running ${toolCall.name}.`); this.deduplicator.trackRun(toolCall.name, toolCall.arguments); return { result: null, functionCall: { name: toolCall.name, arguments: toolCall.arguments, }, cost: 0, }; } completion = { content: text }; } if (!completion?.content) { this.providerLog( "Will assume chat completion without tool call inputs." ); const response = await this.client.chat.completions.create({ model: this.model, messages: this.cleanMsgs(messages), }); completion = response.choices[0].message; } // The UnTooled class inherited Deduplicator is mostly useful to prevent the agent // from calling the exact same function over and over in a loop within a single chat exchange // _but_ we should enable it to call previously used tools in a new chat interaction. this.deduplicator.reset("runs"); return { result: completion.content, cost: 0, }; } catch (error) { throw error; } }
Create a completion based on the received messages. @param messages A list of messages to send to the API. @param functions @returns The completion.
complete
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/lmstudio.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/lmstudio.js
MIT
getCost(_usage) { return 0; }
Get the cost of the completion. @param _usage The completion to get the cost for. @returns The cost of the completion. Stubbed since LMStudio has no cost basis.
getCost
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/lmstudio.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/lmstudio.js
MIT
constructor(config = {}) { const { model = null } = config; super(); const client = new OpenAI({ baseURL: process.env.LOCAL_AI_BASE_PATH, apiKey: process.env.LOCAL_AI_API_KEY ?? null, maxRetries: 3, }); this._client = client; this.model = model; this.verbose = true; }
The agent provider for the LocalAI provider.
constructor
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/localai.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/localai.js
MIT
get client() { return this._client; }
The agent provider for the LocalAI provider.
client
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/localai.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/localai.js
MIT
async complete(messages, functions = []) { try { let completion; if (functions.length > 0) { const { toolCall, text } = await this.functionCall( messages, functions, this.#handleFunctionCallChat.bind(this) ); if (toolCall !== null) { this.providerLog(`Valid tool call found - running ${toolCall.name}.`); this.deduplicator.trackRun(toolCall.name, toolCall.arguments); return { result: null, functionCall: { name: toolCall.name, arguments: toolCall.arguments, }, cost: 0, }; } completion = { content: text }; } if (!completion?.content) { this.providerLog( "Will assume chat completion without tool call inputs." ); const response = await this.client.chat.completions.create({ model: this.model, messages: this.cleanMsgs(messages), }); completion = response.choices[0].message; } // The UnTooled class inherited Deduplicator is mostly useful to prevent the agent // from calling the exact same function over and over in a loop within a single chat exchange // _but_ we should enable it to call previously used tools in a new chat interaction. this.deduplicator.reset("runs"); return { result: completion.content, cost: 0 }; } catch (error) { throw error; } }
Create a completion based on the received messages. @param messages A list of messages to send to the API. @param functions @returns The completion.
complete
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/localai.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/localai.js
MIT
getCost(_usage) { return 0; }
Get the cost of the completion. @param _usage The completion to get the cost for. @returns The cost of the completion. Stubbed since LocalAI has no cost basis.
getCost
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/localai.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/localai.js
MIT
async complete(messages, functions = []) { try { let completion; if (functions.length > 0) { const { toolCall, text } = await this.functionCall( messages, functions, this.#handleFunctionCallChat.bind(this) ); if (toolCall !== null) { this.providerLog(`Valid tool call found - running ${toolCall.name}.`); this.deduplicator.trackRun(toolCall.name, toolCall.arguments); return { result: null, functionCall: { name: toolCall.name, arguments: toolCall.arguments, }, cost: 0, }; } completion = { content: text }; } if (!completion?.content) { this.providerLog( "Will assume chat completion without tool call inputs." ); const response = await this.client.chat.completions.create({ model: this.model, messages: this.cleanMsgs(messages), }); completion = response.choices[0].message; } // The UnTooled class inherited Deduplicator is mostly useful to prevent the agent // from calling the exact same function over and over in a loop within a single chat exchange // _but_ we should enable it to call previously used tools in a new chat interaction. this.deduplicator.reset("runs"); return { result: completion.content, cost: 0, }; } catch (error) { throw error; } }
Create a completion based on the received messages. @param messages A list of messages to send to the API. @param functions @returns The completion.
complete
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/mistral.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/mistral.js
MIT
getCost(_usage) { return 0; }
Get the cost of the completion. @param _usage The completion to get the cost for. @returns The cost of the completion.
getCost
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/mistral.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/mistral.js
MIT
constructor(config = {}) { const { model = "deepseek/deepseek-r1" } = config; super(); const client = new OpenAI({ baseURL: "https://api.novita.ai/v3/openai", apiKey: process.env.NOVITA_LLM_API_KEY, maxRetries: 3, defaultHeaders: { "HTTP-Referer": "https://anythingllm.com", "X-Novita-Source": "anythingllm", }, }); this._client = client; this.model = model; this.verbose = true; }
The agent provider for the Novita AI provider.
constructor
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/novita.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/novita.js
MIT
get client() { return this._client; }
The agent provider for the Novita AI provider.
client
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/novita.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/novita.js
MIT
async complete(messages, functions = []) { let completion; if (functions.length > 0) { const { toolCall, text } = await this.functionCall( messages, functions, this.#handleFunctionCallChat.bind(this) ); if (toolCall !== null) { this.providerLog(`Valid tool call found - running ${toolCall.name}.`); this.deduplicator.trackRun(toolCall.name, toolCall.arguments); return { result: null, functionCall: { name: toolCall.name, arguments: toolCall.arguments, }, cost: 0, }; } completion = { content: text }; } if (!completion?.content) { this.providerLog("Will assume chat completion without tool call inputs."); const response = await this.client.chat.completions.create({ model: this.model, messages: this.cleanMsgs(messages), }); completion = response.choices[0].message; } // The UnTooled class inherited Deduplicator is mostly useful to prevent the agent // from calling the exact same function over and over in a loop within a single chat exchange // _but_ we should enable it to call previously used tools in a new chat interaction. this.deduplicator.reset("runs"); return { result: completion.content, cost: 0, }; }
Create a completion based on the received messages. @param messages A list of messages to send to the API. @param functions @returns The completion.
complete
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/novita.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/novita.js
MIT
getCost(_usage) { return 0; }
Get the cost of the completion. @param _usage The completion to get the cost for. @returns The cost of the completion. Stubbed since Novita AI has no cost basis.
getCost
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/novita.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/novita.js
MIT
constructor(config = {}) { const { model } = config; super(); const client = new OpenAI({ baseURL: process.env.NVIDIA_NIM_LLM_BASE_PATH, apiKey: null, maxRetries: 0, }); this._client = client; this.model = model; this.verbose = true; }
The agent provider for the Nvidia NIM provider. We wrap Nvidia NIM in UnTooled because tool-calling may not be supported for specific models, and this normalizes that.
constructor
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/nvidiaNim.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/nvidiaNim.js
MIT
get client() { return this._client; }
The agent provider for the Nvidia NIM provider. We wrap Nvidia NIM in UnTooled because tool-calling may not be supported for specific models, and this normalizes that.
client
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/nvidiaNim.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/nvidiaNim.js
MIT
async complete(messages, functions = []) { try { let completion; if (functions.length > 0) { const { toolCall, text } = await this.functionCall( messages, functions, this.#handleFunctionCallChat.bind(this) ); if (toolCall !== null) { this.providerLog(`Valid tool call found - running ${toolCall.name}.`); this.deduplicator.trackRun(toolCall.name, toolCall.arguments); return { result: null, functionCall: { name: toolCall.name, arguments: toolCall.arguments, }, cost: 0, }; } completion = { content: text }; } if (!completion?.content) { this.providerLog( "Will assume chat completion without tool call inputs." ); const response = await this.client.chat.completions.create({ model: this.model, messages: this.cleanMsgs(messages), }); completion = response.choices[0].message; } // The UnTooled class inherited Deduplicator is mostly useful to prevent the agent // from calling the exact same function over and over in a loop within a single chat exchange // _but_ we should enable it to call previously used tools in a new chat interaction. this.deduplicator.reset("runs"); return { result: completion.content, cost: 0, }; } catch (error) { throw error; } }
Create a completion based on the received messages. @param messages A list of messages to send to the API. @param functions @returns The completion.
complete
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/nvidiaNim.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/nvidiaNim.js
MIT
getCost(_usage) { return 0; }
Get the cost of the completion. @param _usage The completion to get the cost for. @returns The cost of the completion.
getCost
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/nvidiaNim.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/nvidiaNim.js
MIT
constructor(config = {}) { const { // options = {}, model = null, } = config; super(); const headers = process.env.OLLAMA_AUTH_TOKEN ? { Authorization: `Bearer ${process.env.OLLAMA_AUTH_TOKEN}` } : {}; this._client = new Ollama({ host: process.env.OLLAMA_BASE_PATH, headers: headers, }); this.model = model; this.verbose = true; }
The agent provider for the Ollama provider.
constructor
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/ollama.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/ollama.js
MIT
get client() { return this._client; }
The agent provider for the Ollama provider.
client
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/ollama.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/ollama.js
MIT
async complete(messages, functions = []) { try { let completion; if (functions.length > 0) { const { toolCall, text } = await this.functionCall( messages, functions, this.#handleFunctionCallChat.bind(this) ); if (toolCall !== null) { this.providerLog(`Valid tool call found - running ${toolCall.name}.`); this.deduplicator.trackRun(toolCall.name, toolCall.arguments); return { result: null, functionCall: { name: toolCall.name, arguments: toolCall.arguments, }, cost: 0, }; } completion = { content: text }; } if (!completion?.content) { this.providerLog( "Will assume chat completion without tool call inputs." ); const response = await this.client.chat({ model: this.model, messages: this.cleanMsgs(messages), options: { use_mlock: true, temperature: 0.5, }, }); completion = response.message; } // The UnTooled class inherited Deduplicator is mostly useful to prevent the agent // from calling the exact same function over and over in a loop within a single chat exchange // _but_ we should enable it to call previously used tools in a new chat interaction. this.deduplicator.reset("runs"); return { result: completion.content, cost: 0, }; } catch (error) { throw error; } }
Create a completion based on the received messages. @param messages A list of messages to send to the API. @param functions @returns The completion.
complete
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/ollama.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/ollama.js
MIT
getCost(_usage) { return 0; }
Get the cost of the completion. @param _usage The completion to get the cost for. @returns The cost of the completion. Stubbed since Ollama has no cost basis.
getCost
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/ollama.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/ollama.js
MIT
constructor(config = {}) { const { options = { apiKey: process.env.OPEN_AI_KEY, maxRetries: 3, }, model = "gpt-4o", } = config; const client = new OpenAI(options); super(client); this.model = model; }
The agent provider for the OpenAI API. By default, the model is set to 'gpt-4o'.
constructor
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/openai.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/openai.js
MIT
async complete(messages, functions = []) { try { const response = await this.client.chat.completions.create({ model: this.model, // stream: true, messages, ...(Array.isArray(functions) && functions?.length > 0 ? { functions } : {}), }); // Right now, we only support one completion, // so we just take the first one in the list const completion = response.choices[0].message; const cost = this.getCost(response.usage); // treat function calls if (completion.function_call) { let functionArgs = {}; try { functionArgs = JSON.parse(completion.function_call.arguments); } catch (error) { // call the complete function again in case it gets a json error return this.complete( [ ...messages, { role: "function", name: completion.function_call.name, function_call: completion.function_call, content: error?.message, }, ], functions ); } // console.log(completion, { functionArgs }) return { result: null, functionCall: { name: completion.function_call.name, arguments: functionArgs, }, cost, }; } return { result: completion.content, cost, }; } catch (error) { // If invalid Auth error we need to abort because no amount of waiting // will make auth better. if (error instanceof OpenAI.AuthenticationError) throw error; if ( error instanceof OpenAI.RateLimitError || error instanceof OpenAI.InternalServerError || error instanceof OpenAI.APIError // Also will catch AuthenticationError!!! ) { throw new RetryError(error.message); } throw error; } }
Create a completion based on the received messages. @param messages A list of messages to send to the OpenAI API. @param functions @returns The completion.
complete
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/openai.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/openai.js
MIT
getCost(usage) { if (!usage) { return Number.NaN; } // regex to remove the version number from the model const modelBase = this.model.replace(/-(\d{4})$/, ""); if (!(modelBase in OpenAIProvider.COST_PER_TOKEN)) { return Number.NaN; } const costPerToken = OpenAIProvider.COST_PER_TOKEN?.[modelBase]; const inputCost = (usage.prompt_tokens / 1000) * costPerToken.input; const outputCost = (usage.completion_tokens / 1000) * costPerToken.output; return inputCost + outputCost; }
Get the cost of the completion. @param usage The completion to get the cost for. @returns The cost of the completion.
getCost
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/openai.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/openai.js
MIT
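The cost math above is per-1k-token pricing. A worked example with a hypothetical COST_PER_TOKEN entry (the real static table lives on OpenAIProvider and is not shown in this dump):

// Hypothetical pricing: COST_PER_TOKEN["gpt-4o"] = { input: 0.005, output: 0.015 } (USD per 1k tokens)
const usage = { prompt_tokens: 2000, completion_tokens: 500 };
const inputCost = (usage.prompt_tokens / 1000) * 0.005;      // 2.0 * 0.005 = 0.010
const outputCost = (usage.completion_tokens / 1000) * 0.015; // 0.5 * 0.015 = 0.0075
console.log(inputCost + outputCost);                         // 0.0175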
constructor(config = {}) { const { model = "openrouter/auto" } = config; super(); const client = new OpenAI({ baseURL: "https://openrouter.ai/api/v1", apiKey: process.env.OPENROUTER_API_KEY, maxRetries: 3, defaultHeaders: { "HTTP-Referer": "https://anythingllm.com", "X-Title": "AnythingLLM", }, }); this._client = client; this.model = model; this.verbose = true; }
The agent provider for the OpenRouter provider.
constructor
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/openrouter.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/openrouter.js
MIT
get client() { return this._client; }
The agent provider for the OpenRouter provider.
client
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/openrouter.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/openrouter.js
MIT
async complete(messages, functions = []) { try { let completion; if (functions.length > 0) { const { toolCall, text } = await this.functionCall( messages, functions, this.#handleFunctionCallChat.bind(this) ); if (toolCall !== null) { this.providerLog(`Valid tool call found - running ${toolCall.name}.`); this.deduplicator.trackRun(toolCall.name, toolCall.arguments); return { result: null, functionCall: { name: toolCall.name, arguments: toolCall.arguments, }, cost: 0, }; } completion = { content: text }; } if (!completion?.content) { this.providerLog( "Will assume chat completion without tool call inputs." ); const response = await this.client.chat.completions.create({ model: this.model, messages: this.cleanMsgs(messages), }); completion = response.choices[0].message; } // The UnTooled class inherited Deduplicator is mostly useful to prevent the agent // from calling the exact same function over and over in a loop within a single chat exchange // _but_ we should enable it to call previously used tools in a new chat interaction. this.deduplicator.reset("runs"); return { result: completion.content, cost: 0, }; } catch (error) { throw error; } }
Create a completion based on the received messages. @param messages A list of messages to send to the API. @param functions @returns The completion.
complete
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/openrouter.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/openrouter.js
MIT
getCost(_usage) { return 0; }
Get the cost of the completion. @param _usage The completion to get the cost for. @returns The cost of the completion. Stubbed since OpenRouter has no cost basis.
getCost
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/openrouter.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/openrouter.js
MIT
constructor(config = {}) { super(); const { model = "sonar-small-online" } = config; const client = new OpenAI({ baseURL: "https://api.perplexity.ai", apiKey: process.env.PERPLEXITY_API_KEY ?? null, maxRetries: 3, }); this._client = client; this.model = model; this.verbose = true; }
The agent provider for the Perplexity provider.
constructor
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/perplexity.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/perplexity.js
MIT
get client() { return this._client; }
The agent provider for the Perplexity provider.
client
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/perplexity.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/perplexity.js
MIT
async complete(messages, functions = []) { try { let completion; if (functions.length > 0) { const { toolCall, text } = await this.functionCall( messages, functions, this.#handleFunctionCallChat.bind(this) ); if (toolCall !== null) { this.providerLog(`Valid tool call found - running ${toolCall.name}.`); this.deduplicator.trackRun(toolCall.name, toolCall.arguments); return { result: null, functionCall: { name: toolCall.name, arguments: toolCall.arguments, }, cost: 0, }; } completion = { content: text }; } if (!completion?.content) { this.providerLog( "Will assume chat completion without tool call inputs." ); const response = await this.client.chat.completions.create({ model: this.model, messages: this.cleanMsgs(messages), }); completion = response.choices[0].message; } // The UnTooled class inherited Deduplicator is mostly useful to prevent the agent // from calling the exact same function over and over in a loop within a single chat exchange // _but_ we should enable it to call previously used tools in a new chat interaction. this.deduplicator.reset("runs"); return { result: completion.content, cost: 0, }; } catch (error) { throw error; } }
Create a completion based on the received messages. @param messages A list of messages to send to the API. @param functions @returns The completion.
complete
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/perplexity.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/perplexity.js
MIT
getCost(_usage) { return 0; }
Get the cost of the completion. @param _usage The completion to get the cost for. @returns The cost of the completion.
getCost
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/perplexity.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/perplexity.js
MIT
constructor(config = {}) { const { model = "qwen/qwen2.5-32b-instruct" } = config; super(); const client = new OpenAI({ baseURL: "https://api.ppinfra.com/v3/openai", apiKey: process.env.PPIO_API_KEY, maxRetries: 3, defaultHeaders: { "HTTP-Referer": "https://anythingllm.com", "X-API-Source": "anythingllm", }, }); this._client = client; this.model = model; this.verbose = true; }
The agent provider for the PPIO AI provider.
constructor
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/ppio.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/ppio.js
MIT
get client() { return this._client; }
The agent provider for the PPIO AI provider.
client
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/ppio.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/ppio.js
MIT
async complete(messages, functions = []) { let completion; if (functions.length > 0) { const { toolCall, text } = await this.functionCall( messages, functions, this.#handleFunctionCallChat.bind(this) ); if (toolCall !== null) { this.providerLog(`Valid tool call found - running ${toolCall.name}.`); this.deduplicator.trackRun(toolCall.name, toolCall.arguments); return { result: null, functionCall: { name: toolCall.name, arguments: toolCall.arguments, }, cost: 0, }; } completion = { content: text }; } if (!completion?.content) { this.providerLog("Will assume chat completion without tool call inputs."); const response = await this.client.chat.completions.create({ model: this.model, messages: this.cleanMsgs(messages), }); completion = response.choices[0].message; } // The UnTooled class inherited Deduplicator is mostly useful to prevent the agent // from calling the exact same function over and over in a loop within a single chat exchange // _but_ we should enable it to call previously used tools in a new chat interaction. this.deduplicator.reset("runs"); return { result: completion.content, cost: 0, }; }
Create a completion based on the received messages. @param messages A list of messages to send to the API. @param functions @returns The completion.
complete
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/ppio.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/ppio.js
MIT
getCost(_usage) { return 0; }
Get the cost of the completion. @param _usage The completion to get the cost for. @returns The cost of the completion. Stubbed since PPIO has no cost basis.
getCost
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/ppio.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/ppio.js
MIT
constructor(_config = {}) { super(); const client = new OpenAI({ baseURL: process.env.TEXT_GEN_WEB_UI_BASE_PATH, apiKey: process.env.TEXT_GEN_WEB_UI_API_KEY ?? null, maxRetries: 3, }); this._client = client; this.model = null; // text-web-gen-ui does not have a model pref. this.verbose = true; }
The agent provider for the Oobabooga provider.
constructor
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/textgenwebui.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/textgenwebui.js
MIT
get client() { return this._client; }
The agent provider for the Oobabooga provider.
client
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/textgenwebui.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/textgenwebui.js
MIT
async complete(messages, functions = []) { try { let completion; if (functions.length > 0) { const { toolCall, text } = await this.functionCall( messages, functions, this.#handleFunctionCallChat.bind(this) ); if (toolCall !== null) { this.providerLog(`Valid tool call found - running ${toolCall.name}.`); this.deduplicator.trackRun(toolCall.name, toolCall.arguments); return { result: null, functionCall: { name: toolCall.name, arguments: toolCall.arguments, }, cost: 0, }; } completion = { content: text }; } if (!completion?.content) { this.providerLog( "Will assume chat completion without tool call inputs." ); const response = await this.client.chat.completions.create({ model: this.model, messages: this.cleanMsgs(messages), }); completion = response.choices[0].message; } // The UnTooled class inherited Deduplicator is mostly useful to prevent the agent // from calling the exact same function over and over in a loop within a single chat exchange // _but_ we should enable it to call previously used tools in a new chat interaction. this.deduplicator.reset("runs"); return { result: completion.content, cost: 0, }; } catch (error) { throw error; } }
Create a completion based on the received messages. @param messages A list of messages to send to the API. @param functions @returns The completion.
complete
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/textgenwebui.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/textgenwebui.js
MIT
getCost(_usage) { return 0; }
Get the cost of the completion. @param _usage The completion to get the cost for. @returns The cost of the completion. Stubbed since Oobabooga has no cost basis.
getCost
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/textgenwebui.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/textgenwebui.js
MIT
constructor(config = {}) { const { model = "mistralai/Mistral-7B-Instruct-v0.1" } = config; super(); const client = new OpenAI({ baseURL: "https://api.together.xyz/v1", apiKey: process.env.TOGETHER_AI_API_KEY, maxRetries: 3, }); this._client = client; this.model = model; this.verbose = true; }
The agent provider for the TogetherAI provider.
constructor
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/togetherai.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/togetherai.js
MIT
get client() { return this._client; }
The agent provider for the TogetherAI provider.
client
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/togetherai.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/togetherai.js
MIT
async complete(messages, functions = []) { try { let completion; if (functions.length > 0) { const { toolCall, text } = await this.functionCall( messages, functions, this.#handleFunctionCallChat.bind(this) ); if (toolCall !== null) { this.providerLog(`Valid tool call found - running ${toolCall.name}.`); this.deduplicator.trackRun(toolCall.name, toolCall.arguments); return { result: null, functionCall: { name: toolCall.name, arguments: toolCall.arguments, }, cost: 0, }; } completion = { content: text }; } if (!completion?.content) { this.providerLog( "Will assume chat completion without tool call inputs." ); const response = await this.client.chat.completions.create({ model: this.model, messages: this.cleanMsgs(messages), }); completion = response.choices[0].message; } // The UnTooled class inherited Deduplicator is mostly useful to prevent the agent // from calling the exact same function over and over in a loop within a single chat exchange // _but_ we should enable it to call previously used tools in a new chat interaction. this.deduplicator.reset("runs"); return { result: completion.content, cost: 0, }; } catch (error) { throw error; } }
Create a completion based on the received messages. @param messages A list of messages to send to the API. @param functions @returns The completion.
complete
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/togetherai.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/togetherai.js
MIT
getCost(_usage) { return 0; }
Get the cost of the completion. @param _usage The completion to get the cost for. @returns The cost of the completion. Stubbed since TogetherAI has no cost basis.
getCost
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/togetherai.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/togetherai.js
MIT
constructor(config = {}) { const { model = "grok-beta" } = config; super(); const client = new OpenAI({ baseURL: "https://api.x.ai/v1", apiKey: process.env.XAI_LLM_API_KEY, maxRetries: 3, }); this._client = client; this.model = model; this.verbose = true; }
The agent provider for the xAI provider.
constructor
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/xai.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/xai.js
MIT
get client() { return this._client; }
The agent provider for the xAI provider.
client
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/xai.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/xai.js
MIT
async complete(messages, functions = []) { try { let completion; if (functions.length > 0) { const { toolCall, text } = await this.functionCall( messages, functions, this.#handleFunctionCallChat.bind(this) ); if (toolCall !== null) { this.providerLog(`Valid tool call found - running ${toolCall.name}.`); this.deduplicator.trackRun(toolCall.name, toolCall.arguments); return { result: null, functionCall: { name: toolCall.name, arguments: toolCall.arguments, }, cost: 0, }; } completion = { content: text }; } if (!completion?.content) { this.providerLog( "Will assume chat completion without tool call inputs." ); const response = await this.client.chat.completions.create({ model: this.model, messages: this.cleanMsgs(messages), }); completion = response.choices[0].message; } // The UnTooled class inherited Deduplicator is mostly useful to prevent the agent // from calling the exact same function over and over in a loop within a single chat exchange // _but_ we should enable it to call previously used tools in a new chat interaction. this.deduplicator.reset("runs"); return { result: completion.content, cost: 0, }; } catch (error) { throw error; } }
Create a completion based on the received messages. @param messages A list of messages to send to the API. @param functions @returns The completion.
complete
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/xai.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/xai.js
MIT
getCost(_usage) { return 0; }
Get the cost of the completion. @param _usage The completion to get the cost for. @returns The cost of the completion.
getCost
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/xai.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/xai.js
MIT
compareArrays(arr1, arr2, opts) { function vKey(i, v) { return (opts?.enforceOrder ? `${i}-` : "") + `${typeof v}-${v}`; } if (arr1.length !== arr2.length) return false; const d1 = {}; const d2 = {}; for (let i = arr1.length - 1; i >= 0; i--) { d1[vKey(i, arr1[i])] = true; d2[vKey(i, arr2[i])] = true; } for (let i = arr1.length - 1; i >= 0; i--) { const v = vKey(i, arr1[i]); if (d1[v] !== d2[v]) return false; } for (let i = arr2.length - 1; i >= 0; i--) { const v = vKey(i, arr2[i]); if (d1[v] !== d2[v]) return false; } return true; }
Check if two arrays of strings or numbers have the same values @param {string[]|number[]} arr1 @param {string[]|number[]} arr2 @param {Object} [opts] @param {boolean} [opts.enforceOrder] - By default (false), the order of the values in the arrays doesn't matter. @return {boolean}
compareArrays
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/helpers/untooled.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/helpers/untooled.js
MIT
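A usage sketch for compareArrays above: by default order is ignored, and passing { enforceOrder: true } makes position significant. Calls are shown as bare functions for brevity; in the source this is a method on the UnTooled helper class.

compareArrays([1, 2, 3], [3, 2, 1]);                         // true  (same values, order ignored)
compareArrays([1, 2, 3], [3, 2, 1], { enforceOrder: true }); // false (positions differ)
compareArrays(["a", "b"], ["a", "b", "c"]);                  // false (length mismatch short-circuits)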
function vKey(i, v) { return (opts?.enforceOrder ? `${i}-` : "") + `${typeof v}-${v}`; }
Builds the comparison key for a value; the index is prefixed when opts.enforceOrder is set so that position matters. Nested helper of compareArrays. @param {number} i - The value's index. @param {string|number} v - The value. @return {string}
vKey
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/helpers/untooled.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/helpers/untooled.js
MIT
validFuncCall(functionCall = {}, functions = []) { if ( !functionCall || !functionCall?.hasOwnProperty("name") || !functionCall?.hasOwnProperty("arguments") ) { return { valid: false, reason: "Missing name or arguments in function call.", }; } const foundFunc = functions.find((def) => def.name === functionCall.name); if (!foundFunc) { return { valid: false, reason: "Function name does not exist." }; } const props = Object.keys(foundFunc.parameters.properties); const fProps = Object.keys(functionCall.arguments); if (!this.compareArrays(props, fProps)) { return { valid: false, reason: "Invalid argument schema match." }; } return { valid: true, reason: null }; }
Validates a model-produced function call against the available function definitions: it must carry 'name' and 'arguments', name a known function, and its argument keys must exactly match the declared parameter properties (via compareArrays). @param {Object} functionCall @param {Object[]} functions @return {{valid: boolean, reason: string|null}}
validFuncCall
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/helpers/untooled.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/helpers/untooled.js
MIT
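A sketch of the validation outcomes (untooled is an assumed instance of the helper class; the function-definition shape mirrors what validFuncCall reads):

const functions = [{ name: "web-scraper", parameters: { properties: { url: {} } } }];
untooled.validFuncCall({ name: "web-scraper", arguments: { url: "https://example.com" } }, functions);
// => { valid: true, reason: null }
untooled.validFuncCall({ name: "web-scraper", arguments: {} }, functions);
// => { valid: false, reason: "Invalid argument schema match." }
untooled.validFuncCall({ name: "unknown-tool", arguments: {} }, functions);
// => { valid: false, reason: "Function name does not exist." }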
async functionCall(messages, functions, chatCb = null) { const history = [...messages].filter((msg) => ["user", "assistant"].includes(msg.role) ); if (history[history.length - 1].role !== "user") return null; const response = await chatCb({ messages: [ { content: `You are a program which picks the most optimal function and parameters to call. YOU DO NOT HAVE TO PICK A FUNCTION IF IT WILL NOT HELP ANSWER OR FULFILL THE USER'S QUERY. When a function is selected, respond in JSON with no additional text. When there is no relevant function to call - return with a regular chat text response. Your task is to pick a **single** function that we will use to call, if any seem useful or relevant for the user query. All JSON responses should have two keys. 'name': this is the name of the function to call. eg: 'web-scraper', 'rag-memory', etc.. 'arguments': this is an object with the function properties to invoke the function. DO NOT INCLUDE ANY OTHER KEYS IN JSON RESPONSES. Here are the available tools you can use and examples of a query and response so you can understand how each one works. ${this.showcaseFunctions(functions)} Now pick a function if there is an appropriate one to use given the last user message and the given conversation so far.`, role: "system", }, ...history, ], }); const call = safeJsonParse(response, null); if (call === null) return { toolCall: null, text: response }; // failed to parse, so must be text. const { valid, reason } = this.validFuncCall(call, functions); if (!valid) { this.providerLog(`Invalid function tool call: ${reason}.`); return { toolCall: null, text: null }; } if (this.deduplicator.isDuplicate(call.name, call.arguments)) { this.providerLog( `Function tool with exact arguments has already been called this stack.` ); return { toolCall: null, text: null }; } return { toolCall: call, text: null }; }
Asks the model (via the provider-specific chat callback) to pick a single function for the last user message, then parses, validates, and deduplicates the resulting call. @param messages The conversation so far. @param functions The available function definitions. @param chatCb The provider-specific chat completion callback. @returns {{toolCall: Object|null, text: string|null}} A validated tool call, or plain text when no function was chosen.
functionCall
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/providers/helpers/untooled.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/helpers/untooled.js
MIT
reset(type = "runs") { switch (type) { case "runs": this.#hashes = {}; break; case "cooldowns": this.#cooldowns = {}; break; case "uniques": this.#uniques = {}; break; } return; }
Resets the object property for this instance of the Deduplicator class @param {('runs'|'cooldowns'|'uniques')} type - The type of prop to reset
reset
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/utils/dedupe.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/utils/dedupe.js
MIT
startCooldown( key, parameters = { cooldownInMs: DEFAULT_COOLDOWN_MS, } ) { this.#cooldowns[key] = Number(new Date()) + Number(parameters.cooldownInMs); }
Starts a cooldown timer for a key on this instance of the Deduplicator class. @param {string} key - The key to put on cooldown. @param {{cooldownInMs: number}} [parameters] - Cooldown duration; defaults to DEFAULT_COOLDOWN_MS.
startCooldown
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/utils/dedupe.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/utils/dedupe.js
MIT
isOnCooldown(key) { if (!this.#cooldowns.hasOwnProperty(key)) return false; return Number(new Date()) <= this.#cooldowns[key]; }
Checks if a key is currently within its cooldown window on this instance of the Deduplicator class. @param {string} key @return {boolean}
isOnCooldown
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/utils/dedupe.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/utils/dedupe.js
MIT
isUnique(key) { return !this.#uniques.hasOwnProperty(key); }
Checks if a key has not yet been marked unique on this instance of the Deduplicator class. @param {string} key @return {boolean}
isUnique
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/utils/dedupe.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/utils/dedupe.js
MIT
removeUniqueConstraint(key) { delete this.#uniques[key]; }
Removes the unique constraint for a key on this instance of the Deduplicator class. @param {string} key
removeUniqueConstraint
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/utils/dedupe.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/utils/dedupe.js
MIT
markUnique(key) { this.#uniques[key] = Number(new Date()); }
Marks a key as unique (recording the current timestamp) on this instance of the Deduplicator class. @param {string} key
markUnique
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/utils/dedupe.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/utils/dedupe.js
MIT
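Taken together, a hedged sketch of the Deduplicator surfaces shown in the records above (the constructor and DEFAULT_COOLDOWN_MS are assumptions; neither appears in this dump):

const dedupe = new Deduplicator(); // assumed zero-argument constructor
dedupe.startCooldown("web-scraper", { cooldownInMs: 5000 });
dedupe.isOnCooldown("web-scraper");   // true for the next 5 seconds
dedupe.markUnique("intro-shown");
dedupe.isUnique("intro-shown");       // false once marked
dedupe.removeUniqueConstraint("intro-shown");
dedupe.reset("cooldowns");            // wipes all cooldown timers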
async function summarizeContent({ provider = "openai", model = null, controllerSignal, content, }) { const llm = Provider.LangChainChatModel(provider, { temperature: 0, model: model, }); const textSplitter = new RecursiveCharacterTextSplitter({ separators: ["\n\n", "\n"], chunkSize: 10000, chunkOverlap: 500, }); const docs = await textSplitter.createDocuments([content]); const mapPrompt = ` Write a detailed summary of the following text for a research purpose: "{text}" SUMMARY: `; const mapPromptTemplate = new PromptTemplate({ template: mapPrompt, inputVariables: ["text"], }); // This convenience function creates a document chain prompted to summarize a set of documents. const chain = loadSummarizationChain(llm, { type: "map_reduce", combinePrompt: mapPromptTemplate, combineMapPrompt: mapPromptTemplate, verbose: process.env.NODE_ENV === "development", }); const res = await chain.call({ ...(controllerSignal ? { signal: controllerSignal } : {}), input_documents: docs, }); return res.text; }
Summarize content using a LangChain map-reduce summarization chain. @param {LCSummarizationConfig} config - The provider/model to use, an optional abort signal, and the content to summarize. @returns {Promise<string>} The summarized content.
summarizeContent
javascript
Mintplex-Labs/anything-llm
server/utils/agents/aibitat/utils/summarize.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/utils/summarize.js
MIT
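A usage sketch, assuming the function is exported from the summarize module and that an AbortController drives controllerSignal (as the signal pass-through in the chain call suggests):

const controller = new AbortController();
const summary = await summarizeContent({
  provider: "openai",
  model: "gpt-4o",                     // forwarded to Provider.LangChainChatModel
  controllerSignal: controller.signal, // lets the caller abort a long summarization
  content: longDocumentText,           // any large string; split into ~10k-char chunks
});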
constructPrompt({ systemPrompt = "", contextTexts = [], chatHistory = [], userPrompt = "", attachments = [], // This is the specific attachment for only this prompt }) { const prompt = { role: "system", content: `${systemPrompt}${this.#appendContext(contextTexts)}`, }; return [ prompt, ...formatChatHistory(chatHistory, this.#generateContent), { role: "user", content: this.#generateContent({ userPrompt, attachments }), }, ]; }
Constructs the full chat prompt array: a system message with appended context texts, the formatted chat history, and a final user message built from the prompt plus attachments. @param {{systemPrompt: string, contextTexts: string[], chatHistory: Object[], userPrompt: string, attachments: import("../../helpers").Attachment[]}} @returns {Object[]}
constructPrompt
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/anthropic/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/anthropic/index.js
MIT
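A sketch of the returned shape (OpenAI-style role/content messages; anthropic is an assumed instance, and the private helpers #appendContext and #generateContent do the context and attachment formatting):

const prompt = anthropic.constructPrompt({
  systemPrompt: "You are a helpful assistant.",
  contextTexts: ["Relevant document snippet"],
  chatHistory: [
    { role: "user", content: "Hi" },
    { role: "assistant", content: "Hello!" },
  ],
  userPrompt: "Summarize the document.",
});
// => [ { role: "system", content: "...context appended..." },
//      ...formatted history,
//      { role: "user", content: <prompt plus attachments> } ]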
async getChatCompletion(messages = null, { temperature = 0.7 }) { try { const result = await LLMPerformanceMonitor.measureAsyncFunction( this.anthropic.messages.create({ model: this.model, max_tokens: 4096, system: messages[0].content, // Strip out the system message messages: messages.slice(1), // Pop off the system message temperature: Number(temperature ?? this.defaultTemp), }) ); const promptTokens = result.output.usage.input_tokens; const completionTokens = result.output.usage.output_tokens; return { textResponse: result.output.content[0].text, metrics: { prompt_tokens: promptTokens, completion_tokens: completionTokens, total_tokens: promptTokens + completionTokens, outputTps: completionTokens / result.duration, duration: result.duration, }, }; } catch (error) { console.log(error); return { textResponse: error, metrics: {} }; } }
Gets a chat completion from the Anthropic API with performance metrics. The first message is sent as the system prompt and the rest as the conversation. @param {Object[]} messages @param {{temperature: number}} @returns {Promise<{textResponse: string, metrics: Object}>}
getChatCompletion
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/anthropic/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/anthropic/index.js
MIT
async streamGetChatCompletion(messages = null, { temperature = 0.7 }) { const measuredStreamRequest = await LLMPerformanceMonitor.measureStream( this.anthropic.messages.stream({ model: this.model, max_tokens: 4096, system: messages[0].content, // Strip out the system message messages: messages.slice(1), // Pop off the system message temperature: Number(temperature ?? this.defaultTemp), }), messages, false ); return measuredStreamRequest; }
Streams a chat completion from the Anthropic API, wrapped with LLMPerformanceMonitor tracking. The first message is sent as the system prompt and the rest as the conversation. @param {Object[]} messages @param {{temperature: number}} @returns {Promise<import('../../helpers/chat/LLMPerformanceMonitor').MonitoredStream>}
streamGetChatCompletion
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/anthropic/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/anthropic/index.js
MIT
handleStream(response, stream, responseProps) { return new Promise((resolve) => { let fullText = ""; const { uuid = v4(), sources = [] } = responseProps; let usage = { prompt_tokens: 0, completion_tokens: 0, }; // Establish listener to early-abort a streaming response // in case things go sideways or the user does not like the response. // We preserve the generated text but continue as if chat was completed // to preserve previously generated content. const handleAbort = () => { stream?.endMeasurement(usage); clientAbortedHandler(resolve, fullText); }; response.on("close", handleAbort); stream.on("error", (event) => { const parseErrorMsg = (event) => { const error = event?.error?.error; if (!!error) return `Anthropic Error:${error?.type || "unknown"} ${ error?.message || "unknown error." }`; return event.message; }; writeResponseChunk(response, { uuid, sources: [], type: "abort", textResponse: null, close: true, error: parseErrorMsg(event), }); response.removeListener("close", handleAbort); stream?.endMeasurement(usage); resolve(fullText); }); stream.on("streamEvent", (message) => { const data = message; if (data.type === "message_start") usage.prompt_tokens = data?.message?.usage?.input_tokens; if (data.type === "message_delta") usage.completion_tokens = data?.usage?.output_tokens; if ( data.type === "content_block_delta" && data.delta.type === "text_delta" ) { const text = data.delta.text; fullText += text; writeResponseChunk(response, { uuid, sources, type: "textResponseChunk", textResponse: text, close: false, error: false, }); } if ( message.type === "message_stop" || (data.stop_reason && data.stop_reason === "end_turn") ) { writeResponseChunk(response, { uuid, sources, type: "textResponseChunk", textResponse: "", close: true, error: false, }); response.removeListener("close", handleAbort); stream?.endMeasurement(usage); resolve(fullText); } }); }); }
Handles the stream response from the Anthropic API. @param {Object} response - the response object @param {import('../../helpers/chat/LLMPerformanceMonitor').MonitoredStream} stream - the stream response from the Anthropic API w/tracking @param {Object} responseProps - the response properties @returns {Promise<string>}
handleStream
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/anthropic/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/anthropic/index.js
MIT
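For reference, the stream event types handleStream switches on, sketched as illustrative payloads. Only the fields the handler actually reads are shown; the concrete values are invented, not taken from Anthropic's documentation.

// Illustrative events as consumed by handleStream above.
const exampleEvents = [
  { type: "message_start", message: { usage: { input_tokens: 42 } } }, // sets usage.prompt_tokens
  { type: "content_block_delta", delta: { type: "text_delta", text: "Hi" } }, // appended to fullText and flushed
  { type: "message_delta", usage: { output_tokens: 7 } }, // sets usage.completion_tokens
  { type: "message_stop" }, // writes the closing chunk, ends measurement, resolves
];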
handleAbort = () => { stream?.endMeasurement(usage); clientAbortedHandler(resolve, fullText); }
Ends the stream measurement and resolves the pending promise with the text generated so far when the client closes the connection early. @returns {void}
handleAbort
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/anthropic/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/anthropic/index.js
MIT
parseErrorMsg = (event) => { const error = event?.error?.error; if (!!error) return `Anthropic Error:${error?.type || "unknown"} ${ error?.message || "unknown error." }`; return event.message; }
Builds a human-readable error string from an Anthropic stream error event, falling back to the event's own message when no nested error object is present. @param {Object} event - the stream error event @returns {string} The parsed error message
parseErrorMsg
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/anthropic/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/anthropic/index.js
MIT
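Two illustrative inputs for parseErrorMsg; the field names mirror the property accesses in the function and the values are invented.

// Nested Anthropic error object present:
parseErrorMsg({ error: { error: { type: "overloaded_error", message: "Try again." } } });
// => "Anthropic Error:overloaded_error Try again."

// Fallback when no nested error object exists:
parseErrorMsg({ message: "socket hang up" });
// => "socket hang up"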
async compressMessages(promptArgs = {}, rawHistory = []) { const { messageStringCompressor } = require("../../helpers/chat"); const compressedPrompt = await messageStringCompressor( this, promptArgs, rawHistory ); return compressedPrompt; }
Compresses the prompt arguments and chat history via the shared messageStringCompressor helper so the request fits within the model's context window. @param {Object} promptArgs - the prompt arguments @param {Object[]} rawHistory - the raw chat history @returns {Promise<any>} The compressed prompt
compressMessages
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/anthropic/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/anthropic/index.js
MIT
async embedTextInput(textInput) { return await this.embedder.embedTextInput(textInput); }
Delegates embedding of a single text input to the configured embedder. @param {string} textInput - the text to embed @returns {Promise<Array>} The embedding vector
embedTextInput
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/anthropic/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/anthropic/index.js
MIT
async embedChunks(textChunks = []) { return await this.embedder.embedChunks(textChunks); }
Delegates embedding of multiple text chunks to the configured embedder. @param {string[]} textChunks - the text chunks to embed @returns {Promise<Array>} The embedding vectors
embedChunks
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/anthropic/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/anthropic/index.js
MIT
constructPrompt({ systemPrompt = "", contextTexts = [], chatHistory = [], userPrompt = "", attachments = [], }) { const prompt = { role: "system", content: `${systemPrompt}${this.#appendContext(contextTexts)}`, }; return [ prompt, ...formatChatHistory(chatHistory, this.#generateContent), { role: "user", content: this.#generateContent({ userPrompt, attachments }), }, ]; }
Constructs the chat message array from the system prompt, appended context texts, chat history, and the current user prompt with attachments. @param {Object} promptArgs - the prompt components @returns {Object[]} The ordered message array
constructPrompt
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/apipie/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/apipie/index.js
MIT
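An illustrative call to constructPrompt and its assumed output. The exact system content depends on the private #appendContext helper, and the content shape for a plain prompt (no attachments) is assumed to be a string produced by #generateContent.

llm.constructPrompt({
  systemPrompt: "You are helpful.",
  contextTexts: ["Doc snippet one."],
  chatHistory: [{ role: "user", content: "Earlier question" }],
  userPrompt: "Follow-up question",
});
// => [
//   { role: "system", content: "You are helpful.<appended context>" },
//   { role: "user", content: "Earlier question" }, // formatted history
//   { role: "user", content: "Follow-up question" },
// ]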
async getChatCompletion(messages = null, { temperature = 0.7 }) { if (!(await this.isValidChatCompletionModel(this.model))) throw new Error( `ApiPie chat: ${this.model} is not valid for chat completion!` ); const result = await LLMPerformanceMonitor.measureAsyncFunction( this.openai.chat.completions .create({ model: this.model, messages, temperature, }) .catch((e) => { throw new Error(e.message); }) ); if ( !result.output.hasOwnProperty("choices") || result.output.choices.length === 0 ) return null; return { textResponse: result.output.choices[0].message.content, metrics: { prompt_tokens: result.output.usage?.prompt_tokens || 0, completion_tokens: result.output.usage?.completion_tokens || 0, total_tokens: result.output.usage?.total_tokens || 0, outputTps: (result.output.usage?.completion_tokens || 0) / result.duration, duration: result.duration, }, }; }
Requests a chat completion from the ApiPie API and returns the text response with token-usage and timing metrics. @param {Object[]} messages - the message array @param {Object} options - generation options such as temperature @returns {Promise<{textResponse: string, metrics: Object}|null>}
getChatCompletion
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/apipie/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/apipie/index.js
MIT
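A hypothetical caller for the non-streaming path; the class name and require path are assumptions, while the returned fields (textResponse, metrics.outputTps) come from the record above.

const { ApiPieLLM } = require("./server/utils/AiProviders/apipie"); // export name assumed
async function ask(messages) {
  const llm = new ApiPieLLM(); // assumes APIPIE_LLM_API_KEY and a model are configured
  const res = await llm.getChatCompletion(messages, { temperature: 0.7 });
  if (!res) return null; // API returned no choices
  console.log(`tok/s: ${res.metrics.outputTps}`); // completion tokens per second
  return res.textResponse;
}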
async streamGetChatCompletion(messages = null, { temperature = 0.7 }) { if (!(await this.isValidChatCompletionModel(this.model))) throw new Error( `ApiPie chat: ${this.model} is not valid for chat completion!` ); const measuredStreamRequest = await LLMPerformanceMonitor.measureStream( this.openai.chat.completions.create({ model: this.model, stream: true, messages, temperature, }), messages ); return measuredStreamRequest; }
Requests a streaming chat completion from the ApiPie API with performance tracking attached. @param {Object[]} messages - the message array @param {Object} options - generation options such as temperature @returns {Promise<import('../../helpers/chat/LLMPerformanceMonitor').MonitoredStream>} The monitored stream
streamGetChatCompletion
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/apipie/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/apipie/index.js
MIT
handleStream(response, stream, responseProps) { const { uuid = uuidv4(), sources = [] } = responseProps; return new Promise(async (resolve) => { let fullText = ""; // Establish listener to early-abort a streaming response // in case things go sideways or the user does not like the response. // We preserve the generated text but continue as if chat was completed // to preserve previously generated content. const handleAbort = () => { stream?.endMeasurement({ completion_tokens: LLMPerformanceMonitor.countTokens(fullText), }); clientAbortedHandler(resolve, fullText); }; response.on("close", handleAbort); try { for await (const chunk of stream) { const message = chunk?.choices?.[0]; const token = message?.delta?.content; if (token) { fullText += token; writeResponseChunk(response, { uuid, sources: [], type: "textResponseChunk", textResponse: token, close: false, error: false, }); } if (message === undefined || message.finish_reason !== null) { writeResponseChunk(response, { uuid, sources, type: "textResponseChunk", textResponse: "", close: true, error: false, }); response.removeListener("close", handleAbort); stream?.endMeasurement({ completion_tokens: LLMPerformanceMonitor.countTokens(fullText), }); resolve(fullText); } } } catch (e) { writeResponseChunk(response, { uuid, sources, type: "abort", textResponse: null, close: true, error: e.message, }); response.removeListener("close", handleAbort); stream?.endMeasurement({ completion_tokens: LLMPerformanceMonitor.countTokens(fullText), }); resolve(fullText); } }); }
Handles the streamed chat completion from the ApiPie API, writing text chunks to the client and resolving with the accumulated text. @param {Object} response - the response object @param {import('../../helpers/chat/LLMPerformanceMonitor').MonitoredStream} stream - the stream response w/tracking @param {Object} responseProps - the response properties @returns {Promise<string>}
handleStream
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/apipie/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/apipie/index.js
MIT
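For reference, the OpenAI-style chunks the for-await loop consumes, sketched with only the fields it reads; the values are invented.

const exampleChunks = [
  { choices: [{ delta: { content: "Hel" }, finish_reason: null }] }, // token appended and flushed
  { choices: [{ delta: { content: "lo" }, finish_reason: null }] },
  { choices: [{ delta: {}, finish_reason: "stop" }] }, // non-null finish_reason writes the closing chunk
];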
handleAbort = () => { stream?.endMeasurement({ completion_tokens: LLMPerformanceMonitor.countTokens(fullText), }); clientAbortedHandler(resolve, fullText); }
Ends the stream measurement with a token count of the text generated so far and resolves when the client closes the connection early. @returns {void}
handleAbort
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/apipie/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/apipie/index.js
MIT
async embedTextInput(textInput) { return await this.embedder.embedTextInput(textInput); }
Delegates embedding of a single text input to the configured embedder. @param {string} textInput - the text to embed @returns {Promise<Array>} The embedding vector
embedTextInput
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/apipie/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/apipie/index.js
MIT
async embedChunks(textChunks = []) { return await this.embedder.embedChunks(textChunks); }
Delegates embedding of multiple text chunks to the configured embedder. @param {string[]} textChunks - the text chunks to embed @returns {Promise<Array>} The embedding vectors
embedChunks
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/apipie/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/apipie/index.js
MIT
async compressMessages(promptArgs = {}, rawHistory = []) { const { messageArrayCompressor } = require("../../helpers/chat"); const messageArray = this.constructPrompt(promptArgs); return await messageArrayCompressor(this, messageArray, rawHistory); }
Compresses the constructed message array and raw history via the shared messageArrayCompressor helper so the request fits within the model's context window. @param {Object} promptArgs - the prompt arguments @param {Object[]} rawHistory - the raw chat history @returns {Promise<Object[]>} The compressed message array
compressMessages
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/apipie/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/apipie/index.js
MIT
async function fetchApiPieModels(providedApiKey = null) { const apiKey = providedApiKey || process.env.APIPIE_LLM_API_KEY || null; return await fetch(`https://apipie.ai/v1/models`, { method: "GET", headers: { "Content-Type": "application/json", ...(apiKey ? { Authorization: `Bearer ${apiKey}` } : {}), }, }) .then((res) => res.json()) .then(({ data = [] }) => { const models = {}; data.forEach((model) => { models[`${model.provider}/${model.model}`] = { id: `${model.provider}/${model.model}`, name: `${model.provider}/${model.model}`, organization: model.provider, subtype: model.subtype, maxLength: model.max_tokens, }; }); // Cache all response information if (!fs.existsSync(cacheFolder)) fs.mkdirSync(cacheFolder, { recursive: true }); fs.writeFileSync( path.resolve(cacheFolder, "models.json"), JSON.stringify(models), { encoding: "utf-8", } ); fs.writeFileSync( path.resolve(cacheFolder, ".cached_at"), String(Number(new Date())), { encoding: "utf-8", } ); return models; }) .catch((e) => { console.error(e); return {}; }); }
Fetches the available models from the ApiPie API, normalizes them into a map keyed by `provider/model`, and caches the result (and a timestamp) to disk. @param {string|null} providedApiKey - optional API key override @returns {Promise<Object>} The model map, or an empty object on failure
fetchApiPieModels
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/apipie/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/apipie/index.js
MIT
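An illustrative shape of the models.json cache written above. The key format (provider/model) and field names come from the mapping code; the concrete model and numbers are invented.

// Hypothetical contents of <cacheFolder>/models.json:
const cachedModels = {
  "openai/gpt-4o": {
    id: "openai/gpt-4o",
    name: "openai/gpt-4o",
    organization: "openai",
    subtype: "chat", // whatever subtype ApiPie reports
    maxLength: 128000, // model.max_tokens as reported
  },
};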
constructPrompt({ systemPrompt = "", contextTexts = [], chatHistory = [], userPrompt = "", attachments = [], // This is the specific attachment for only this prompt }) { const prompt = { role: this.isOTypeModel ? "user" : "system", content: `${systemPrompt}${this.#appendContext(contextTexts)}`, }; return [ prompt, ...formatChatHistory(chatHistory, this.#generateContent), { role: "user", content: this.#generateContent({ userPrompt, attachments }), }, ]; }
Constructs the chat message array, sending the system prompt under the `user` role for o-type models that do not accept a system message. @param {Object} promptArgs - the prompt components @returns {Object[]} The ordered message array
constructPrompt
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/azureOpenAi/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/azureOpenAi/index.js
MIT
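A sketch of the o-type role swap above; the call result is assumed, since the plain-string content shape depends on the private #generateContent helper.

// Hypothetical call against an o-type deployment (this.isOTypeModel === true):
llm.constructPrompt({ systemPrompt: "Be terse.", userPrompt: "Hi" });
// => [
//   { role: "user", content: "Be terse." }, // system prompt demoted to the user role
//   { role: "user", content: "Hi" },
// ]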
async getChatCompletion(messages = [], { temperature = 0.7 }) { if (!this.model) throw new Error( "No OPEN_MODEL_PREF ENV defined. This must be the name of a deployment on your Azure account for an LLM chat model like GPT-3.5." ); const result = await LLMPerformanceMonitor.measureAsyncFunction( this.openai.chat.completions.create({ messages, model: this.model, ...(this.isOTypeModel ? {} : { temperature }), }) ); if ( !result.output.hasOwnProperty("choices") || result.output.choices.length === 0 ) return null; return { textResponse: result.output.choices[0].message.content, metrics: { prompt_tokens: result.output.usage.prompt_tokens || 0, completion_tokens: result.output.usage.completion_tokens || 0, total_tokens: result.output.usage.total_tokens || 0, outputTps: result.output.usage.completion_tokens / result.duration, duration: result.duration, }, }; }
Requests a chat completion from the Azure OpenAI deployment, omitting the temperature parameter for o-type models, and returns the text response with usage metrics. @param {Object[]} messages - the message array @param {Object} options - generation options such as temperature @returns {Promise<{textResponse: string, metrics: Object}|null>}
getChatCompletion
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/azureOpenAi/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/azureOpenAi/index.js
MIT
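The conditional spread above drops the temperature key entirely for o-type deployments rather than sending undefined; a minimal standalone sketch of the same idiom:

const isOTypeModel = true; // assumption for illustration
const body = {
  model: "my-deployment", // placeholder deployment name
  messages: [{ role: "user", content: "Hi" }],
  ...(isOTypeModel ? {} : { temperature: 0.7 }), // key is absent, not undefined
};
console.log("temperature" in body); // => false when isOTypeModel is true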
async streamGetChatCompletion(messages = [], { temperature = 0.7 }) { if (!this.model) throw new Error( "No OPEN_MODEL_PREF ENV defined. This must be the name of a deployment on your Azure account for an LLM chat model like GPT-3.5." ); const measuredStreamRequest = await LLMPerformanceMonitor.measureStream( await this.openai.chat.completions.create({ messages, model: this.model, ...(this.isOTypeModel ? {} : { temperature }), n: 1, stream: true, }), messages ); return measuredStreamRequest; }
Requests a streaming chat completion from the Azure OpenAI deployment with performance tracking attached. @param {Object[]} messages - the message array @param {Object} options - generation options such as temperature @returns {Promise<import('../../helpers/chat/LLMPerformanceMonitor').MonitoredStream>} The monitored stream
streamGetChatCompletion
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/azureOpenAi/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/azureOpenAi/index.js
MIT
handleStream(response, stream, responseProps) { return handleDefaultStreamResponseV2(response, stream, responseProps); }
Delegates stream handling to the shared OpenAI-compatible stream handler. @param {Object} response - the response object @param {Object} stream - the stream response @param {Object} responseProps - the response properties @returns {Promise<string>}
handleStream
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/azureOpenAi/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/azureOpenAi/index.js
MIT
async embedTextInput(textInput) { return await this.embedder.embedTextInput(textInput); }
Delegates embedding of a single text input to the configured embedder. @param {string} textInput - the text to embed @returns {Promise<Array>} The embedding vector
embedTextInput
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/azureOpenAi/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/azureOpenAi/index.js
MIT
async embedChunks(textChunks = []) { return await this.embedder.embedChunks(textChunks); }
Delegates embedding of multiple text chunks to the configured embedder. @param {string[]} textChunks - the text chunks to embed @returns {Promise<Array>} The embedding vectors
embedChunks
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/azureOpenAi/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/azureOpenAi/index.js
MIT
async compressMessages(promptArgs = {}, rawHistory = []) { const { messageArrayCompressor } = require("../../helpers/chat"); const messageArray = this.constructPrompt(promptArgs); return await messageArrayCompressor(this, messageArray, rawHistory); }
Compresses the constructed message array and raw history via the shared messageArrayCompressor helper so the request fits within the model's context window. @param {Object} promptArgs - the prompt arguments @param {Object[]} rawHistory - the raw chat history @returns {Promise<Object[]>} The compressed message array
compressMessages
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/azureOpenAi/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/azureOpenAi/index.js
MIT
constructor(embedder = null, modelPreference = null) { const requiredEnvVars = [ ...(this.authMethod !== "iam_role" ? [ // required for iam and sessionToken "AWS_BEDROCK_LLM_ACCESS_KEY_ID", "AWS_BEDROCK_LLM_ACCESS_KEY", ] : []), ...(this.authMethod === "sessionToken" ? [ // required for sessionToken "AWS_BEDROCK_LLM_SESSION_TOKEN", ] : []), "AWS_BEDROCK_LLM_REGION", "AWS_BEDROCK_LLM_MODEL_PREFERENCE", ]; // Validate required environment variables for (const envVar of requiredEnvVars) { if (!process.env[envVar]) throw new Error(`Required environment variable ${envVar} is not set.`); } this.model = modelPreference || process.env.AWS_BEDROCK_LLM_MODEL_PREFERENCE; const contextWindowLimit = this.promptWindowLimit(); this.limits = { history: Math.floor(contextWindowLimit * 0.15), system: Math.floor(contextWindowLimit * 0.15), user: Math.floor(contextWindowLimit * 0.7), }; this.bedrockClient = new BedrockRuntimeClient({ region: process.env.AWS_BEDROCK_LLM_REGION, credentials: this.credentials, }); this.embedder = embedder ?? new NativeEmbedder(); this.defaultTemp = 0.7; this.#log( `Initialized with model: ${this.model}. Auth: ${this.authMethod}. Context Window: ${contextWindowLimit}.` ); }
Initializes the AWS Bedrock LLM connector. @param {object | null} [embedder=null] - An optional embedder instance. Defaults to NativeEmbedder. @param {string | null} [modelPreference=null] - Optional model ID override. Defaults to environment variable. @throws {Error} If required environment variables are missing or invalid.
constructor
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/bedrock/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/bedrock/index.js
MIT
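A hypothetical environment for the sessionToken auth path; the variable names come from the validation list in the constructor, and every value is a placeholder.

process.env.AWS_BEDROCK_LLM_CONNECTION_METHOD = "sessionToken";
process.env.AWS_BEDROCK_LLM_ACCESS_KEY_ID = "AKIA...EXAMPLE";
process.env.AWS_BEDROCK_LLM_ACCESS_KEY = "...secret-key...";
process.env.AWS_BEDROCK_LLM_SESSION_TOKEN = "...temporary-token...";
process.env.AWS_BEDROCK_LLM_REGION = "us-east-1";
process.env.AWS_BEDROCK_LLM_MODEL_PREFERENCE = "anthropic.claude-3-5-sonnet-20240620-v1:0"; // placeholder model id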
get credentials() { switch (this.authMethod) { case "iam": // explicit credentials return { accessKeyId: process.env.AWS_BEDROCK_LLM_ACCESS_KEY_ID, secretAccessKey: process.env.AWS_BEDROCK_LLM_ACCESS_KEY, }; case "sessionToken": // Session token is used for temporary credentials return { accessKeyId: process.env.AWS_BEDROCK_LLM_ACCESS_KEY_ID, secretAccessKey: process.env.AWS_BEDROCK_LLM_ACCESS_KEY, sessionToken: process.env.AWS_BEDROCK_LLM_SESSION_TOKEN, }; // IAM role is used for long-term credentials implied by system process // is filled by the AWS SDK automatically if we pass in no credentials case "iam_role": return {}; default: return {}; } }
Gets the credentials for the AWS Bedrock LLM based on the authentication method provided. @returns {object} The credentials object.
credentials
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/bedrock/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/bedrock/index.js
MIT
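The switch above maps each auth method to a credentials object; summarized as a sketch, where the empty object for iam_role is what lets the AWS SDK's default provider chain supply credentials instead:

const credentialShapes = {
  iam: { accessKeyId: "…", secretAccessKey: "…" },
  sessionToken: { accessKeyId: "…", secretAccessKey: "…", sessionToken: "…" },
  iam_role: {}, // SDK default chain (instance profile, env vars, SSO, …) fills in
};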
get authMethod() { const method = process.env.AWS_BEDROCK_LLM_CONNECTION_METHOD || "iam"; return SUPPORTED_CONNECTION_METHODS.includes(method) ? method : "iam"; }
Gets the configured AWS authentication method ('iam', 'iam_role', or 'sessionToken'). Defaults to 'iam' if the environment variable is missing or invalid. @returns {"iam" | "iam_role" | "sessionToken"} The authentication method.
authMethod
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/bedrock/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/bedrock/index.js
MIT
streamingEnabled() { return "streamGetChatCompletion" in this; }
Indicates if the provider supports streaming responses. @returns {boolean} True.
streamingEnabled
javascript
Mintplex-Labs/anything-llm
server/utils/AiProviders/bedrock/index.js
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/bedrock/index.js
MIT
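A hypothetical caller branching on streamingEnabled; the sibling method names follow the common provider surface seen in the other records here and are assumptions for this class.

async function chat(llm, response, messages, responseProps) {
  if (llm.streamingEnabled()) {
    const stream = await llm.streamGetChatCompletion(messages, { temperature: 0.7 });
    return await llm.handleStream(response, stream, responseProps);
  }
  const result = await llm.getChatCompletion(messages, { temperature: 0.7 });
  return result?.textResponse ?? null;
}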