code | docstring | func_name | language | repo | path | url | license
---|---|---|---|---|---|---|---|
static activeImportedPlugins() {
const plugins = [];
this.checkPluginFolderExists();
const folders = fs.readdirSync(path.resolve(pluginsPath));
for (const folder of folders) {
const configLocation = path.resolve(
pluginsPath,
normalizePath(folder),
"plugin.json"
);
if (!this.isValidLocation(configLocation)) continue;
const config = safeJsonParse(fs.readFileSync(configLocation, "utf8"));
if (config.active) plugins.push(`@@${config.hubId}`);
}
return plugins;
}
|
Loads plugins from the `plugins` folder in storage that are custom loaded and defined.
Only loads plugins that are marked active: true.
@returns {string[]} - array of plugin names to be loaded later.
|
activeImportedPlugins
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/imported.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/imported.js
|
MIT
|
static listImportedPlugins() {
const plugins = [];
this.checkPluginFolderExists();
if (!fs.existsSync(pluginsPath)) return plugins;
const folders = fs.readdirSync(path.resolve(pluginsPath));
for (const folder of folders) {
const configLocation = path.resolve(
pluginsPath,
normalizePath(folder),
"plugin.json"
);
if (!this.isValidLocation(configLocation)) continue;
const config = safeJsonParse(fs.readFileSync(configLocation, "utf8"));
plugins.push(config);
}
return plugins;
}
|
Lists all imported plugins.
@returns {Array} - array of plugin configurations (JSON).
|
listImportedPlugins
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/imported.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/imported.js
|
MIT
|
static updateImportedPlugin(hubId, config) {
const configLocation = path.resolve(
pluginsPath,
normalizePath(hubId),
"plugin.json"
);
if (!this.isValidLocation(configLocation)) return;
const currentConfig = safeJsonParse(
fs.readFileSync(configLocation, "utf8"),
null
);
if (!currentConfig) return;
const updatedConfig = { ...currentConfig, ...config };
fs.writeFileSync(configLocation, JSON.stringify(updatedConfig, null, 2));
return updatedConfig;
}
|
Updates a plugin configuration.
@param {string} hubId - The hub ID of the plugin.
@param {object} config - The configuration to update.
@returns {object} - The updated configuration.
|
updateImportedPlugin
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/imported.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/imported.js
|
MIT
|
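A minimal sketch of how these statics might be used together to enable a skill; the hubId is hypothetical, and the class name (`ImportedPlugin`) comes from the log prefixes shown further down in this file:

ImportedPlugin.updateImportedPlugin("open-meteo-weather", { active: true });
ImportedPlugin.activeImportedPlugins(); // -> ["@@open-meteo-weather"] once plugin.json is marked active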
static deletePlugin(hubId) {
if (!hubId) throw new Error("No plugin hubID passed.");
const pluginFolder = path.resolve(pluginsPath, normalizePath(hubId));
if (!this.isValidLocation(pluginFolder)) return;
fs.rmSync(pluginFolder, { recursive: true });
return true;
}
|
Deletes a plugin by removing its entire folder from storage.
@param {string} hubId - The hub ID of the plugin.
@returns {boolean} - True if the plugin was deleted, false otherwise.
|
deletePlugin
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/imported.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/imported.js
|
MIT
|
static validateImportedPluginHandler(hubId) {
const handlerLocation = path.resolve(
pluginsPath,
normalizePath(hubId),
"handler.js"
);
return this.isValidLocation(handlerLocation);
}
|
Validates if the handler.js file exists for the given plugin.
@param {string} hubId - The hub ID of the plugin.
@returns {boolean} - True if the handler.js file exists, false otherwise.
|
validateImportedPluginHandler
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/imported.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/imported.js
|
MIT
|
parseCallOptions() {
const callOpts = {};
if (!this.config.setup_args || typeof this.config.setup_args !== "object") {
return callOpts;
}
for (const [param, definition] of Object.entries(this.config.setup_args)) {
if (definition.required && !definition?.value) {
console.log(
`'${param}' required value for '${this.name}' plugin is missing. Plugin may not function or crash agent.`
);
continue;
}
callOpts[param] = definition.value || definition.default || null;
}
return callOpts;
}
|
Parses the plugin's setup_args into the call options passed to its handler.
Logs a warning for any required setup argument that has no value and omits it; otherwise uses the argument's value or its default.
@returns {object} - key/value map of call options for the plugin handler.
|
parseCallOptions
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/imported.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/imported.js
|
MIT
|
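A small sketch of how `parseCallOptions` resolves `setup_args` into call options; the plugin config below is hypothetical:

// Hypothetical setup_args on an imported plugin's config
this.config.setup_args = {
  API_KEY: { required: true, value: "abc123" },    // value present         -> "abc123"
  UNITS:   { required: false, default: "metric" }, // no value, has default -> "metric"
  REGION:  { required: true },                     // required but missing  -> warning logged, omitted
};
// this.parseCallOptions() -> { API_KEY: "abc123", UNITS: "metric" }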
plugin(runtimeArgs = {}) {
const customFunctions = this.handler.runtime;
return {
runtimeArgs,
name: this.name,
config: this.config,
setup(aibitat) {
aibitat.function({
super: aibitat,
name: this.name,
config: this.config,
runtimeArgs: this.runtimeArgs,
description: this.config.description,
logger: aibitat?.handlerProps?.log || console.log, // Allows plugin to log to the console.
introspect: aibitat?.introspect || console.log, // Allows plugin to display a "thought" in the chat window UI.
runtime: "docker",
webScraper: sharedWebScraper,
examples: this.config.examples ?? [],
parameters: {
$schema: "http://json-schema.org/draft-07/schema#",
type: "object",
properties: this.config.entrypoint.params ?? {},
additionalProperties: false,
},
...customFunctions,
});
},
};
}
|
Builds the aibitat plugin definition for this imported plugin so it can be registered on an aibitat instance.
@param {object} runtimeArgs - Runtime arguments forwarded to the plugin handler.
@returns {object} - The plugin definition ({ runtimeArgs, name, config, setup }).
|
plugin
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/imported.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/imported.js
|
MIT
|
setup(aibitat) {
aibitat.function({
super: aibitat,
name: this.name,
config: this.config,
runtimeArgs: this.runtimeArgs,
description: this.config.description,
logger: aibitat?.handlerProps?.log || console.log, // Allows plugin to log to the console.
introspect: aibitat?.introspect || console.log, // Allows plugin to display a "thought" in the chat window UI.
runtime: "docker",
webScraper: sharedWebScraper,
examples: this.config.examples ?? [],
parameters: {
$schema: "http://json-schema.org/draft-07/schema#",
type: "object",
properties: this.config.entrypoint.params ?? {},
additionalProperties: false,
},
...customFunctions,
});
}
|
Registers this imported plugin as a callable function on the given aibitat instance, wiring in its config, runtime args, logger, introspection, and JSON-schema parameters.
@param {object} aibitat - The aibitat instance to register the function on.
|
setup
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/imported.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/imported.js
|
MIT
|
static async importCommunityItemFromUrl(url, item) {
this.checkPluginFolderExists();
const hubId = item.id;
if (!hubId) return { success: false, error: "No hubId passed to import." };
const zipFilePath = path.resolve(pluginsPath, `${item.id}.zip`);
const pluginFile = item.manifest.files.find(
(file) => file.name === "plugin.json"
);
if (!pluginFile)
return {
success: false,
error: "No plugin.json file found in manifest.",
};
const pluginFolder = path.resolve(pluginsPath, normalizePath(hubId));
if (fs.existsSync(pluginFolder))
console.log(
"ImportedPlugin.importCommunityItemFromUrl - plugin folder already exists - will overwrite"
);
try {
const protocol = new URL(url).protocol.replace(":", "");
const httpLib = protocol === "https" ? require("https") : require("http");
const downloadZipFile = new Promise(async (resolve) => {
try {
console.log(
"ImportedPlugin.importCommunityItemFromUrl - downloading asset from ",
new URL(url).origin
);
const zipFile = fs.createWriteStream(zipFilePath);
const request = httpLib.get(url, function (response) {
response.pipe(zipFile);
zipFile.on("finish", () => {
console.log(
"ImportedPlugin.importCommunityItemFromUrl - downloaded zip file"
);
resolve(true);
});
});
request.on("error", (error) => {
console.error(
"ImportedPlugin.importCommunityItemFromUrl - error downloading zip file: ",
error
);
resolve(false);
});
} catch (error) {
console.error(
"ImportedPlugin.importCommunityItemFromUrl - error downloading zip file: ",
error
);
resolve(false);
}
});
const success = await downloadZipFile;
if (!success)
return { success: false, error: "Failed to download zip file." };
// Unzip the file to the plugin folder
// Note: https://github.com/cthackers/adm-zip?tab=readme-ov-file#electron-original-fs
const AdmZip = require("adm-zip");
const zip = new AdmZip(zipFilePath);
zip.extractAllTo(pluginFolder);
// We want to make sure specific keys are set to the proper values for
// plugin.json so we read and overwrite the file with the proper values.
const pluginJsonPath = path.resolve(pluginFolder, "plugin.json");
const pluginJson = safeJsonParse(fs.readFileSync(pluginJsonPath, "utf8"));
pluginJson.active = false;
pluginJson.hubId = hubId;
fs.writeFileSync(pluginJsonPath, JSON.stringify(pluginJson, null, 2));
console.log(
`ImportedPlugin.importCommunityItemFromUrl - successfully imported plugin to agent-skills/${hubId}`
);
return { success: true, error: null };
} catch (error) {
console.error(
"ImportedPlugin.importCommunityItemFromUrl - error: ",
error
);
return { success: false, error: error.message };
} finally {
if (fs.existsSync(zipFilePath)) fs.unlinkSync(zipFilePath);
}
}
|
Imports a community item from a URL.
The community item is a zip file that contains a plugin.json file and handler.js file.
This function will unzip the file and import the plugin into the agent-skills folder
based on the hubId found in the plugin.json file.
The zip file will be downloaded to the pluginsPath folder and then unzipped and finally deleted.
@param {string} url - The signed URL of the community item zip file.
@param {object} item - The community item.
@returns {Promise<object>} - The result of the import.
|
importCommunityItemFromUrl
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/imported.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/imported.js
|
MIT
|
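A minimal sketch of the import flow; `signedUrl` and the item below are hypothetical but follow the shape the function expects:

const item = {
  id: "open-meteo-weather",
  manifest: { files: [{ name: "plugin.json" }, { name: "handler.js" }] },
};
const { success, error } = await ImportedPlugin.importCommunityItemFromUrl(signedUrl, item);
if (success && ImportedPlugin.validateImportedPluginHandler(item.id)) {
  // Imported skills start with active: false, so flip them on explicitly.
  ImportedPlugin.updateImportedPlugin(item.id, { active: true });
}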
providerDefault(provider = this.provider) {
switch (provider) {
case "openai":
return process.env.OPEN_MODEL_PREF ?? "gpt-4o";
case "anthropic":
return process.env.ANTHROPIC_MODEL_PREF ?? "claude-3-sonnet-20240229";
case "lmstudio":
return process.env.LMSTUDIO_MODEL_PREF ?? "server-default";
case "ollama":
return process.env.OLLAMA_MODEL_PREF ?? "llama3:latest";
case "groq":
return process.env.GROQ_MODEL_PREF ?? "llama3-70b-8192";
case "togetherai":
return (
process.env.TOGETHER_AI_MODEL_PREF ??
"mistralai/Mixtral-8x7B-Instruct-v0.1"
);
case "azure":
return process.env.OPEN_MODEL_PREF;
case "koboldcpp":
return process.env.KOBOLD_CPP_MODEL_PREF ?? null;
case "localai":
return process.env.LOCAL_AI_MODEL_PREF ?? null;
case "openrouter":
return process.env.OPENROUTER_MODEL_PREF ?? "openrouter/auto";
case "mistral":
return process.env.MISTRAL_MODEL_PREF ?? "mistral-medium";
case "generic-openai":
return process.env.GENERIC_OPEN_AI_MODEL_PREF ?? null;
case "perplexity":
return process.env.PERPLEXITY_MODEL_PREF ?? "sonar-small-online";
case "textgenwebui":
return null;
case "bedrock":
return process.env.AWS_BEDROCK_LLM_MODEL_PREFERENCE ?? null;
case "fireworksai":
return process.env.FIREWORKS_AI_LLM_MODEL_PREF ?? null;
case "deepseek":
return process.env.DEEPSEEK_MODEL_PREF ?? "deepseek-chat";
case "litellm":
return process.env.LITE_LLM_MODEL_PREF ?? null;
case "apipie":
return process.env.APIPIE_LLM_MODEL_PREF ?? null;
case "xai":
return process.env.XAI_LLM_MODEL_PREF ?? "grok-beta";
case "novita":
return process.env.NOVITA_LLM_MODEL_PREF ?? "deepseek/deepseek-r1";
case "nvidia-nim":
return process.env.NVIDIA_NIM_LLM_MODEL_PREF ?? null;
case "ppio":
return process.env.PPIO_MODEL_PREF ?? "qwen/qwen2.5-32b-instruct";
case "gemini":
return process.env.GEMINI_LLM_MODEL_PREF ?? "gemini-2.0-flash-lite";
case "dpais":
return process.env.DPAIS_LLM_MODEL_PREF;
default:
return null;
}
}
|
Finds the default model for a given provider. If no default model is set in its associated ENV,
it returns a reasonable base model for the provider if one exists.
@param {string} provider - The provider to find the default model for.
@returns {string|null} The default model for the provider.
|
providerDefault
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/index.js
|
MIT
|
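A short sketch of the fallback behavior, assuming none of the *_MODEL_PREF env vars are set:

// providerDefault("openai")    -> "gpt-4o"
// providerDefault("ollama")    -> "llama3:latest"
// providerDefault("koboldcpp") -> null (no reasonable base model to assume)
process.env.OPEN_MODEL_PREF = "gpt-4o-mini"; // hypothetical override
// providerDefault("openai")    -> "gpt-4o-mini"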
parseCallOptions(args, config = {}, pluginName) {
const callOpts = {};
for (const [param, definition] of Object.entries(config)) {
if (
definition.required &&
(!Object.prototype.hasOwnProperty.call(args, param) ||
args[param] === null)
) {
this.log(
`'${param}' required parameter for '${pluginName}' plugin is missing. Plugin may not function or crash agent.`
);
continue;
}
callOpts[param] = Object.prototype.hasOwnProperty.call(args, param)
? args[param]
: definition.default || null;
}
return callOpts;
}
|
Merges the provided arguments with a plugin's parameter config to build the call options for a plugin invocation.
Logs a warning when a required parameter is missing and falls back to the parameter's default (or null).
@param {object} args - The arguments passed to the plugin call.
@param {object} config - The plugin's parameter configuration.
@param {string} pluginName - The plugin name, used for logging.
@returns {object} - key/value map of call options.
|
parseCallOptions
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/index.js
|
MIT
|
async init() {
await this.#validInvocation();
this.#providerSetupAndCheck();
return this;
}
|
Validates the agent invocation and runs provider setup and checks before the handler is used.
@returns {Promise<this>} - The initialized agent handler.
|
init
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/index.js
|
MIT
|
async createAIbitat(
args = {
socket,
}
) {
this.aibitat = new AIbitat({
provider: this.provider ?? "openai",
model: this.model ?? "gpt-4o",
chats: await this.#chatHistory(20),
handlerProps: {
invocation: this.invocation,
log: this.log,
},
});
// Attach standard websocket plugin for frontend communication.
this.log(`Attached ${AgentPlugins.websocket.name} plugin to Agent cluster`);
this.aibitat.use(
AgentPlugins.websocket.plugin({
socket: args.socket,
muteUserReply: true,
introspection: true,
})
);
// Attach standard chat-history plugin for message storage.
this.log(
`Attached ${AgentPlugins.chatHistory.name} plugin to Agent cluster`
);
this.aibitat.use(AgentPlugins.chatHistory.plugin());
// Load required agents (Default + custom)
await this.#loadAgents();
// Attach all required plugins for functions to operate.
await this.#attachPlugins(args);
}
|
Creates the AIbitat instance for this invocation, attaching the websocket and chat-history plugins, loading the default and custom agents, and attaching all plugins required for function calls.
@param {object} args - Arguments for plugin setup (e.g. the socket used for frontend communication).
|
createAIbitat
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/index.js
|
MIT
|
startAgentCluster() {
return this.aibitat.start({
from: USER_AGENT.name,
to: this.channel ?? WORKSPACE_AGENT.name,
content: this.invocation.prompt,
});
}
|
Starts the agent cluster by sending the invocation prompt from the user agent to the workspace agent (or channel).
@returns The started AIbitat chat.
|
startAgentCluster
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/index.js
|
MIT
|
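The three methods above are typically called in sequence. A sketch, assuming `handler` is an instance of the agent handler class these methods belong to and `socket` is the frontend websocket:

await handler.init();                    // validate the invocation and set up the provider
await handler.createAIbitat({ socket }); // build the AIbitat and attach plugins/agents
handler.startAgentCluster();             // send the invocation prompt from USER to the workspace agent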
constructor(props = {}) {
const {
chats = [],
interrupt = "NEVER",
maxRounds = 100,
provider = "openai",
handlerProps = {}, // Inherited props we can spread so aibitat can access.
...rest
} = props;
this._chats = chats;
this.defaultInterrupt = interrupt;
this.maxRounds = maxRounds;
this.handlerProps = handlerProps;
this.defaultProvider = {
provider,
...rest,
};
this.provider = this.defaultProvider.provider;
this.model = this.defaultProvider.model;
}
|
Creates a new AIbitat instance with the given chat history, interrupt behavior, max rounds, default provider, and any inherited handler props.
@param {object} props - Constructor options (chats, interrupt, maxRounds, provider, handlerProps, and provider-specific settings).
|
constructor
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/index.js
|
MIT
|
get chats() {
return this._chats;
}
|
Get the chat history between agents and channels.
|
chats
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/index.js
|
MIT
|
agent(name = "", config = {}) {
this.agents.set(name, config);
return this;
}
|
Add a new agent to the AIbitat.
@param name
@param config
@returns
|
agent
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/index.js
|
MIT
|
channel(name = "", members = [""], config = {}) {
this.channels.set(name, {
members,
...config,
});
return this;
}
|
Add a new channel to the AIbitat.
@param name
@param members
@param config
@returns
|
channel
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/index.js
|
MIT
|
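A minimal sketch of wiring agents into a channel; the names are hypothetical and `aibitat.start` is documented further down in this file:

const aibitat = new AIbitat({ provider: "openai", model: "gpt-4o" });
aibitat
  .agent("USER", { interrupt: "ALWAYS", role: "I relay the human's requests." })
  .agent("worker", { role: "You are a helpful assistant." })
  .channel("#general", ["USER", "worker"]);
await aibitat.start({ from: "USER", to: "#general", content: "Summarize today's tickets." });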
getAgentConfig(agent = "") {
const config = this.agents.get(agent);
if (!config) {
throw new Error(`Agent configuration "${agent}" not found`);
}
return {
role: "You are a helpful AI assistant.",
// role: `You are a helpful AI assistant.
// Solve tasks using your coding and language skills.
// In the following cases, suggest typescript code (in a typescript coding block) or shell script (in a sh coding block) for the user to execute.
// 1. When you need to collect info, use the code to output the info you need, for example, browse or search the web, download/read a file, print the content of a webpage or a file, get the current date/time, check the operating system. After sufficient info is printed and the task is ready to be solved based on your language skill, you can solve the task by yourself.
// 2. When you need to perform some task with code, use the code to perform the task and output the result. Finish the task smartly.
// Solve the task step by step if you need to. If a plan is not provided, explain your plan first. Be clear which step uses code, and which step uses your language skill.
// When using code, you must indicate the script type in the code block. The user cannot provide any other feedback or perform any other action beyond executing the code you suggest. The user can't modify your code. So do not suggest incomplete code which requires users to modify. Don't use a code block if it's not intended to be executed by the user.
// If you want the user to save the code in a file before executing it, put # filename: <filename> inside the code block as the first line. Don't include multiple code blocks in one response. Do not ask users to copy and paste the result. Instead, use 'print' function for the output when relevant. Check the execution result returned by the user.
// If the result indicates there is an error, fix the error and output the code again. Suggest the full code instead of partial code or code changes. If the error can't be fixed or if the task is not solved even after the code is executed successfully, analyze the problem, revisit your assumption, collect additional info you need, and think of a different approach to try.
// When you find an answer, verify the answer carefully. Include verifiable evidence in your response if possible.
// Reply "TERMINATE" when everything is done.`,
...config,
};
}
|
Get the specific agent configuration.
@param agent The name of the agent.
@throws When the agent configuration is not found.
@returns The agent configuration.
|
getAgentConfig
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/index.js
|
MIT
|
getChannelConfig(channel = "") {
const config = this.channels.get(channel);
if (!config) {
throw new Error(`Channel configuration "${channel}" not found`);
}
return {
maxRounds: 10,
role: "",
...config,
};
}
|
Get the specific channel configuration.
@param channel The name of the channel.
@throws When the channel configuration is not found.
@returns The channel configuration.
|
getChannelConfig
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/index.js
|
MIT
|
getGroupMembers(node = "") {
const group = this.getChannelConfig(node);
return group.members;
}
|
Get the members of a group.
@throws When the group is not defined as an array in the connections.
@param node The name of the group.
@returns The members of the group.
|
getGroupMembers
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/index.js
|
MIT
|
onAbort(listener = () => null) {
this.emitter.on("abort", listener);
return this;
}
|
Triggered when a plugin, socket, or command is aborted.
@param listener
@returns
|
onAbort
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/index.js
|
MIT
|
abort() {
this.emitter.emit("abort", null, this);
}
|
Abort the running of any plugins that may still be pending (Langchain summarize)
|
abort
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/index.js
|
MIT
|
onTerminate(listener = () => null) {
this.emitter.on("terminate", listener);
return this;
}
|
Triggered when a chat is terminated. After this, the chat can't be continued.
@param listener
@returns
|
onTerminate
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/index.js
|
MIT
|
terminate(node = "") {
this.emitter.emit("terminate", node, this);
}
|
Terminate the chat. After this, the chat can't be continued.
@param node Last node to chat with
|
terminate
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/index.js
|
MIT
|
onInterrupt(listener = () => null) {
this.emitter.on("interrupt", listener);
return this;
}
|
Triggered when a chat is interrupted by a node.
@param listener
@returns
|
onInterrupt
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/index.js
|
MIT
|
interrupt(route) {
this._chats.push({
...route,
state: "interrupt",
});
this.emitter.emit("interrupt", route, this);
}
|
Interrupt the chat.
@param route The nodes that participated in the interruption.
@returns
|
interrupt
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/index.js
|
MIT
|
onMessage(listener = (chat) => null) {
this.emitter.on("message", listener);
return this;
}
|
Triggered when a message is added to the chat history.
This can either be the first message or a reply to a message.
@param listener
@returns
|
onMessage
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/index.js
|
MIT
|
newMessage(message) {
const chat = {
...message,
state: "success",
};
this._chats.push(chat);
this.emitter.emit("message", chat, this);
}
|
Register a new successful message in the chat history.
This will trigger the `onMessage` event.
@param message
|
newMessage
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/index.js
|
MIT
|
onError(
listener = (
/**
* The error that occurred.
*
* Native errors are:
* - `APIError`
* - `AuthorizationError`
* - `UnknownError`
* - `RateLimitError`
* - `ServerError`
*/
error = null,
/**
* The message when the error occurred.
*/
{}
) => null
) {
this.emitter.on("replyError", listener);
return this;
}
|
Triggered when an error occurs during the chat.
@param listener
@returns
|
onError
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/index.js
|
MIT
|
newError(route, error) {
const chat = {
...route,
content: error instanceof Error ? error.message : String(error),
state: "error",
};
this._chats.push(chat);
this.emitter.emit("replyError", error, chat);
}
|
Register an error in the chat history.
This will trigger the `onError` event.
@param route
@param error
|
newError
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/index.js
|
MIT
|
onStart(listener = (chat, aibitat) => null) {
this.emitter.on("start", listener);
return this;
}
|
Triggered when a new chat is started.
@param listener
@returns
|
onStart
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/index.js
|
MIT
|
async start(message) {
// register the message in the chat history
this.newMessage(message);
this.emitter.emit("start", message, this);
// ask the node to reply
await this.chat({
to: message.from,
from: message.to,
});
return this;
}
|
Start a new chat.
@param message The message to start the chat.
|
start
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/index.js
|
MIT
|
async chat(route, keepAlive = true) {
// check if the message is for a group
// if it is, select the next node to chat with from the group
// and then ask them to reply.
if (this.channels.get(route.from)) {
// select a node from the group
let nextNode;
try {
nextNode = await this.selectNext(route.from);
} catch (error) {
if (error instanceof APIError) {
return this.newError({ from: route.from, to: route.to }, error);
}
throw error;
}
if (!nextNode) {
// TODO: should it throw an error or keep the chat alive when there is no node to chat with in the group?
// maybe it should wrap up the chat and reply to the original node
// For now, it will terminate the chat
this.terminate(route.from);
return;
}
const nextChat = {
from: nextNode,
to: route.from,
};
if (this.shouldAgentInterrupt(nextNode)) {
this.interrupt(nextChat);
return;
}
// get chats only from the group's nodes
const history = this.getHistory({ to: route.from });
const group = this.getGroupMembers(route.from);
const rounds = history.filter((chat) => group.includes(chat.from)).length;
const { maxRounds } = this.getChannelConfig(route.from);
if (rounds >= maxRounds) {
this.terminate(route.to);
return;
}
await this.chat(nextChat);
return;
}
// If it's a direct message, reply to the message
let reply = "";
try {
reply = await this.reply(route);
} catch (error) {
if (error instanceof APIError) {
return this.newError({ from: route.from, to: route.to }, error);
}
throw error;
}
if (
reply === "TERMINATE" ||
this.hasReachedMaximumRounds(route.from, route.to)
) {
this.terminate(route.to);
return;
}
const newChat = { to: route.from, from: route.to };
if (
reply === "INTERRUPT" ||
(this.agents.get(route.to) && this.shouldAgentInterrupt(route.to))
) {
this.interrupt(newChat);
return;
}
if (keepAlive) {
// keep the chat alive by replying to the other node
await this.chat(newChat, true);
}
}
|
Recursively chat between two nodes.
@param route
@param keepAlive Whether to keep the chat alive.
|
chat
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/index.js
|
MIT
|
shouldAgentInterrupt(agent = "") {
const config = this.getAgentConfig(agent);
return this.defaultInterrupt === "ALWAYS" || config.interrupt === "ALWAYS";
}
|
Check if the agent should interrupt the chat based on its configuration.
@param agent
@returns {boolean} Whether the agent should interrupt the chat.
|
shouldAgentInterrupt
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/index.js
|
MIT
|
async selectNext(channel = "") {
// get all members of the group
const nodes = this.getGroupMembers(channel);
const channelConfig = this.getChannelConfig(channel);
// TODO: move this to when the group is created
// warn if the group is underpopulated
if (nodes.length < 3) {
console.warn(
`- Group (${channel}) is underpopulated with ${nodes.length} agents. Direct communication would be more efficient.`
);
}
// get the nodes that have not reached the maximum number of rounds
const availableNodes = nodes.filter(
(node) => !this.hasReachedMaximumRounds(channel, node)
);
// remove the last node that chatted with the channel, so it doesn't chat again
const lastChat = this._chats.filter((c) => c.to === channel).at(-1);
if (lastChat) {
const index = availableNodes.indexOf(lastChat.from);
if (index > -1) {
availableNodes.splice(index, 1);
}
}
// TODO: what should it do when there is no node to chat with?
if (!availableNodes.length) return;
// get the provider that will be used for the channel
// if the channel has a provider, use that otherwise
// use the GPT-4 because it has a better reasoning
const provider = this.getProviderForConfig({
// @ts-expect-error
model: "gpt-4",
...this.defaultProvider,
...channelConfig,
});
const history = this.getHistory({ to: channel });
// build the messages to send to the provider
const messages = [
{
role: "system",
content: channelConfig.role,
},
{
role: "user",
content: `You are in a role play game. The following roles are available:
${availableNodes
.map((node) => `@${node}: ${this.getAgentConfig(node).role}`)
.join("\n")}.
Read the following conversation.
CHAT HISTORY
${history.map((c) => `@${c.from}: ${c.content}`).join("\n")}
Then select the next role from that is going to speak next.
Only return the role.
`,
},
];
// ask the provider to select the next node to chat with
// and remove the @ from the response
const { result } = await provider.complete(messages);
const name = result?.replace(/^@/g, "");
if (this.agents.get(name)) return name;
// if the name is not in the nodes, return a random node
return availableNodes[Math.floor(Math.random() * availableNodes.length)];
}
|
Select the next node to chat with from a group. The node will be selected based on the history of chats.
It will select the node that has not reached the maximum number of rounds yet and has not chatted with the channel in the last round.
If it could not determine the next node, it will return a random node.
@param channel The name of the group.
@returns The name of the node to chat with.
|
selectNext
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/index.js
|
MIT
|
hasReachedMaximumRounds(from = "", to = "") {
return this.getHistory({ from, to }).length >= this.maxRounds;
}
|
Check if the chat has reached the maximum number of rounds.
|
hasReachedMaximumRounds
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/index.js
|
MIT
|
async reply(route) {
// get the provider for the node that will reply
const fromConfig = this.getAgentConfig(route.from);
const chatHistory =
// if it is sending message to a group, send the group chat history to the provider
// otherwise, send the chat history between the two nodes
this.channels.get(route.to)
? [
{
role: "user",
content: `You are in a whatsapp group. Read the following conversation and then reply.
Do not add introduction or conclusion to your reply because this will be a continuous conversation. Don't introduce yourself.
CHAT HISTORY
${this.getHistory({ to: route.to })
.map((c) => `@${c.from}: ${c.content}`)
.join("\n")}
@${route.from}:`,
},
]
: this.getHistory(route).map((c) => ({
content: c.content,
role: c.from === route.to ? "user" : "assistant",
}));
// build the messages to send to the provider
const messages = [
{
content: fromConfig.role,
role: "system",
},
// get the history of chats between the two nodes
...chatHistory,
];
// get the functions that the node can call
const functions = fromConfig.functions
?.map((name) => this.functions.get(this.#parseFunctionName(name)))
.filter((a) => !!a);
const provider = this.getProviderForConfig({
...this.defaultProvider,
...fromConfig,
});
// get the chat completion
const content = await this.handleExecution(
provider,
messages,
functions,
route.from
);
this.newMessage({ ...route, content });
return content;
}
|
Ask the AI provider to generate a reply to the chat.
@param route.to The node that sent the chat.
@param route.from The node that will reply to the chat.
|
reply
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/index.js
|
MIT
|
async handleExecution(
provider,
messages = [],
functions = [],
byAgent = null
) {
// get the chat completion
const completion = await provider.complete(messages, functions);
if (completion.functionCall) {
const { name, arguments: args } = completion.functionCall;
const fn = this.functions.get(name);
// if provider hallucinated on the function name
// ask the provider to complete again
if (!fn) {
return await this.handleExecution(
provider,
[
...messages,
{
name,
role: "function",
content: `Function "${name}" not found. Try again.`,
},
],
functions,
byAgent
);
}
// Execute the function and return the result to the provider
fn.caller = byAgent || "agent";
// If provider is verbose, log the tool call to the frontend
if (provider?.verbose) {
this?.introspect?.(
`[debug]: ${fn.caller} is attempting to call \`${name}\` tool`
);
}
// Always log the tool call to the console for debugging purposes
this.handlerProps?.log?.(
`[debug]: ${fn.caller} is attempting to call \`${name}\` tool`
);
const result = await fn.handler(args);
Telemetry.sendTelemetry("agent_tool_call", { tool: name }, null, true);
// If the tool call has direct output enabled, return the result directly to the chat
// without any further processing and no further tool calls will be run.
if (this.skipHandleExecution) {
this.skipHandleExecution = false; // reset the flag to prevent next tool call from being skipped
this?.introspect?.(
`The tool call has direct output enabled! The result will be returned directly to the chat without any further processing and no further tool calls will be run.`
);
this?.introspect?.(`Tool use completed.`);
this.handlerProps?.log?.(
`${fn.caller} tool call resulted in direct output! Returning raw result as string. NO MORE TOOL CALLS WILL BE EXECUTED.`
);
return result;
}
return await this.handleExecution(
provider,
[
...messages,
{
name,
role: "function",
content: result,
},
],
functions,
byAgent
);
}
return completion?.result;
}
|
Run the chat completion and handle any resulting function (tool) calls, recursively feeding tool results back to the provider until a final text reply is produced.
@param provider The AI provider to complete with.
@param messages The messages to send to the provider.
@param functions The functions the calling agent may use.
@param byAgent The agent making the call, used for logging.
|
handleExecution
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/index.js
|
MIT
|
async continue(feedback) {
const lastChat = this._chats.at(-1);
if (!lastChat || lastChat.state !== "interrupt") {
throw new Error("No chat to continue");
}
// remove the last chat's that was interrupted
this._chats.pop();
const { from, to } = lastChat;
if (this.hasReachedMaximumRounds(from, to)) {
throw new Error("Maximum rounds reached");
}
if (feedback) {
const message = {
from,
to,
content: feedback,
};
// register the message in the chat history
this.newMessage(message);
// ask the node to reply
await this.chat({
to: message.from,
from: message.to,
});
} else {
await this.chat({ from, to });
}
return this;
}
|
Continue the chat from the last interruption.
If the last chat was not an interruption, it will throw an error.
Optionally provide feedback for the point where it was interrupted.
@param feedback The feedback to the interruption if any.
@returns
|
continue
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/index.js
|
MIT
|
async retry() {
const lastChat = this._chats.at(-1);
if (!lastChat || lastChat.state !== "error") {
throw new Error("No chat to retry");
}
// remove the last chat's that threw an error
const { from, to } = this?._chats?.pop();
await this.chat({ from, to });
return this;
}
|
Retry the last chat that threw an error.
If the last chat was not an error, it will throw an error.
|
retry
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/index.js
|
MIT
|
getHistory({ from, to }) {
return this._chats.filter((chat) => {
const isSuccess = chat.state === "success";
// return all chats to the node
if (!from) {
return isSuccess && chat.to === to;
}
// get all chats from the node
if (!to) {
return isSuccess && chat.from === from;
}
// check if the chat is between the two nodes
const hasSent = chat.from === from && chat.to === to;
const hasReceived = chat.from === to && chat.to === from;
const mutual = hasSent || hasReceived;
return isSuccess && mutual;
});
}
|
Get the chat history between two nodes or all chats to/from a node.
|
getHistory
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/index.js
|
MIT
|
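A small sketch of the filtering semantics with hypothetical chat entries:

// _chats: [ { from: "A", to: "B", state: "success" },
//           { from: "B", to: "A", state: "success" },
//           { from: "A", to: "B", state: "error" } ]
// getHistory({ to: "B" })            -> only successful chats sent to B  (first entry)
// getHistory({ from: "A", to: "B" }) -> both successful A<->B entries    (direction ignored)
// error/interrupt entries are never returned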
getProviderForConfig(config) {
if (typeof config.provider === "object") {
return config.provider;
}
switch (config.provider) {
case "openai":
return new Providers.OpenAIProvider({ model: config.model });
case "anthropic":
return new Providers.AnthropicProvider({ model: config.model });
case "lmstudio":
return new Providers.LMStudioProvider({ model: config.model });
case "ollama":
return new Providers.OllamaProvider({ model: config.model });
case "groq":
return new Providers.GroqProvider({ model: config.model });
case "togetherai":
return new Providers.TogetherAIProvider({ model: config.model });
case "azure":
return new Providers.AzureOpenAiProvider({ model: config.model });
case "koboldcpp":
return new Providers.KoboldCPPProvider({});
case "localai":
return new Providers.LocalAIProvider({ model: config.model });
case "openrouter":
return new Providers.OpenRouterProvider({ model: config.model });
case "mistral":
return new Providers.MistralProvider({ model: config.model });
case "generic-openai":
return new Providers.GenericOpenAiProvider({ model: config.model });
case "perplexity":
return new Providers.PerplexityProvider({ model: config.model });
case "textgenwebui":
return new Providers.TextWebGenUiProvider({});
case "bedrock":
return new Providers.AWSBedrockProvider({});
case "fireworksai":
return new Providers.FireworksAIProvider({ model: config.model });
case "nvidia-nim":
return new Providers.NvidiaNimProvider({ model: config.model });
case "deepseek":
return new Providers.DeepSeekProvider({ model: config.model });
case "litellm":
return new Providers.LiteLLMProvider({ model: config.model });
case "apipie":
return new Providers.ApiPieProvider({ model: config.model });
case "xai":
return new Providers.XAIProvider({ model: config.model });
case "novita":
return new Providers.NovitaProvider({ model: config.model });
case "ppio":
return new Providers.PPIOProvider({ model: config.model });
case "gemini":
return new Providers.GeminiProvider({ model: config.model });
case "dpais":
return new Providers.DellProAiStudioProvider({ model: config.model });
default:
throw new Error(
`Unknown provider: ${config.provider}. Please use a valid provider.`
);
}
}
|
Get provider based on configurations.
If the provider is a string, it will return the default provider for that string.
@param config The provider configuration.
|
getProviderForConfig
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/index.js
|
MIT
|
function(functionConfig) {
this.functions.set(functionConfig.name, functionConfig);
return this;
}
|
Register a new function to be called by the AIbitat agents.
You are also required to specify which node can call the function.
@param functionConfig The function configuration.
|
function
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/index.js
|
MIT
|
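A minimal sketch of registering a callable function and pointing an agent at it; the tool name and handler are hypothetical:

aibitat.function({
  name: "get-time",
  description: "Returns the current ISO timestamp.",
  parameters: { type: "object", properties: {} },
  handler: async () => new Date().toISOString(),
});
// An agent opts in by listing the function name in its config:
aibitat.agent("worker", { role: "You answer questions.", functions: ["get-time"] });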
async start(controller) {
for (const chunk of chunks) {
const bytes = new TextEncoder().encode(chunk + " ");
controller.enqueue(bytes);
await new Promise((r) =>
setTimeout(
r,
// get a random number between 10ms and 50ms to simulate a random delay
Math.floor(Math.random() * 40) + 10
)
);
}
controller.close();
}
|
Streams the message content to the terminal, enqueueing each chunk with a short random delay to simulate streaming output.
@param controller The ReadableStream controller used to enqueue the encoded chunks.
|
start
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/plugins/cli.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/plugins/cli.js
|
MIT
|
setup(aibitat) {
const folderPath = path.dirname(filename);
// get path from filename
if (folderPath) {
fs.mkdirSync(folderPath, { recursive: true });
}
aibitat.onMessage(() => {
const content = JSON.stringify(aibitat.chats, null, 2);
fs.writeFile(filename, content, (err) => {
if (err) {
console.error(err);
}
});
});
}
|
Plugin to save chat history to a json file
|
setup
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/plugins/file-history.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/plugins/file-history.js
|
MIT
|
setup(aibitat) {
aibitat.onError(async (error) => {
let errorMessage =
error?.message || "An error occurred while running the agent.";
console.error(chalk.red(` error: ${errorMessage}`), error);
aibitat.introspect(
`Error encountered while running: ${errorMessage}`
);
handler.send(
JSON.stringify({ type: "wssFailure", content: errorMessage })
);
aibitat.terminate();
});
aibitat.introspect = (messageText) => {
if (!introspection) return; // Dump thoughts when not wanted.
handler.send(
JSON.stringify({ type: "statusResponse", content: messageText })
);
};
// expose function for sockets across aibitat
// type param must be set or else msg will not be shown or handled in UI.
aibitat.socket = {
send: (type = "__unhandled", content = "") => {
handler.send(JSON.stringify({ type, content }));
},
};
// We can only receive one message response with HTTP
// so we end on first response.
aibitat.onMessage((message) => {
if (message.from !== "USER")
Telemetry.sendTelemetry("agent_chat_sent");
if (message.from === "USER" && muteUserReply) return;
handler.send(JSON.stringify(message));
handler.close();
});
aibitat.onTerminate(() => {
handler.close();
});
}
|
HTTP Interface plugin for Aibitat to emulate a websocket interface in the agent
framework so we don't have to modify the interface for passing messages and responses
in REST or WSS.
|
setup
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/plugins/http-socket.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/plugins/http-socket.js
|
MIT
|
middleTruncate(str, length = 5) {
if (str.length <= length) return str;
return `${str.slice(0, length)}...${str.slice(-length)}`;
}
|
Utility function to truncate a string to a given length for debugging
calls to the API while keeping the actual values mostly intact
@param {string} str - The string to truncate
@param {number} length - The length to truncate the string to
@returns {string} The truncated string
|
middleTruncate
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/plugins/web-browsing.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/plugins/web-browsing.js
|
MIT
|
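For example (hypothetical URL):

// middleTruncate("https://example.com/some/very/long/path", 5) -> "https.../path"
// Strings at or under the target length are returned unchanged.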
function getDBClient(identifier = "", connectionConfig = {}) {
switch (identifier) {
case "mysql":
const { MySQLConnector } = require("./MySQL");
return new MySQLConnector(connectionConfig);
case "postgresql":
const { PostgresSQLConnector } = require("./Postgresql");
return new PostgresSQLConnector(connectionConfig);
case "sql-server":
const { MSSQLConnector } = require("./MSSQL");
return new MSSQLConnector(connectionConfig);
default:
throw new Error(
`There is no supported database connector for ${identifier}`
);
}
}
|
@param {SQLEngine} identifier
@param {object} connectionConfig
@returns Database Connection Engine Class for SQLAgent or throws error
|
getDBClient
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/plugins/sql-agent/SQLConnectors/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/plugins/sql-agent/SQLConnectors/index.js
|
MIT
|
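A minimal sketch of using a connector returned by `getDBClient`; the connection config shape is an assumption, and `runQuery`/`getTablesSql` are shown in the connector entries that follow:

const db = getDBClient("postgresql", {
  connectionString: "postgresql://agent:pw@localhost:5432/appdb", // hypothetical config shape
});
const { rows, count, error } = await db.runQuery(db.getTablesSql());
if (error) console.error("Table listing failed:", error);
else console.log(`Found ${count} tables`, rows);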
async function listSQLConnections() {
return safeJsonParse(
(await SystemSettings.get({ label: "agent_sql_connections" }))?.value,
[]
);
}
|
Lists all of the known database connections that can be used by the agent.
@returns {Promise<[SQLConnection]>}
|
listSQLConnections
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/plugins/sql-agent/SQLConnectors/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/plugins/sql-agent/SQLConnectors/index.js
|
MIT
|
async runQuery(queryString = "") {
const result = { rows: [], count: 0, error: null };
try {
if (!this.#connected) await this.connect();
const query = await this._client.query(queryString);
result.rows = query.recordset;
result.count = query.rowsAffected.reduce((sum, a) => sum + a, 0);
} catch (err) {
console.log(this.constructor.name, err);
result.error = err.message;
} finally {
await this._client.close();
this.#connected = false;
}
return result;
}
|
@param {string} queryString the SQL query to be run
@returns {import(".").QueryResult}
|
runQuery
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/plugins/sql-agent/SQLConnectors/MSSQL.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/plugins/sql-agent/SQLConnectors/MSSQL.js
|
MIT
|
getTablesSql() {
return `SELECT name FROM sysobjects WHERE xtype='U';`;
}
|
@returns {string} the SQL statement that lists all user tables in the database.
|
getTablesSql
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/plugins/sql-agent/SQLConnectors/MSSQL.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/plugins/sql-agent/SQLConnectors/MSSQL.js
|
MIT
|
getTableSchemaSql(table_name) {
return `SELECT COLUMN_NAME,COLUMN_DEFAULT,IS_NULLABLE,DATA_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME='${table_name}'`;
}
|
@param {string} table_name the table to describe
@returns {string} the SQL statement that returns the column schema for the given table.
|
getTableSchemaSql
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/plugins/sql-agent/SQLConnectors/MSSQL.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/plugins/sql-agent/SQLConnectors/MSSQL.js
|
MIT
|
async runQuery(queryString = "") {
const result = { rows: [], count: 0, error: null };
try {
if (!this.#connected) await this.connect();
const [query] = await this._client.query(queryString);
result.rows = query;
result.count = query?.length;
} catch (err) {
console.log(this.constructor.name, err);
result.error = err.message;
} finally {
await this._client.end();
this.#connected = false;
}
return result;
}
|
@param {string} queryString the SQL query to be run
@returns {import(".").QueryResult}
|
runQuery
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/plugins/sql-agent/SQLConnectors/MySQL.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/plugins/sql-agent/SQLConnectors/MySQL.js
|
MIT
|
getTablesSql() {
return `SELECT table_name FROM information_schema.tables WHERE table_schema = '${this.database_id}'`;
}
|
@returns {string} the SQL statement that lists all user tables in the database.
|
getTablesSql
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/plugins/sql-agent/SQLConnectors/MySQL.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/plugins/sql-agent/SQLConnectors/MySQL.js
|
MIT
|
getTableSchemaSql(table_name) {
return `SHOW COLUMNS FROM ${this.database_id}.${table_name};`;
}
|
@param {string} table_name the table to describe
@returns {string} the SQL statement that returns the column schema for the given table.
|
getTableSchemaSql
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/plugins/sql-agent/SQLConnectors/MySQL.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/plugins/sql-agent/SQLConnectors/MySQL.js
|
MIT
|
async runQuery(queryString = "") {
const result = { rows: [], count: 0, error: null };
try {
if (!this.#connected) await this.connect();
const query = await this._client.query(queryString);
result.rows = query.rows;
result.count = query.rowCount;
} catch (err) {
console.log(this.constructor.name, err);
result.error = err.message;
} finally {
await this._client.end();
this.#connected = false;
}
return result;
}
|
@param {string} queryString the SQL query to be run
@returns {import(".").QueryResult}
|
runQuery
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/plugins/sql-agent/SQLConnectors/Postgresql.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/plugins/sql-agent/SQLConnectors/Postgresql.js
|
MIT
|
getTablesSql() {
return `SELECT * FROM pg_catalog.pg_tables WHERE schemaname = 'public'`;
}
|
@returns {string} the SQL statement that lists all user tables in the database.
|
getTablesSql
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/plugins/sql-agent/SQLConnectors/Postgresql.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/plugins/sql-agent/SQLConnectors/Postgresql.js
|
MIT
|
getTableSchemaSql(table_name) {
return ` select column_name, data_type, character_maximum_length, column_default, is_nullable from INFORMATION_SCHEMA.COLUMNS where table_name = '${table_name}'`;
}
|
@param {string} table_name the table to describe
@returns {string} the SQL statement that returns the column schema for the given table.
|
getTableSchemaSql
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/plugins/sql-agent/SQLConnectors/Postgresql.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/plugins/sql-agent/SQLConnectors/Postgresql.js
|
MIT
|
format(connectionStringObject) {
if (!connectionStringObject) {
return this.scheme + "://localhost";
}
if (
this.scheme &&
connectionStringObject.scheme &&
this.scheme !== connectionStringObject.scheme
) {
throw new Error(`Scheme not supported: ${connectionStringObject.scheme}`);
}
let uri =
(this.scheme ||
connectionStringObject.scheme ||
ConnectionStringParser.DEFAULT_SCHEME) + "://";
if (connectionStringObject.username) {
uri += encodeURIComponent(connectionStringObject.username);
// Allow empty passwords
if (connectionStringObject.password) {
uri += ":" + encodeURIComponent(connectionStringObject.password);
}
uri += "@";
}
uri += this._formatAddress(connectionStringObject);
// Only put a slash when there is an endpoint
if (connectionStringObject.endpoint) {
uri += "/" + encodeURIComponent(connectionStringObject.endpoint);
}
if (
connectionStringObject.options &&
Object.keys(connectionStringObject.options).length > 0
) {
uri +=
"?" +
Object.keys(connectionStringObject.options)
.map(
(option) =>
encodeURIComponent(option) +
"=" +
encodeURIComponent(connectionStringObject.options[option])
)
.join("&");
}
return uri;
}
|
Takes a connection string object and returns a URI string of the form:
scheme://[username[:password]@]host1[:port1][,host2[:port2],...[,hostN[:portN]]][/[endpoint]][?options]
@param {Object} connectionStringObject The object that describes connection string parameters
|
format
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/plugins/sql-agent/SQLConnectors/utils.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/plugins/sql-agent/SQLConnectors/utils.js
|
MIT
|
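A small sketch of `format` output; the parser construction is assumed and the connection details are hypothetical:

// Assuming `parser` is a ConnectionStringParser instance whose scheme is "postgresql"
parser.format({
  username: "agent",
  password: "s3cret",
  hosts: [{ host: "db.internal", port: 5432 }],
  endpoint: "analytics",
  options: { sslmode: "require" },
});
// -> "postgresql://agent:s3cret@db.internal:5432/analytics?sslmode=require"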
parse(uri) {
const connectionStringParser = new RegExp(
"^\\s*" + // Optional whitespace padding at the beginning of the line
"([^:]+)://" + // Scheme (Group 1)
"(?:([^:@,/?=&]+)(?::([^:@,/?=&]+))?@)?" + // User (Group 2) and Password (Group 3)
"([^@/?=&]+)" + // Host address(es) (Group 4)
"(?:/([^:@,/?=&]+)?)?" + // Endpoint (Group 5)
"(?:\\?([^:@,/?]+)?)?" + // Options (Group 6)
"\\s*$", // Optional whitespace padding at the end of the line
"gi"
);
const connectionStringObject = {};
if (!uri.includes("://")) {
throw new Error(`No scheme found in URI ${uri}`);
}
const tokens = connectionStringParser.exec(uri);
if (Array.isArray(tokens)) {
connectionStringObject.scheme = tokens[1];
if (this.scheme && this.scheme !== connectionStringObject.scheme) {
throw new Error(`URI must start with '${this.scheme}://'`);
}
connectionStringObject.username = tokens[2]
? decodeURIComponent(tokens[2])
: tokens[2];
connectionStringObject.password = tokens[3]
? decodeURIComponent(tokens[3])
: tokens[3];
connectionStringObject.hosts = this._parseAddress(tokens[4]);
connectionStringObject.endpoint = tokens[5]
? decodeURIComponent(tokens[5])
: tokens[5];
connectionStringObject.options = tokens[6]
? this._parseOptions(tokens[6])
: tokens[6];
}
return connectionStringObject;
}
|
Parses a connection string URI into its component parts. Scheme and hosts will always be present;
other fields will only be present in the result if they were present in the input.
@param {string} uri The connection string URI
@returns {ConnectionStringObject} The connection string object
|
parse
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/plugins/sql-agent/SQLConnectors/utils.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/plugins/sql-agent/SQLConnectors/utils.js
|
MIT
|
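And the inverse direction with `parse`, again with hypothetical values:

// Assuming a parser instance without a fixed scheme:
// parser.parse("mysql://root:pw@127.0.0.1:3306/appdb?charset=utf8")
// -> { scheme: "mysql", username: "root", password: "pw",
//      hosts: [{ host: "127.0.0.1", port: 3306 }],
//      endpoint: "appdb", options: { charset: "utf8" } }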
_formatAddress(connectionStringObject) {
return connectionStringObject.hosts
.map(
(address) =>
encodeURIComponent(address.host) +
(address.port
? ":" + encodeURIComponent(address.port.toString(10))
: "")
)
.join(",");
}
|
Formats the address portion of a connection string
@param {Object} connectionStringObject The object that describes connection string parameters
|
_formatAddress
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/plugins/sql-agent/SQLConnectors/utils.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/plugins/sql-agent/SQLConnectors/utils.js
|
MIT
|
_parseAddress(addresses) {
return addresses.split(",").map((address) => {
const i = address.indexOf(":");
return i >= 0
? {
host: decodeURIComponent(address.substring(0, i)),
port: +address.substring(i + 1),
}
: { host: decodeURIComponent(address) };
});
}
|
Parses an address
@param {string} addresses The address(es) to process
|
_parseAddress
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/plugins/sql-agent/SQLConnectors/utils.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/plugins/sql-agent/SQLConnectors/utils.js
|
MIT
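For reference, a small sketch of what _parseAddress yields for a mixed host list; it is normally invoked internally by parse(), and the wrapper class remains an assumption.

// Shown directly here for illustration only.
const hosts = new ConnectionStringParser()._parseAddress("cache-a:6379,cache-b:6380,cache-c");
// hosts deep-equals [
//   { host: "cache-a", port: 6379 },
//   { host: "cache-b", port: 6380 },
//   { host: "cache-c" },
// ]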
|
_parseOptions(options) {
const result = {};
options.split("&").forEach((option) => {
const i = option.indexOf("=");
if (i >= 0) {
result[decodeURIComponent(option.substring(0, i))] = decodeURIComponent(
option.substring(i + 1)
);
}
});
return result;
}
|
Parses options
@param {string} options The options to process
|
_parseOptions
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/plugins/sql-agent/SQLConnectors/utils.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/plugins/sql-agent/SQLConnectors/utils.js
|
MIT
|
constructor(client) {
if (this.constructor == Provider) {
return;
}
this._client = client;
}
|
@typedef {Object} LangChainModelConfig
@property {(string|null)} baseURL - Override the default base URL process.env for this provider
@property {(string|null)} apiKey - Override the default process.env for this provider
@property {(number|null)} temperature - Override the default temperature
@property {(string|null)} model - Overrides model used for provider.
|
constructor
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/providers/ai-provider.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/ai-provider.js
|
MIT
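The concrete providers further down extend this base class; a minimal sketch of that pattern, assuming only the constructor, client getter, and providerLog shown in these records (ExampleProvider and its stand-in client are illustrative):

// Illustrative subclass - ExampleProvider and its fake SDK client are assumptions.
class ExampleProvider extends Provider {
  constructor(config = {}) {
    const client = { chat: async () => "hello from the stub client" };
    super(client);
    this.model = config.model ?? "example-model";
  }

  async complete(_messages, _functions = []) {
    this.providerLog("Running stub completion.");
    return { result: await this.client.chat(), cost: 0 };
  }
}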
|
providerLog(text, ...args) {
console.log(
`\x1b[36m[AgentLLM${this?.model ? ` - ${this.model}` : ""}]\x1b[0m ${text}`,
...args
);
}
|
@typedef {Object} LangChainModelConfig
@property {(string|null)} baseURL - Override the default base URL process.env for this provider
@property {(string|null)} apiKey - Override the default process.env for this provider
@property {(number|null)} temperature - Override the default temperature
@property {(string|null)} model - Overrides model used for provider.
|
providerLog
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/providers/ai-provider.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/ai-provider.js
|
MIT
|
get client() {
return this._client;
}
|
@typedef {Object} LangChainModelConfig
@property {(string|null)} baseURL - Override the default base URL process.env for this provider
@property {(string|null)} apiKey - Override the default process.env for this provider
@property {(number|null)} temperature - Override the default temperature
@property {(string|null)} model - Overrides model used for provider.
|
client
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/providers/ai-provider.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/ai-provider.js
|
MIT
|
static LangChainChatModel(provider = "openai", config = {}) {
switch (provider) {
// Cloud models
case "openai":
return new ChatOpenAI({
apiKey: process.env.OPEN_AI_KEY,
...config,
});
case "anthropic":
return new ChatAnthropic({
apiKey: process.env.ANTHROPIC_API_KEY,
...config,
});
case "groq":
return new ChatOpenAI({
configuration: {
baseURL: "https://api.groq.com/openai/v1",
},
apiKey: process.env.GROQ_API_KEY,
...config,
});
case "mistral":
return new ChatOpenAI({
configuration: {
baseURL: "https://api.mistral.ai/v1",
},
apiKey: process.env.MISTRAL_API_KEY ?? null,
...config,
});
case "openrouter":
return new ChatOpenAI({
configuration: {
baseURL: "https://openrouter.ai/api/v1",
defaultHeaders: {
"HTTP-Referer": "https://anythingllm.com",
"X-Title": "AnythingLLM",
},
},
apiKey: process.env.OPENROUTER_API_KEY ?? null,
...config,
});
case "perplexity":
return new ChatOpenAI({
configuration: {
baseURL: "https://api.perplexity.ai",
},
apiKey: process.env.PERPLEXITY_API_KEY ?? null,
...config,
});
case "togetherai":
return new ChatOpenAI({
configuration: {
baseURL: "https://api.together.xyz/v1",
},
apiKey: process.env.TOGETHER_AI_API_KEY ?? null,
...config,
});
case "generic-openai":
return new ChatOpenAI({
configuration: {
baseURL: process.env.GENERIC_OPEN_AI_BASE_PATH,
},
apiKey: process.env.GENERIC_OPEN_AI_API_KEY,
maxTokens: toValidNumber(
process.env.GENERIC_OPEN_AI_MAX_TOKENS,
1024
),
...config,
});
case "bedrock":
// Grab just the credentials from the bedrock provider
// using a closure to avoid circular dependency + to avoid instantiating the provider
const credentials = (() => {
const AWSBedrockProvider = require("./bedrock");
const bedrockProvider = new AWSBedrockProvider();
return bedrockProvider.credentials;
})();
return new ChatBedrockConverse({
model: process.env.AWS_BEDROCK_LLM_MODEL_PREFERENCE,
region: process.env.AWS_BEDROCK_LLM_REGION,
credentials: credentials,
...config,
});
case "fireworksai":
return new ChatOpenAI({
apiKey: process.env.FIREWORKS_AI_LLM_API_KEY,
...config,
});
case "apipie":
return new ChatOpenAI({
configuration: {
baseURL: "https://apipie.ai/v1",
},
apiKey: process.env.APIPIE_LLM_API_KEY ?? null,
...config,
});
case "deepseek":
return new ChatOpenAI({
configuration: {
baseURL: "https://api.deepseek.com/v1",
},
apiKey: process.env.DEEPSEEK_API_KEY ?? null,
...config,
});
case "xai":
return new ChatOpenAI({
configuration: {
baseURL: "https://api.x.ai/v1",
},
apiKey: process.env.XAI_LLM_API_KEY ?? null,
...config,
});
case "novita":
return new ChatOpenAI({
configuration: {
baseURL: "https://api.novita.ai/v3/openai",
},
apiKey: process.env.NOVITA_LLM_API_KEY ?? null,
...config,
});
case "ppio":
return new ChatOpenAI({
configuration: {
baseURL: "https://api.ppinfra.com/v3/openai",
},
apiKey: process.env.PPIO_API_KEY ?? null,
...config,
});
case "gemini":
return new ChatOpenAI({
configuration: {
baseURL: "https://generativelanguage.googleapis.com/v1beta/openai/",
},
apiKey: process.env.GEMINI_API_KEY ?? null,
...config,
});
// OSS Model Runners
// case "anythingllm_ollama":
// return new ChatOllama({
// baseUrl: process.env.PLACEHOLDER,
// ...config,
// });
case "ollama":
return new ChatOllama({
baseUrl: process.env.OLLAMA_BASE_PATH,
...config,
});
case "lmstudio":
return new ChatOpenAI({
configuration: {
baseURL: parseLMStudioBasePath(process.env.LMSTUDIO_BASE_PATH),
},
apiKey: "not-used", // Needs to be specified or else will assume OpenAI
...config,
});
case "koboldcpp":
return new ChatOpenAI({
configuration: {
baseURL: process.env.KOBOLD_CPP_BASE_PATH,
},
apiKey: "not-used",
...config,
});
case "localai":
return new ChatOpenAI({
configuration: {
baseURL: process.env.LOCAL_AI_BASE_PATH,
},
apiKey: process.env.LOCAL_AI_API_KEY ?? "not-used",
...config,
});
case "textgenwebui":
return new ChatOpenAI({
configuration: {
baseURL: process.env.TEXT_GEN_WEB_UI_BASE_PATH,
},
apiKey: process.env.TEXT_GEN_WEB_UI_API_KEY ?? "not-used",
...config,
});
case "litellm":
return new ChatOpenAI({
configuration: {
baseURL: process.env.LITE_LLM_BASE_PATH,
},
apiKey: process.env.LITE_LLM_API_KEY ?? null,
...config,
});
case "nvidia-nim":
return new ChatOpenAI({
configuration: {
baseURL: process.env.NVIDIA_NIM_LLM_BASE_PATH,
},
apiKey: null,
...config,
});
default:
throw new Error(`Unsupported provider ${provider} for this task.`);
}
}
|
@param {string} provider - the string key of the provider LLM being loaded.
@param {LangChainModelConfig} config - Config to be used to override default connection object.
@returns
|
LangChainChatModel
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/providers/ai-provider.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/ai-provider.js
|
MIT
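A usage sketch for the factory above: the provider key and config overrides follow the LangChainModelConfig typedef, and invoke() is the standard LangChain chat-model entry point. The model name and prompt are placeholders, and OPEN_AI_KEY is assumed to be set.

// Sketch only - not repository code.
async function demo() {
  const chatModel = Provider.LangChainChatModel("openai", {
    model: "gpt-4o-mini",
    temperature: 0,
  });
  const reply = await chatModel.invoke([
    ["system", "You are a terse assistant."],
    ["human", "Name one SQL join type."],
  ]);
  console.log(reply.content);
}
demo();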
|
constructor(config = {}) {
const {
options = {
apiKey: process.env.ANTHROPIC_API_KEY,
maxRetries: 3,
},
model = "claude-2",
} = config;
const client = new Anthropic(options);
super(client);
this.model = model;
}
|
The agent provider for the Anthropic API.
By default, the model is set to 'claude-2'.
|
constructor
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/providers/anthropic.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/anthropic.js
|
MIT
|
async complete(messages, functions = []) {
try {
const [systemPrompt, chats] = this.#parseSystemPrompt(messages);
const response = await this.client.messages.create(
{
model: this.model,
max_tokens: 4096,
system: systemPrompt,
messages: this.#sanitize(chats),
stream: false,
...(Array.isArray(functions) && functions?.length > 0
? { tools: this.#formatFunctions(functions) }
: {}),
},
        { headers: { "anthropic-beta": "tools-2024-04-04" } } // Required so we can use tools.
);
// We know that we need to call a tool. So we are about to recurse through completions/handleExecution
// https://docs.anthropic.com/claude/docs/tool-use#how-tool-use-works
if (response.stop_reason === "tool_use") {
// Get the tool call explicitly.
const toolCall = response.content.find(
(res) => res.type === "tool_use"
);
        // Here we need the chain of thought the model may or may not have generated alongside the call.
        // This needs to be in a very specific format, so we always ensure there is a 2-item content array
        // so that the tool_call content is correct. For Anthropic, text items must not be empty, but the
        // API will still return empty text, so we need to be 100% sure text is non-empty or the tool call
        // will fail.
let thought = response.content.find((res) => res.type === "text");
thought =
thought?.content?.length > 0
? {
role: thought.role,
content: [
{ type: "text", text: thought.content },
{ ...toolCall },
],
}
: {
role: "assistant",
content: [
{
type: "text",
                  text: `Okay, I'm going to use ${toolCall.name} to help me.`,
},
{ ...toolCall },
],
};
// Modify messages forcefully by adding system thought so that tool_use/tool_result
// messaging works with Anthropic's disastrous tool calling API.
messages.push(thought);
const functionArgs = toolCall.input;
return {
result: null,
functionCall: {
name: toolCall.name,
arguments: functionArgs,
},
cost: 0,
};
}
const completion = response.content.find((msg) => msg.type === "text");
return {
result:
completion?.text ??
"The model failed to complete the task and return back a valid response.",
cost: 0,
};
} catch (error) {
// If invalid Auth error we need to abort because no amount of waiting
// will make auth better.
if (error instanceof Anthropic.AuthenticationError) throw error;
if (
error instanceof Anthropic.RateLimitError ||
error instanceof Anthropic.InternalServerError ||
error instanceof Anthropic.APIError // Also will catch AuthenticationError!!!
) {
throw new RetryError(error.message);
}
throw error;
}
}
|
Create a completion based on the received messages.
@param messages A list of messages to send to the Anthropic API.
@param functions
@returns The completion.
|
complete
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/providers/anthropic.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/anthropic.js
|
MIT
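A caller-side sketch of consuming the two possible return shapes above (plain text vs. a pending tool call); the AnthropicProvider class name, the messages, and the surrounding function are illustrative, while 'claude-2' is the documented default model.

// Illustrative consumer - not repository code.
async function askAnthropic() {
  const provider = new AnthropicProvider({ model: "claude-2" });
  const messages = [
    { role: "system", content: "Use tools when they help." },
    { role: "user", content: "What is 2 + 2?" },
  ];
  const { result, functionCall } = await provider.complete(messages, []);
  if (functionCall) {
    // Run the named tool with functionCall.arguments, push the tool result
    // onto messages, then call provider.complete() again.
  } else {
    console.log(result);
  }
}
askAnthropic();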
|
constructor(config = {}) {
const { model = "openrouter/llama-3.1-8b-instruct" } = config;
super();
const client = new OpenAI({
baseURL: "https://apipie.ai/v1",
apiKey: process.env.APIPIE_LLM_API_KEY,
maxRetries: 3,
});
this._client = client;
this.model = model;
this.verbose = true;
}
|
The agent provider for the ApiPie provider.
|
constructor
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/providers/apipie.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/apipie.js
|
MIT
|
get client() {
return this._client;
}
|
The agent provider for the ApiPie provider.
|
client
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/providers/apipie.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/apipie.js
|
MIT
|
async complete(messages, functions = []) {
try {
let completion;
if (functions.length > 0) {
const { toolCall, text } = await this.functionCall(
messages,
functions,
this.#handleFunctionCallChat.bind(this)
);
if (toolCall !== null) {
this.providerLog(`Valid tool call found - running ${toolCall.name}.`);
this.deduplicator.trackRun(toolCall.name, toolCall.arguments);
return {
result: null,
functionCall: {
name: toolCall.name,
arguments: toolCall.arguments,
},
cost: 0,
};
}
completion = { content: text };
}
if (!completion?.content) {
this.providerLog(
"Will assume chat completion without tool call inputs."
);
const response = await this.client.chat.completions.create({
model: this.model,
messages: this.cleanMsgs(messages),
});
completion = response.choices[0].message;
}
// The UnTooled class inherited Deduplicator is mostly useful to prevent the agent
// from calling the exact same function over and over in a loop within a single chat exchange
// _but_ we should enable it to call previously used tools in a new chat interaction.
this.deduplicator.reset("runs");
return {
result: completion.content,
cost: 0,
};
} catch (error) {
throw error;
}
}
|
Create a completion based on the received messages.
@param messages A list of messages to send to the API.
@param functions
@returns The completion.
|
complete
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/providers/apipie.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/apipie.js
|
MIT
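The same { result, functionCall, cost } contract recurs across the Untooled-style providers in this section; a compact driver-loop sketch (all names illustrative, not repository code) shows how an agent could consume it:

// Illustrative agent loop over the complete() contract shown above.
async function runAgent(provider, messages, tools, runTool) {
  for (let step = 0; step < 5; step++) {
    const { result, functionCall } = await provider.complete(messages, tools);
    if (!functionCall) return result; // plain text answer - we are done
    const toolOutput = await runTool(functionCall.name, functionCall.arguments);
    messages.push({
      role: "function",
      name: functionCall.name,
      content: String(toolOutput),
    });
  }
  return "Stopped after too many tool calls.";
}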
|
getCost(_usage) {
return 0;
}
|
Get the cost of the completion.
@param _usage The completion to get the cost for.
@returns The cost of the completion.
|
getCost
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/providers/apipie.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/apipie.js
|
MIT
|
constructor(config = { model: null }) {
const client = new AzureOpenAI({
apiKey: process.env.AZURE_OPENAI_KEY,
endpoint: process.env.AZURE_OPENAI_ENDPOINT,
apiVersion: "2024-12-01-preview",
});
super(client);
this.model = config.model ?? process.env.OPEN_MODEL_PREF;
this.verbose = true;
}
|
The agent provider for the Azure OpenAI API.
|
constructor
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/providers/azure.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/azure.js
|
MIT
|
async complete(messages, functions = []) {
try {
const response = await this.client.chat.completions.create({
model: this.model,
// stream: true,
messages,
...(Array.isArray(functions) && functions?.length > 0
? { functions }
: {}),
});
// Right now, we only support one completion,
// so we just take the first one in the list
const completion = response.choices[0].message;
const cost = this.getCost(response.usage);
// treat function calls
if (completion.function_call) {
let functionArgs = {};
try {
functionArgs = JSON.parse(completion.function_call.arguments);
} catch (error) {
// call the complete function again in case it gets a json error
return this.complete(
[
...messages,
{
role: "function",
name: completion.function_call.name,
function_call: completion.function_call,
content: error?.message,
},
],
functions
);
}
// console.log(completion, { functionArgs })
return {
result: null,
functionCall: {
name: completion.function_call.name,
arguments: functionArgs,
},
cost,
};
}
return {
result: completion.content,
cost,
};
} catch (error) {
// If invalid Auth error we need to abort because no amount of waiting
// will make auth better.
if (error instanceof AzureOpenAI.AuthenticationError) throw error;
if (
error instanceof AzureOpenAI.RateLimitError ||
error instanceof AzureOpenAI.InternalServerError ||
error instanceof AzureOpenAI.APIError // Also will catch AuthenticationError!!!
) {
throw new RetryError(error.message);
}
throw error;
}
}
|
Create a completion based on the received messages.
@param messages A list of messages to send to the OpenAI API.
@param functions
@returns The completion.
|
complete
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/providers/azure.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/azure.js
|
MIT
|
getCost(_usage) {
return 0;
}
|
Get the cost of the completion.
Stubbed since Azure OpenAI has no public cost basis.
@param _usage The completion to get the cost for.
@returns The cost of the completion.
|
getCost
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/providers/azure.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/azure.js
|
MIT
|
constructor(_config = {}) {
super();
const model = process.env.AWS_BEDROCK_LLM_MODEL_PREFERENCE ?? null;
const client = new ChatBedrockConverse({
region: process.env.AWS_BEDROCK_LLM_REGION,
credentials: this.credentials,
model,
});
this._client = client;
this.model = model;
this.verbose = true;
}
|
The agent provider for the AWS Bedrock provider.
|
constructor
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/providers/bedrock.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/bedrock.js
|
MIT
|
get credentials() {
switch (this.authMethod) {
case "iam": // explicit credentials
return {
accessKeyId: process.env.AWS_BEDROCK_LLM_ACCESS_KEY_ID,
secretAccessKey: process.env.AWS_BEDROCK_LLM_ACCESS_KEY,
};
case "sessionToken": // Session token is used for temporary credentials
return {
accessKeyId: process.env.AWS_BEDROCK_LLM_ACCESS_KEY_ID,
secretAccessKey: process.env.AWS_BEDROCK_LLM_ACCESS_KEY,
sessionToken: process.env.AWS_BEDROCK_LLM_SESSION_TOKEN,
};
// IAM role is used for long-term credentials implied by system process
// is filled by the AWS SDK automatically if we pass in no credentials
case "iam_role":
return {};
default:
return {};
}
}
|
Gets the credentials for the AWS Bedrock LLM based on the authentication method provided.
@returns {object} The credentials object.
|
credentials
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/providers/bedrock.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/bedrock.js
|
MIT
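A configuration sketch for the sessionToken branch above; the environment variable names are the ones read in the code, the values and model id are placeholders, and the require path assumes the script sits next to bedrock.js.

// Placeholders only - substitute real temporary credentials and model/region.
process.env.AWS_BEDROCK_LLM_CONNECTION_METHOD = "sessionToken";
process.env.AWS_BEDROCK_LLM_ACCESS_KEY_ID = "ASIA-EXAMPLE";
process.env.AWS_BEDROCK_LLM_ACCESS_KEY = "example-secret";
process.env.AWS_BEDROCK_LLM_SESSION_TOKEN = "example-token";
process.env.AWS_BEDROCK_LLM_REGION = "us-east-1";
process.env.AWS_BEDROCK_LLM_MODEL_PREFERENCE = "example-model-id";

const AWSBedrockProvider = require("./bedrock");
const bedrock = new AWSBedrockProvider();
console.log(bedrock.authMethod);  // "sessionToken"
console.log(bedrock.credentials); // { accessKeyId, secretAccessKey, sessionToken }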
|
get authMethod() {
const method = process.env.AWS_BEDROCK_LLM_CONNECTION_METHOD || "iam";
return SUPPORTED_CONNECTION_METHODS.includes(method) ? method : "iam";
}
|
Gets the configured AWS authentication method ('iam', 'iam_role', or 'sessionToken').
Defaults to 'iam' if the environment variable is invalid.
@returns {"iam" | "iam_role" | "sessionToken"} The authentication method.
|
authMethod
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/providers/bedrock.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/bedrock.js
|
MIT
|
get client() {
return this._client;
}
|
Gets the configured AWS authentication method ('iam', 'iam_role', or 'sessionToken').
Defaults to 'iam' if the environment variable is invalid.
@returns {"iam" | "iam_role" | "sessionToken"} The authentication method.
|
client
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/providers/bedrock.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/bedrock.js
|
MIT
|
async complete(messages, functions = []) {
try {
let completion;
if (functions.length > 0) {
const { toolCall, text } = await this.functionCall(
messages,
functions,
this.#handleFunctionCallChat.bind(this)
);
if (toolCall !== null) {
this.providerLog(`Valid tool call found - running ${toolCall.name}.`);
this.deduplicator.trackRun(toolCall.name, toolCall.arguments);
return {
result: null,
functionCall: {
name: toolCall.name,
arguments: toolCall.arguments,
},
cost: 0,
};
}
completion = { content: text };
}
if (!completion?.content) {
this.providerLog(
"Will assume chat completion without tool call inputs."
);
const response = await this.client.invoke(
this.#convertToLangchainPrototypes(this.cleanMsgs(messages))
);
completion = response;
}
// The UnTooled class inherited Deduplicator is mostly useful to prevent the agent
// from calling the exact same function over and over in a loop within a single chat exchange
// _but_ we should enable it to call previously used tools in a new chat interaction.
this.deduplicator.reset("runs");
return {
result: completion.content,
cost: 0,
};
} catch (error) {
throw error;
}
}
|
Create a completion based on the received messages.
@param messages A list of messages to send to the API.
@param functions
@returns The completion.
|
complete
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/providers/bedrock.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/bedrock.js
|
MIT
|
getCost(_usage) {
return 0;
}
|
Get the cost of the completion.
@param _usage The completion to get the cost for.
@returns The cost of the completion.
Stubbed since AWS Bedrock has no cost basis.
|
getCost
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/providers/bedrock.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/bedrock.js
|
MIT
|
async complete(messages, functions = []) {
try {
let completion;
if (functions.length > 0) {
const { toolCall, text } = await this.functionCall(
messages,
functions,
this.#handleFunctionCallChat.bind(this)
);
if (toolCall !== null) {
this.providerLog(`Valid tool call found - running ${toolCall.name}.`);
this.deduplicator.trackRun(toolCall.name, toolCall.arguments);
return {
result: null,
functionCall: {
name: toolCall.name,
arguments: toolCall.arguments,
},
cost: 0,
};
}
completion = { content: text };
}
if (!completion?.content) {
this.providerLog(
"Will assume chat completion without tool call inputs."
);
const response = await this.client.chat.completions.create({
model: this.model,
messages: this.cleanMsgs(messages),
});
completion = response.choices[0].message;
}
// The UnTooled class inherited Deduplicator is mostly useful to prevent the agent
// from calling the exact same function over and over in a loop within a single chat exchange
// _but_ we should enable it to call previously used tools in a new chat interaction.
this.deduplicator.reset("runs");
return {
result: completion.content,
cost: 0,
};
} catch (error) {
throw error;
}
}
|
Create a completion based on the received messages.
@param messages A list of messages to send to the API.
@param functions
@returns The completion.
|
complete
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/providers/deepseek.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/deepseek.js
|
MIT
|
getCost(_usage) {
return 0;
}
|
Get the cost of the completion.
@param _usage The completion to get the cost for.
@returns The cost of the completion.
|
getCost
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/providers/deepseek.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/deepseek.js
|
MIT
|
async complete(messages, functions = []) {
try {
let completion;
if (functions.length > 0) {
const { toolCall, text } = await this.functionCall(
messages,
functions,
this.#handleFunctionCallChat.bind(this)
);
if (toolCall !== null) {
this.providerLog(`Valid tool call found - running ${toolCall.name}.`);
this.deduplicator.trackRun(toolCall.name, toolCall.arguments);
return {
result: null,
functionCall: {
name: toolCall.name,
arguments: toolCall.arguments,
},
cost: 0,
};
}
completion = { content: text };
}
if (!completion?.content) {
this.providerLog(
"Will assume chat completion without tool call inputs."
);
const response = await this.client.chat.completions.create({
model: this.model,
messages: this.cleanMsgs(messages),
});
completion = response.choices[0].message;
}
// The UnTooled class inherited Deduplicator is mostly useful to prevent the agent
// from calling the exact same function over and over in a loop within a single chat exchange
// _but_ we should enable it to call previously used tools in a new chat interaction.
this.deduplicator.reset("runs");
return {
result: completion.content,
cost: 0,
};
} catch (error) {
throw error;
}
}
|
Create a completion based on the received messages.
@param messages A list of messages to send to the API.
@param functions
@returns The completion.
|
complete
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/providers/dellProAiStudio.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/dellProAiStudio.js
|
MIT
|
getCost(_usage) {
return 0;
}
|
Get the cost of the completion.
@param _usage The completion to get the cost for.
@returns The cost of the completion.
Stubbed since Dell Pro AI Studio has no cost basis.
|
getCost
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/providers/dellProAiStudio.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/dellProAiStudio.js
|
MIT
|
constructor(config = {}) {
const { model = "accounts/fireworks/models/llama-v3p1-8b-instruct" } =
config;
super();
const client = new OpenAI({
baseURL: "https://api.fireworks.ai/inference/v1",
apiKey: process.env.FIREWORKS_AI_LLM_API_KEY,
maxRetries: 0,
});
this._client = client;
this.model = model;
this.verbose = true;
}
|
The agent provider for the FireworksAI provider.
We wrap FireworksAI in UnTooled because its tool-calling may not be supported for specific models and this normalizes that.
|
constructor
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/providers/fireworksai.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/fireworksai.js
|
MIT
|
get client() {
return this._client;
}
|
The agent provider for the FireworksAI provider.
We wrap FireworksAI in UnTooled because its tool-calling may not be supported for specific models and this normalizes that.
|
client
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/providers/fireworksai.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/fireworksai.js
|
MIT
|
async complete(messages, functions = []) {
try {
let completion;
if (functions.length > 0) {
const { toolCall, text } = await this.functionCall(
messages,
functions,
this.#handleFunctionCallChat.bind(this)
);
if (toolCall !== null) {
this.providerLog(`Valid tool call found - running ${toolCall.name}.`);
this.deduplicator.trackRun(toolCall.name, toolCall.arguments);
return {
result: null,
functionCall: {
name: toolCall.name,
arguments: toolCall.arguments,
},
cost: 0,
};
}
completion = { content: text };
}
if (!completion?.content) {
this.providerLog(
"Will assume chat completion without tool call inputs."
);
const response = await this.client.chat.completions.create({
model: this.model,
messages: this.cleanMsgs(messages),
});
completion = response.choices[0].message;
}
// The UnTooled class inherited Deduplicator is mostly useful to prevent the agent
// from calling the exact same function over and over in a loop within a single chat exchange
// _but_ we should enable it to call previously used tools in a new chat interaction.
this.deduplicator.reset("runs");
return {
result: completion.content,
cost: 0,
};
} catch (error) {
throw error;
}
}
|
Create a completion based on the received messages.
@param messages A list of messages to send to the API.
@param functions
@returns The completion.
|
complete
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/providers/fireworksai.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/fireworksai.js
|
MIT
|
getCost(_usage) {
return 0;
}
|
Get the cost of the completion.
@param _usage The completion to get the cost for.
@returns The cost of the completion.
|
getCost
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/providers/fireworksai.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/fireworksai.js
|
MIT
|
constructor(config = {}) {
const { model = "gemini-2.0-flash-lite" } = config;
super();
const client = new OpenAI({
baseURL: "https://generativelanguage.googleapis.com/v1beta/openai/",
apiKey: process.env.GEMINI_API_KEY,
maxRetries: 0,
});
this._client = client;
this.model = model;
this.verbose = true;
}
|
The agent provider for the Gemini provider.
We wrap Gemini in UnTooled because its tool-calling is not supported via the dedicated OpenAI API.
|
constructor
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/providers/gemini.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/gemini.js
|
MIT
|
get client() {
return this._client;
}
|
The agent provider for the Gemini provider.
We wrap Gemini in UnTooled because its tool-calling is not supported via the dedicated OpenAI API.
|
client
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/providers/gemini.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/gemini.js
|
MIT
|
formatMessages(messages) {
if (!NO_SYSTEM_PROMPT_MODELS.includes(this.model)) return messages;
// Replace the system message with a user/assistant message pair
const formattedMessages = [];
for (const message of messages) {
if (message.role === "system") {
formattedMessages.push({
role: "user",
content: message.content,
});
formattedMessages.push({
role: "assistant",
content: "Okay, I'll follow your instructions.",
});
continue;
}
formattedMessages.push(message);
}
return formattedMessages;
}
|
Format the messages to the format required by the Gemini API since some models do not support system prompts.
@see {NO_SYSTEM_PROMPT_MODELS}
@param {import("openai").OpenAI.ChatCompletionMessage[]} messages
@returns {import("openai").OpenAI.ChatCompletionMessage[]}
|
formatMessages
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/providers/gemini.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/gemini.js
|
MIT
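An illustrative before/after of the transformation above for a model that lacks system-prompt support; GeminiProvider is assumed to be the class defined in this file, and "some-no-system-prompt-model" stands in for an entry of NO_SYSTEM_PROMPT_MODELS.

// Sketch only - class and model names are assumptions.
const geminiProvider = new GeminiProvider({ model: "some-no-system-prompt-model" });
const input = [
  { role: "system", content: "Answer in French." },
  { role: "user", content: "What is the capital of Japan?" },
];
const output = geminiProvider.formatMessages(input);
// output deep-equals [
//   { role: "user", content: "Answer in French." },
//   { role: "assistant", content: "Okay, I'll follow your instructions." },
//   { role: "user", content: "What is the capital of Japan?" },
// ]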
|
async complete(messages, functions = []) {
try {
let completion;
if (functions.length > 0) {
const { toolCall, text } = await this.functionCall(
this.cleanMsgs(this.formatMessages(messages)),
functions,
this.#handleFunctionCallChat.bind(this)
);
if (toolCall !== null) {
this.providerLog(`Valid tool call found - running ${toolCall.name}.`);
this.deduplicator.trackRun(toolCall.name, toolCall.arguments);
return {
result: null,
functionCall: {
name: toolCall.name,
arguments: toolCall.arguments,
},
cost: 0,
};
}
completion = { content: text };
}
if (!completion?.content) {
this.providerLog(
"Will assume chat completion without tool call inputs."
);
const response = await this.client.chat.completions.create({
model: this.model,
messages: this.cleanMsgs(this.formatMessages(messages)),
});
completion = response.choices[0].message;
}
// The UnTooled class inherited Deduplicator is mostly useful to prevent the agent
// from calling the exact same function over and over in a loop within a single chat exchange
// _but_ we should enable it to call previously used tools in a new chat interaction.
this.deduplicator.reset("runs");
return {
result: completion.content,
cost: 0,
};
} catch (error) {
throw new APIError(
error?.message
? `${this.constructor.name} encountered an error while executing the request: ${error.message}`
: "There was an error with the Gemini provider executing the request"
);
}
}
|
Create a completion based on the received messages.
@param messages A list of messages to send to the API.
@param functions
@returns The completion.
|
complete
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/providers/gemini.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/gemini.js
|
MIT
|
getCost(_usage) {
return 0;
}
|
Get the cost of the completion.
@param _usage The completion to get the cost for.
@returns The cost of the completion.
|
getCost
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/providers/gemini.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/gemini.js
|
MIT
|
constructor(config = {}) {
super();
const { model = "gpt-3.5-turbo" } = config;
const client = new OpenAI({
baseURL: process.env.GENERIC_OPEN_AI_BASE_PATH,
apiKey: process.env.GENERIC_OPEN_AI_API_KEY ?? null,
maxRetries: 3,
});
this._client = client;
this.model = model;
this.verbose = true;
this.maxTokens = process.env.GENERIC_OPEN_AI_MAX_TOKENS
? toValidNumber(process.env.GENERIC_OPEN_AI_MAX_TOKENS, 1024)
: 1024;
}
|
The agent provider for the Generic OpenAI provider.
Since we cannot guarantee the generic provider even supports tool calling
(it most likely does not), we simply wrap it in Untooled,
which often works far better anyway.
|
constructor
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/providers/genericOpenAi.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/genericOpenAi.js
|
MIT
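A configuration sketch for pointing this provider at any OpenAI-compatible endpoint; the environment variable names are the ones read in the constructor, while the endpoint, key, model, and GenericOpenAiProvider class name are placeholders/assumptions.

// Placeholders only - substitute your own endpoint, key, and model.
process.env.GENERIC_OPEN_AI_BASE_PATH = "http://localhost:8000/v1";
process.env.GENERIC_OPEN_AI_API_KEY = "sk-local";
process.env.GENERIC_OPEN_AI_MAX_TOKENS = "2048";

async function demoGeneric() {
  const provider = new GenericOpenAiProvider({ model: "my-local-model" }); // class name assumed
  const { result } = await provider.complete(
    [{ role: "user", content: "Say hi." }],
    []
  );
  console.log(result);
}
demoGeneric();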
|
get client() {
return this._client;
}
|
The agent provider for the Generic OpenAI provider.
Since we cannot guarantee the generic provider even supports tool calling
(it most likely does not), we simply wrap it in Untooled,
which often works far better anyway.
|
client
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/providers/genericOpenAi.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/genericOpenAi.js
|
MIT
|
async complete(messages, functions = []) {
try {
let completion;
if (functions.length > 0) {
const { toolCall, text } = await this.functionCall(
messages,
functions,
this.#handleFunctionCallChat.bind(this)
);
if (toolCall !== null) {
this.providerLog(`Valid tool call found - running ${toolCall.name}.`);
this.deduplicator.trackRun(toolCall.name, toolCall.arguments);
return {
result: null,
functionCall: {
name: toolCall.name,
arguments: toolCall.arguments,
},
cost: 0,
};
}
completion = { content: text };
}
if (!completion?.content) {
this.providerLog(
"Will assume chat completion without tool call inputs."
);
const response = await this.client.chat.completions.create({
model: this.model,
messages: this.cleanMsgs(messages),
});
completion = response.choices[0].message;
}
// The UnTooled class inherited Deduplicator is mostly useful to prevent the agent
// from calling the exact same function over and over in a loop within a single chat exchange
// _but_ we should enable it to call previously used tools in a new chat interaction.
this.deduplicator.reset("runs");
return {
result: completion.content,
cost: 0,
};
} catch (error) {
throw error;
}
}
|
Create a completion based on the received messages.
@param messages A list of messages to send to the API.
@param functions
@returns The completion.
|
complete
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/providers/genericOpenAi.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/genericOpenAi.js
|
MIT
|
getCost(_usage) {
return 0;
}
|
Get the cost of the completion.
@param _usage The completion to get the cost for.
@returns The cost of the completion.
|
getCost
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/agents/aibitat/providers/genericOpenAi.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/agents/aibitat/providers/genericOpenAi.js
|
MIT
|