code (string, 24-2.07M) | docstring (string, 25-85.3k) | func_name (string, 1-92) | language (1 class) | repo (string, 5-64) | path (string, 4-172) | url (string, 44-218) | license (7 classes) |
---|---|---|---|---|---|---|---|
constructPrompt({
systemPrompt = "",
contextTexts = [],
chatHistory = [],
userPrompt = "",
attachments = [], // This is the specific attachment for only this prompt
}) {
const prompt = {
role: "system",
content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
};
return [
prompt,
...formatChatHistory(chatHistory, this.#generateContent),
{
role: "user",
content: this.#generateContent({ userPrompt, attachments }),
},
];
}
|
Construct the user prompt for this model.
@param {{attachments: import("../../helpers").Attachment[]}} param0
@returns
|
constructPrompt
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/mistral/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/mistral/index.js
|
MIT
|
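For reference, a minimal sketch of the message array the constructPrompt row above produces; formatChatHistory and #generateContent come from the snippet, while the sample values below are hypothetical.
// Hypothetical output shape of constructPrompt (values illustrative only):
const messages = [
// system prompt with any context texts appended by #appendContext
{ role: "system", content: "You are a helpful assistant. [CONTEXT 0]: ..." },
// prior turns, normalized by formatChatHistory
{ role: "user", content: "What is AnythingLLM?" },
{ role: "assistant", content: "An all-in-one AI application." },
// the current prompt (and attachments) built by #generateContent
{ role: "user", content: "Summarize the attached document." },
];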
async getChatCompletion(messages = null, { temperature = 0.7 }) {
if (!(await this.isValidChatCompletionModel(this.model)))
throw new Error(
`Mistral chat: ${this.model} is not valid for chat completion!`
);
const result = await LLMPerformanceMonitor.measureAsyncFunction(
this.openai.chat.completions
.create({
model: this.model,
messages,
temperature,
})
.catch((e) => {
throw new Error(e.message);
})
);
if (
!result.output.hasOwnProperty("choices") ||
result.output.choices.length === 0
)
return null;
return {
textResponse: result.output.choices[0].message.content,
metrics: {
prompt_tokens: result.output.usage.prompt_tokens || 0,
completion_tokens: result.output.usage.completion_tokens || 0,
total_tokens: result.output.usage.total_tokens || 0,
outputTps: result.output.usage.completion_tokens / result.duration,
duration: result.duration,
},
};
}
|
Construct the user prompt for this model.
@param {{attachments: import("../../helpers").Attachment[]}} param0
@returns
|
getChatCompletion
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/mistral/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/mistral/index.js
|
MIT
|
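A quick illustration of how the metrics block above derives outputTps, assuming LLMPerformanceMonitor reports duration in seconds (the numbers are made up):
const completion_tokens = 120; // hypothetical token count from the API usage object
const duration = 3; // hypothetical measured duration, in seconds
const outputTps = completion_tokens / duration; // 40 tokens per second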
async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
if (!(await this.isValidChatCompletionModel(this.model)))
throw new Error(
`Mistral chat: ${this.model} is not valid for chat completion!`
);
const measuredStreamRequest = await LLMPerformanceMonitor.measureStream(
this.openai.chat.completions.create({
model: this.model,
stream: true,
messages,
temperature,
}),
messages,
false
);
return measuredStreamRequest;
}
|
Construct the user prompt for this model.
@param {{attachments: import("../../helpers").Attachment[]}} param0
@returns
|
streamGetChatCompletion
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/mistral/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/mistral/index.js
|
MIT
|
handleStream(response, stream, responseProps) {
return handleDefaultStreamResponseV2(response, stream, responseProps);
}
|
Construct the user prompt for this model.
@param {{attachments: import("../../helpers").Attachment[]}} param0
@returns
|
handleStream
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/mistral/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/mistral/index.js
|
MIT
|
async embedTextInput(textInput) {
return await this.embedder.embedTextInput(textInput);
}
|
Construct the user prompt for this model.
@param {{attachments: import("../../helpers").Attachment[]}} param0
@returns
|
embedTextInput
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/mistral/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/mistral/index.js
|
MIT
|
async embedChunks(textChunks = []) {
return await this.embedder.embedChunks(textChunks);
}
|
Construct the user prompt for this model.
@param {{attachments: import("../../helpers").Attachment[]}} param0
@returns
|
embedChunks
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/mistral/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/mistral/index.js
|
MIT
|
async compressMessages(promptArgs = {}, rawHistory = []) {
const { messageArrayCompressor } = require("../../helpers/chat");
const messageArray = this.constructPrompt(promptArgs);
return await messageArrayCompressor(this, messageArray, rawHistory);
}
|
Construct the user prompt for this model.
@param {{attachments: import("../../helpers").Attachment[]}} param0
@returns
|
compressMessages
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/mistral/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/mistral/index.js
|
MIT
|
constructor() {
if (ContextWindowFinder.instance) return ContextWindowFinder.instance;
ContextWindowFinder.instance = this;
if (!fs.existsSync(this.cacheLocation))
fs.mkdirSync(this.cacheLocation, { recursive: true });
// If the cache is stale or not found at all, pull the model map from remote
if (this.isCacheStale || !fs.existsSync(this.cacheFilePath))
this.#pullRemoteModelMap();
}
|
Mapping for AnythingLLM provider <> LiteLLM provider
@type {Record<string, string>}
|
constructor
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/modelMap/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/modelMap/index.js
|
MIT
|
log(text, ...args) {
console.log(`\x1b[33m[ContextWindowFinder]\x1b[0m ${text}`, ...args);
}
|
Mapping for AnythingLLM provider <> LiteLLM provider
@type {Record<string, string>}
|
log
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/modelMap/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/modelMap/index.js
|
MIT
|
get isCacheStale() {
if (!fs.existsSync(this.cacheFileExpiryPath)) return true;
const cachedAt = fs.readFileSync(this.cacheFileExpiryPath, "utf8");
return Date.now() - cachedAt > ContextWindowFinder.expiryMs;
}
|
Checks whether the cache is stale: returns true when the cache timestamp file is missing or when it is older than the expiry time.
@returns {boolean}
|
isCacheStale
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/modelMap/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/modelMap/index.js
|
MIT
|
get cachedModelMap() {
if (!fs.existsSync(this.cacheFilePath)) {
this.log(`\x1b[33m
--------------------------------
[WARNING] Model map cache is not found!
Invalid context windows will be returned leading to inaccurate model responses
or smaller context windows than expected.
You can fix this by restarting AnythingLLM so the model map is re-pulled.
--------------------------------\x1b[0m`);
return null;
}
if (this.isCacheStale && !this.seenStaleCacheWarning) {
this.log(
"Model map cache is stale - some model context windows may be incorrect. This is OK and the model map will be re-pulled on next boot."
);
this.seenStaleCacheWarning = true;
}
return JSON.parse(
fs.readFileSync(this.cacheFilePath, { encoding: "utf8" })
);
}
|
Gets the cached model map.
Always returns the available model map - even if it is expired since re-pulling
the model map only occurs on container start/system start.
@returns {Record<string, Record<string, number>> | null} - The cached model map
|
cachedModelMap
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/modelMap/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/modelMap/index.js
|
MIT
|
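Illustrative shape of the JSON that cachedModelMap parses, matching the @returns type above (the providers, model names, and numbers below are hypothetical):
// Record<string, Record<string, number>> - provider -> model -> context window
const cachedModelMap = {
openai: { "gpt-4o": 128000, "gpt-4o-mini": 128000 },
anthropic: { "claude-3-5-sonnet-latest": 200000 },
groq: { "llama-3.1-8b-instant": 131072 },
};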
get(provider = null, model = null) {
if (!provider || !this.cachedModelMap || !this.cachedModelMap[provider])
return null;
if (!model) return this.cachedModelMap[provider];
const modelContextWindow = this.cachedModelMap[provider][model];
if (!modelContextWindow) {
this.log("Invalid access to model context window - not found in cache", {
provider,
model,
});
return null;
}
return Number(modelContextWindow);
}
|
Gets the context window for a given provider and model.
If the provider is not found, null is returned.
If the model is not found, the provider's entire model map is returned.
If both provider and model are provided, the context window for the given model is returned.
@param {string|null} provider - The provider to get the context window for
@param {string|null} model - The model to get the context window for
@returns {number|null} - The context window for the given provider and model
|
get
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/modelMap/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/modelMap/index.js
|
MIT
|
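Usage sketch for the get accessor above, assuming MODEL_MAP is the exported ContextWindowFinder singleton (the model names and numbers are illustrative):
MODEL_MAP.get(); // null - no provider given
MODEL_MAP.get("openai"); // { "gpt-4o": 128000, ... } - the provider's whole map
MODEL_MAP.get("openai", "gpt-4o"); // 128000 - context window as a Number
MODEL_MAP.get("openai", "not-a-model"); // null - logs "Invalid access to model context window"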
models() {
if (!fs.existsSync(this.cacheModelPath)) return {};
return safeJsonParse(
fs.readFileSync(this.cacheModelPath, { encoding: "utf-8" }),
{}
);
}
|
Novita has various models that never return `finish_reasons` and thus leave the stream open
which causes issues in subsequent messages. This timeout value forces us to close the stream after
x milliseconds. This value is configurable via the NOVITA_LLM_TIMEOUT_MS environment variable.
@returns {number} The timeout value in milliseconds (default: 500)
|
models
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/novita/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/novita/index.js
|
MIT
|
streamingEnabled() {
return "streamGetChatCompletion" in this;
}
|
Novita has various models that never return `finish_reasons` and thus leave the stream open
which causes issues in subsequent messages. This timeout value forces us to close the stream after
x milliseconds. This value is configurable via the NOVITA_LLM_TIMEOUT_MS environment variable.
@returns {number} The timeout value in milliseconds (default: 500)
|
streamingEnabled
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/novita/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/novita/index.js
|
MIT
|
static promptWindowLimit(modelName) {
const cacheModelPath = path.resolve(cacheFolder, "models.json");
const availableModels = fs.existsSync(cacheModelPath)
? safeJsonParse(
fs.readFileSync(cacheModelPath, { encoding: "utf-8" }),
{}
)
: {};
return availableModels[modelName]?.maxLength || 4096;
}
|
Novita has various models that never return `finish_reasons` and thus leave the stream open
which causes issues in subsequent messages. This timeout value forces us to close the stream after
x milliseconds. This value is configurable via the NOVITA_LLM_TIMEOUT_MS environment variable.
@returns {number} The timeout value in milliseconds (default: 500)
|
promptWindowLimit
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/novita/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/novita/index.js
|
MIT
|
promptWindowLimit() {
const availableModels = this.models();
return availableModels[this.model]?.maxLength || 4096;
}
|
Novita has various models that never return `finish_reasons` and thus leave the stream open
which causes issues in subsequent messages. This timeout value forces us to close the stream after
x milliseconds. This value is configurable via the NOVITA_LLM_TIMEOUT_MS environment variable.
@returns {number} The timeout value in milliseconds (default: 500)
|
promptWindowLimit
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/novita/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/novita/index.js
|
MIT
|
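For context, a sketch of the cached models.json entry that promptWindowLimit reads; the shape mirrors what fetchNovitaModels (further down) writes, while the concrete values are hypothetical.
const availableModels = {
"meta-llama/llama-3.1-8b-instruct": {
id: "meta-llama/llama-3.1-8b-instruct",
name: "Llama 3.1 8B Instruct",
organization: "Meta-llama",
maxLength: 16384, // context size used as the prompt window limit
},
};
availableModels["meta-llama/llama-3.1-8b-instruct"]?.maxLength || 4096; // 16384
availableModels["unknown/model"]?.maxLength || 4096; // falls back to 4096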
async isValidChatCompletionModel(model = "") {
await this.#syncModels();
const availableModels = this.models();
return availableModels.hasOwnProperty(model);
}
|
Novita has various models that never return `finish_reasons` and thus leave the stream open
which causes issues in subsequent messages. This timeout value forces us to close the stream after
x milliseconds. This value is configurable via the NOVITA_LLM_TIMEOUT_MS environment variable.
@returns {number} The timeout value in milliseconds (default: 500)
|
isValidChatCompletionModel
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/novita/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/novita/index.js
|
MIT
|
constructPrompt({
systemPrompt = "",
contextTexts = [],
chatHistory = [],
userPrompt = "",
attachments = [],
}) {
const prompt = {
role: "system",
content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
};
return [
prompt,
...formatChatHistory(chatHistory, this.#generateContent),
{
role: "user",
content: this.#generateContent({ userPrompt, attachments }),
},
];
}
|
Generates appropriate content array for a message + attachments.
@param {{userPrompt:string, attachments: import("../../helpers").Attachment[]}}
@returns {string|object[]}
|
constructPrompt
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/novita/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/novita/index.js
|
MIT
|
async getChatCompletion(messages = null, { temperature = 0.7 }) {
if (!(await this.isValidChatCompletionModel(this.model)))
throw new Error(
`Novita chat: ${this.model} is not valid for chat completion!`
);
const result = await LLMPerformanceMonitor.measureAsyncFunction(
this.openai.chat.completions
.create({
model: this.model,
messages,
temperature,
})
.catch((e) => {
throw new Error(e.message);
})
);
if (
!result.output.hasOwnProperty("choices") ||
result.output.choices.length === 0
)
return null;
return {
textResponse: result.output.choices[0].message.content,
metrics: {
prompt_tokens: result.output.usage.prompt_tokens || 0,
completion_tokens: result.output.usage.completion_tokens || 0,
total_tokens: result.output.usage.total_tokens || 0,
outputTps: result.output.usage.completion_tokens / result.duration,
duration: result.duration,
},
};
}
|
Generates appropriate content array for a message + attachments.
@param {{userPrompt:string, attachments: import("../../helpers").Attachment[]}}
@returns {string|object[]}
|
getChatCompletion
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/novita/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/novita/index.js
|
MIT
|
async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
if (!(await this.isValidChatCompletionModel(this.model)))
throw new Error(
`Novita chat: ${this.model} is not valid for chat completion!`
);
const measuredStreamRequest = await LLMPerformanceMonitor.measureStream(
this.openai.chat.completions.create({
model: this.model,
stream: true,
messages,
temperature,
}),
messages
);
return measuredStreamRequest;
}
|
Generates appropriate content array for a message + attachments.
@param {{userPrompt:string, attachments: import("../../helpers").Attachment[]}}
@returns {string|object[]}
|
streamGetChatCompletion
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/novita/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/novita/index.js
|
MIT
|
handleStream(response, stream, responseProps) {
const timeoutThresholdMs = this.timeout;
const { uuid = uuidv4(), sources = [] } = responseProps;
return new Promise(async (resolve) => {
let fullText = "";
let lastChunkTime = null; // null until the first token is received.
// Establish listener to early-abort a streaming response
// in case things go sideways or the user does not like the response.
// We preserve the generated text but continue as if chat was completed
// to preserve previously generated content.
const handleAbort = () => {
stream?.endMeasurement({
completion_tokens: LLMPerformanceMonitor.countTokens(fullText),
});
clientAbortedHandler(resolve, fullText);
};
response.on("close", handleAbort);
// NOTICE: Not all Novita models will return a stop reason
// which keeps the connection open and so the model never finalizes the stream
// like the traditional OpenAI response schema does. So in the case the response stream
// never reaches a formal close state we maintain an interval timer that if we go >=timeoutThresholdMs with
// no new chunks then we kill the stream and assume it to be complete. Novita is quite fast
// so this threshold should permit most responses, but we can adjust `timeoutThresholdMs` if
// we find it is too aggressive.
const timeoutCheck = setInterval(() => {
if (lastChunkTime === null) return;
const now = Number(new Date());
const diffMs = now - lastChunkTime;
if (diffMs >= timeoutThresholdMs) {
this.log(
`Novita stream did not self-close and has been stale for >${timeoutThresholdMs}ms. Closing response stream.`
);
writeResponseChunk(response, {
uuid,
sources,
type: "textResponseChunk",
textResponse: "",
close: true,
error: false,
});
clearInterval(timeoutCheck);
response.removeListener("close", handleAbort);
stream?.endMeasurement({
completion_tokens: LLMPerformanceMonitor.countTokens(fullText),
});
resolve(fullText);
}
}, 500);
try {
for await (const chunk of stream) {
const message = chunk?.choices?.[0];
const token = message?.delta?.content;
lastChunkTime = Number(new Date());
if (token) {
fullText += token;
writeResponseChunk(response, {
uuid,
sources: [],
type: "textResponseChunk",
textResponse: token,
close: false,
error: false,
});
}
if (message.finish_reason !== null) {
writeResponseChunk(response, {
uuid,
sources,
type: "textResponseChunk",
textResponse: "",
close: true,
error: false,
});
response.removeListener("close", handleAbort);
clearInterval(timeoutCheck);
stream?.endMeasurement({
completion_tokens: LLMPerformanceMonitor.countTokens(fullText),
});
resolve(fullText);
}
}
} catch (e) {
writeResponseChunk(response, {
uuid,
sources,
type: "abort",
textResponse: null,
close: true,
error: e.message,
});
response.removeListener("close", handleAbort);
clearInterval(timeoutCheck);
stream?.endMeasurement({
completion_tokens: LLMPerformanceMonitor.countTokens(fullText),
});
resolve(fullText);
}
});
}
|
Handles the default stream response for a chat.
@param {import("express").Response} response
@param {import('../../helpers/chat/LLMPerformanceMonitor').MonitoredStream} stream
@param {Object} responseProps
@returns {Promise<string>}
|
handleStream
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/novita/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/novita/index.js
|
MIT
|
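A minimal, self-contained sketch of the stale-stream watchdog pattern used in the handleStream row above; the 500ms poll interval mirrors the snippet, while createStreamWatchdog, onStale, and touch are hypothetical names.
function createStreamWatchdog({ timeoutThresholdMs, onStale }) {
let lastChunkTime = null; // null until the first chunk arrives - never time out before that
const timer = setInterval(() => {
if (lastChunkTime === null) return;
if (Date.now() - lastChunkTime >= timeoutThresholdMs) {
clearInterval(timer);
onStale(); // close the response as if the provider had sent a stop reason
}
}, 500);
return {
touch: () => (lastChunkTime = Date.now()), // call on every received chunk
stop: () => clearInterval(timer), // call when the stream closes normally
};
}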
handleAbort = () => {
stream?.endMeasurement({
completion_tokens: LLMPerformanceMonitor.countTokens(fullText),
});
clientAbortedHandler(resolve, fullText);
}
|
Handles the default stream response for a chat.
@param {import("express").Response} response
@param {import('../../helpers/chat/LLMPerformanceMonitor').MonitoredStream} stream
@param {Object} responseProps
@returns {Promise<string>}
|
handleAbort
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/novita/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/novita/index.js
|
MIT
|
handleAbort = () => {
stream?.endMeasurement({
completion_tokens: LLMPerformanceMonitor.countTokens(fullText),
});
clientAbortedHandler(resolve, fullText);
}
|
Handles the default stream response for a chat.
@param {import("express").Response} response
@param {import('../../helpers/chat/LLMPerformanceMonitor').MonitoredStream} stream
@param {Object} responseProps
@returns {Promise<string>}
|
handleAbort
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/novita/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/novita/index.js
|
MIT
|
async embedTextInput(textInput) {
return await this.embedder.embedTextInput(textInput);
}
|
Handles the default stream response for a chat.
@param {import("express").Response} response
@param {import('../../helpers/chat/LLMPerformanceMonitor').MonitoredStream} stream
@param {Object} responseProps
@returns {Promise<string>}
|
embedTextInput
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/novita/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/novita/index.js
|
MIT
|
async embedChunks(textChunks = []) {
return await this.embedder.embedChunks(textChunks);
}
|
Handles the default stream response for a chat.
@param {import("express").Response} response
@param {import('../../helpers/chat/LLMPerformanceMonitor').MonitoredStream} stream
@param {Object} responseProps
@returns {Promise<string>}
|
embedChunks
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/novita/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/novita/index.js
|
MIT
|
async compressMessages(promptArgs = {}, rawHistory = []) {
const { messageArrayCompressor } = require("../../helpers/chat");
const messageArray = this.constructPrompt(promptArgs);
return await messageArrayCompressor(this, messageArray, rawHistory);
}
|
Handles the default stream response for a chat.
@param {import("express").Response} response
@param {import('../../helpers/chat/LLMPerformanceMonitor').MonitoredStream} stream
@param {Object} responseProps
@returns {Promise<string>}
|
compressMessages
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/novita/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/novita/index.js
|
MIT
|
async function fetchNovitaModels() {
return await fetch(`https://api.novita.ai/v3/openai/models`, {
method: "GET",
headers: {
"Content-Type": "application/json",
},
})
.then((res) => res.json())
.then(({ data = [] }) => {
const models = {};
data.forEach((model) => {
models[model.id] = {
id: model.id,
name: model.title,
organization:
model.id.split("/")[0].charAt(0).toUpperCase() +
model.id.split("/")[0].slice(1),
maxLength: model.context_size,
};
});
// Cache all response information
if (!fs.existsSync(cacheFolder))
fs.mkdirSync(cacheFolder, { recursive: true });
fs.writeFileSync(
path.resolve(cacheFolder, "models.json"),
JSON.stringify(models),
{
encoding: "utf-8",
}
);
fs.writeFileSync(
path.resolve(cacheFolder, ".cached_at"),
String(Number(new Date())),
{
encoding: "utf-8",
}
);
return models;
})
.catch((e) => {
console.error(e);
return {};
});
}
|
Handles the default stream response for a chat.
@param {import("express").Response} response
@param {import('../../helpers/chat/LLMPerformanceMonitor').MonitoredStream} stream
@param {Object} responseProps
@returns {Promise<string>}
|
fetchNovitaModels
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/novita/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/novita/index.js
|
MIT
|
constructPrompt({
systemPrompt = "",
contextTexts = [],
chatHistory = [],
userPrompt = "",
attachments = [],
}) {
const prompt = {
role: "system",
content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
};
return [
prompt,
...formatChatHistory(chatHistory, this.#generateContent),
{
role: "user",
content: this.#generateContent({ userPrompt, attachments }),
},
];
}
|
Construct the user prompt for this model.
@param {{attachments: import("../../helpers").Attachment[]}} param0
@returns
|
constructPrompt
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/nvidiaNim/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/nvidiaNim/index.js
|
MIT
|
async getChatCompletion(messages = null, { temperature = 0.7 }) {
if (!this.model)
throw new Error(
`NVIDIA NIM chat: ${this.model} is not a valid or defined model for chat completion!`
);
const result = await LLMPerformanceMonitor.measureAsyncFunction(
this.nvidiaNim.chat.completions
.create({
model: this.model,
messages,
temperature,
})
.catch((e) => {
throw new Error(e.message);
})
);
if (
!result.output.hasOwnProperty("choices") ||
result.output.choices.length === 0
)
return null;
return {
textResponse: result.output.choices[0].message.content,
metrics: {
prompt_tokens: result.output.usage.prompt_tokens || 0,
completion_tokens: result.output.usage.completion_tokens || 0,
total_tokens: result.output.usage.total_tokens || 0,
outputTps: result.output.usage.completion_tokens / result.duration,
duration: result.duration,
},
};
}
|
Construct the user prompt for this model.
@param {{attachments: import("../../helpers").Attachment[]}} param0
@returns
|
getChatCompletion
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/nvidiaNim/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/nvidiaNim/index.js
|
MIT
|
async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
if (!this.model)
throw new Error(
`NVIDIA NIM chat: ${this.model} is not a valid or defined model for chat completion!`
);
const measuredStreamRequest = await LLMPerformanceMonitor.measureStream(
this.nvidiaNim.chat.completions.create({
model: this.model,
stream: true,
messages,
temperature,
}),
messages
);
return measuredStreamRequest;
}
|
Construct the user prompt for this model.
@param {{attachments: import("../../helpers").Attachment[]}} param0
@returns
|
streamGetChatCompletion
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/nvidiaNim/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/nvidiaNim/index.js
|
MIT
|
handleStream(response, stream, responseProps) {
return handleDefaultStreamResponseV2(response, stream, responseProps);
}
|
Construct the user prompt for this model.
@param {{attachments: import("../../helpers").Attachment[]}} param0
@returns
|
handleStream
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/nvidiaNim/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/nvidiaNim/index.js
|
MIT
|
async embedTextInput(textInput) {
return await this.embedder.embedTextInput(textInput);
}
|
Construct the user prompt for this model.
@param {{attachments: import("../../helpers").Attachment[]}} param0
@returns
|
embedTextInput
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/nvidiaNim/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/nvidiaNim/index.js
|
MIT
|
async embedChunks(textChunks = []) {
return await this.embedder.embedChunks(textChunks);
}
|
Construct the user prompt for this model.
@param {{attachments: import("../../helpers").Attachment[]}} param0
@returns
|
embedChunks
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/nvidiaNim/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/nvidiaNim/index.js
|
MIT
|
async compressMessages(promptArgs = {}, rawHistory = []) {
const { messageArrayCompressor } = require("../../helpers/chat");
const messageArray = this.constructPrompt(promptArgs);
return await messageArrayCompressor(this, messageArray, rawHistory);
}
|
Construct the user prompt for this model.
@param {{attachments: import("../../helpers").Attachment[]}} param0
@returns
|
compressMessages
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/nvidiaNim/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/nvidiaNim/index.js
|
MIT
|
function parseNvidiaNimBasePath(providedBasePath = "") {
try {
const baseURL = new URL(providedBasePath);
const basePath = `${baseURL.origin}/v1`;
return basePath;
} catch (e) {
return providedBasePath;
}
}
|
Parse the base path for the NVIDIA NIM container API. The base path must end in /v1 and cannot have a trailing slash,
but users may paste in almost anything, so we normalize whatever was provided into the correct format.
@param {string} basePath
@returns {string}
|
parseNvidiaNimBasePath
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/nvidiaNim/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/nvidiaNim/index.js
|
MIT
|
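Illustrative inputs and outputs for parseNvidiaNimBasePath above (the URLs are hypothetical):
parseNvidiaNimBasePath("http://localhost:8000/v1/"); // "http://localhost:8000/v1"
parseNvidiaNimBasePath("http://localhost:8000/v1/chat/completions"); // "http://localhost:8000/v1"
parseNvidiaNimBasePath("not-a-url"); // "not-a-url" (not parsable as a URL, returned unchanged)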
constructPrompt({
systemPrompt = "",
contextTexts = [],
chatHistory = [],
userPrompt = "",
attachments = [],
}) {
const prompt = {
role: "system",
content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
};
return [
prompt,
...formatChatHistory(chatHistory, this.#generateContent, "spread"),
{
role: "user",
...this.#generateContent({ userPrompt, attachments }),
},
];
}
|
Construct the user prompt for this model.
@param {{attachments: import("../../helpers").Attachment[]}} param0
@returns
|
constructPrompt
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/ollama/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/ollama/index.js
|
MIT
|
async getChatCompletion(messages = null, { temperature = 0.7 }) {
const result = await LLMPerformanceMonitor.measureAsyncFunction(
this.client
.chat({
model: this.model,
stream: false,
messages,
keep_alive: this.keepAlive,
options: {
temperature,
use_mlock: true,
// There are currently only two performance settings, so if it's not "base" - it's max context.
...(this.performanceMode === "base"
? {}
: { num_ctx: this.promptWindowLimit() }),
},
})
.then((res) => {
return {
content: res.message.content,
usage: {
prompt_tokens: res.prompt_eval_count,
completion_tokens: res.eval_count,
total_tokens: res.prompt_eval_count + res.eval_count,
},
};
})
.catch((e) => {
throw new Error(
`Ollama::getChatCompletion failed to communicate with Ollama. ${this.#errorHandler(e).message}`
);
})
);
if (!result.output.content || !result.output.content.length)
throw new Error(`Ollama::getChatCompletion text response was empty.`);
return {
textResponse: result.output.content,
metrics: {
prompt_tokens: result.output.usage.prompt_tokens,
completion_tokens: result.output.usage.completion_tokens,
total_tokens: result.output.usage.total_tokens,
outputTps: result.output.usage.completion_tokens / result.duration,
duration: result.duration,
},
};
}
|
Construct the user prompt for this model.
@param {{attachments: import("../../helpers").Attachment[]}} param0
@returns
|
getChatCompletion
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/ollama/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/ollama/index.js
|
MIT
|
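Illustrative mapping from Ollama's raw chat response fields to the usage object built in the row above (the field values are hypothetical):
const res = {
message: { content: "Hello there!" },
prompt_eval_count: 42, // tokens evaluated from the prompt
eval_count: 7, // tokens generated for the reply
};
const usage = {
prompt_tokens: res.prompt_eval_count, // 42
completion_tokens: res.eval_count, // 7
total_tokens: res.prompt_eval_count + res.eval_count, // 49
};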
async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
const measuredStreamRequest = await LLMPerformanceMonitor.measureStream(
this.client.chat({
model: this.model,
stream: true,
messages,
keep_alive: this.keepAlive,
options: {
temperature,
use_mlock: true,
// There are currently only two performance settings, so if it's not "base" - it's max context.
...(this.performanceMode === "base"
? {}
: { num_ctx: this.promptWindowLimit() }),
},
}),
messages,
false
).catch((e) => {
throw this.#errorHandler(e);
});
return measuredStreamRequest;
}
|
Construct the user prompt for this model.
@param {{attachments: import("../../helpers").Attachment[]}} param0
@returns
|
streamGetChatCompletion
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/ollama/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/ollama/index.js
|
MIT
|
handleStream(response, stream, responseProps) {
const { uuid = uuidv4(), sources = [] } = responseProps;
return new Promise(async (resolve) => {
let fullText = "";
let usage = {
prompt_tokens: 0,
completion_tokens: 0,
};
// Establish listener to early-abort a streaming response
// in case things go sideways or the user does not like the response.
// We preserve the generated text but continue as if chat was completed
// to preserve previously generated content.
const handleAbort = () => {
stream?.endMeasurement(usage);
clientAbortedHandler(resolve, fullText);
};
response.on("close", handleAbort);
try {
for await (const chunk of stream) {
if (chunk === undefined)
throw new Error(
"Stream returned undefined chunk. Aborting reply - check model provider logs."
);
if (chunk.done) {
usage.prompt_tokens = chunk.prompt_eval_count;
usage.completion_tokens = chunk.eval_count;
writeResponseChunk(response, {
uuid,
sources,
type: "textResponseChunk",
textResponse: "",
close: true,
error: false,
});
response.removeListener("close", handleAbort);
stream?.endMeasurement(usage);
resolve(fullText);
break;
}
if (chunk.hasOwnProperty("message")) {
const content = chunk.message.content;
fullText += content;
writeResponseChunk(response, {
uuid,
sources,
type: "textResponseChunk",
textResponse: content,
close: false,
error: false,
});
}
}
} catch (error) {
writeResponseChunk(response, {
uuid,
sources: [],
type: "textResponseChunk",
textResponse: "",
close: true,
error: `Ollama:streaming - could not stream chat. ${
error?.cause ?? error.message
}`,
});
response.removeListener("close", handleAbort);
stream?.endMeasurement(usage);
resolve(fullText);
}
});
}
|
Handles streaming responses from Ollama.
@param {import("express").Response} response
@param {import("../../helpers/chat/LLMPerformanceMonitor").MonitoredStream} stream
@param {import("express").Request} request
@returns {Promise<string>}
|
handleStream
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/ollama/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/ollama/index.js
|
MIT
|
handleAbort = () => {
stream?.endMeasurement(usage);
clientAbortedHandler(resolve, fullText);
}
|
Handles streaming responses from Ollama.
@param {import("express").Response} response
@param {import("../../helpers/chat/LLMPerformanceMonitor").MonitoredStream} stream
@param {import("express").Request} request
@returns {Promise<string>}
|
handleAbort
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/ollama/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/ollama/index.js
|
MIT
|
handleAbort = () => {
stream?.endMeasurement(usage);
clientAbortedHandler(resolve, fullText);
}
|
Handles streaming responses from Ollama.
@param {import("express").Response} response
@param {import("../../helpers/chat/LLMPerformanceMonitor").MonitoredStream} stream
@param {import("express").Request} request
@returns {Promise<string>}
|
handleAbort
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/ollama/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/ollama/index.js
|
MIT
|
async embedTextInput(textInput) {
return await this.embedder.embedTextInput(textInput);
}
|
Handles streaming responses from Ollama.
@param {import("express").Response} response
@param {import("../../helpers/chat/LLMPerformanceMonitor").MonitoredStream} stream
@param {import("express").Request} request
@returns {Promise<string>}
|
embedTextInput
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/ollama/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/ollama/index.js
|
MIT
|
async embedChunks(textChunks = []) {
return await this.embedder.embedChunks(textChunks);
}
|
Handles streaming responses from Ollama.
@param {import("express").Response} response
@param {import("../../helpers/chat/LLMPerformanceMonitor").MonitoredStream} stream
@param {import("express").Request} request
@returns {Promise<string>}
|
embedChunks
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/ollama/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/ollama/index.js
|
MIT
|
async compressMessages(promptArgs = {}, rawHistory = []) {
const { messageArrayCompressor } = require("../../helpers/chat");
const messageArray = this.constructPrompt(promptArgs);
return await messageArrayCompressor(this, messageArray, rawHistory);
}
|
Handles streaming responses from Ollama.
@param {import("express").Response} response
@param {import("../../helpers/chat/LLMPerformanceMonitor").MonitoredStream} stream
@param {import("express").Request} request
@returns {Promise<string>}
|
compressMessages
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/ollama/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/ollama/index.js
|
MIT
|
get isOTypeModel() {
return this.model.startsWith("o");
}
|
Check if the model is an o1 model.
@returns {boolean}
|
isOTypeModel
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/openAi/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/openAi/index.js
|
MIT
|
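The isOTypeModel getter above is just a prefix test; a few illustrative model names show how it behaves:
"o1".startsWith("o"); // true  -> treated as an o-type model
"o3-mini".startsWith("o"); // true  -> o-type, and per streamingEnabled the only one allowed to stream
"gpt-4o".startsWith("o"); // false -> regular chat model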
streamingEnabled() {
// o3-mini is the only o-type model that supports streaming
if (this.isOTypeModel && this.model !== "o3-mini") return false;
return "streamGetChatCompletion" in this;
}
|
Check if the model is an o1 model.
@returns {boolean}
|
streamingEnabled
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/openAi/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/openAi/index.js
|
MIT
|
static promptWindowLimit(modelName) {
return MODEL_MAP.get("openai", modelName) ?? 4_096;
}
|
Check if the model is an o1 model.
@returns {boolean}
|
promptWindowLimit
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/openAi/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/openAi/index.js
|
MIT
|
promptWindowLimit() {
return MODEL_MAP.get("openai", this.model) ?? 4_096;
}
|
Check if the model is an o1 model.
@returns {boolean}
|
promptWindowLimit
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/openAi/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/openAi/index.js
|
MIT
|
async isValidChatCompletionModel(modelName = "") {
const isPreset =
modelName.toLowerCase().includes("gpt") ||
modelName.toLowerCase().startsWith("o");
if (isPreset) return true;
const model = await this.openai.models
.retrieve(modelName)
.then((modelObj) => modelObj)
.catch(() => null);
return !!model;
}
|
Check if the model is an o1 model.
@returns {boolean}
|
isValidChatCompletionModel
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/openAi/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/openAi/index.js
|
MIT
|
constructPrompt({
systemPrompt = "",
contextTexts = [],
chatHistory = [],
userPrompt = "",
attachments = [], // This is the specific attachment for only this prompt
}) {
// o1 Models do not support the "system" role
// in order to combat this, we can use the "user" role as a replacement for now
// https://community.openai.com/t/o1-models-do-not-support-system-role-in-chat-completion/953880
const prompt = {
role: this.isOTypeModel ? "user" : "system",
content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
};
return [
prompt,
...formatChatHistory(chatHistory, this.#generateContent),
{
role: "user",
content: this.#generateContent({ userPrompt, attachments }),
},
];
}
|
Construct the user prompt for this model.
@param {{attachments: import("../../helpers").Attachment[]}} param0
@returns
|
constructPrompt
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/openAi/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/openAi/index.js
|
MIT
|
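A minimal sketch of the o-type system-role workaround in the constructPrompt row above: the system content is unchanged, only the role differs (the values are hypothetical).
const isOTypeModel = true; // e.g. this.model = "o1"
const systemContent = "You are a concise assistant."; // systemPrompt plus appended context
const prompt = {
role: isOTypeModel ? "user" : "system", // o1-style models reject the "system" role
content: systemContent,
};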
async getChatCompletion(messages = null, { temperature = 0.7 }) {
if (!(await this.isValidChatCompletionModel(this.model)))
throw new Error(
`OpenAI chat: ${this.model} is not valid for chat completion!`
);
const result = await LLMPerformanceMonitor.measureAsyncFunction(
this.openai.chat.completions
.create({
model: this.model,
messages,
temperature: this.isOTypeModel ? 1 : temperature, // o1 models only accept temperature 1
})
.catch((e) => {
throw new Error(e.message);
})
);
if (
!result.output.hasOwnProperty("choices") ||
result.output.choices.length === 0
)
return null;
return {
textResponse: result.output.choices[0].message.content,
metrics: {
prompt_tokens: result.output.usage.prompt_tokens || 0,
completion_tokens: result.output.usage.completion_tokens || 0,
total_tokens: result.output.usage.total_tokens || 0,
outputTps: result.output.usage.completion_tokens / result.duration,
duration: result.duration,
},
};
}
|
Construct the user prompt for this model.
@param {{attachments: import("../../helpers").Attachment[]}} param0
@returns
|
getChatCompletion
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/openAi/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/openAi/index.js
|
MIT
|
async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
if (!(await this.isValidChatCompletionModel(this.model)))
throw new Error(
`OpenAI chat: ${this.model} is not valid for chat completion!`
);
const measuredStreamRequest = await LLMPerformanceMonitor.measureStream(
this.openai.chat.completions.create({
model: this.model,
stream: true,
messages,
temperature: this.isOTypeModel ? 1 : temperature, // o1 models only accept temperature 1
}),
messages
// runPromptTokenCalculation: true - We manually count the tokens because OpenAI does not provide them in the stream
// since we are not using the OpenAI API version that supports this `stream_options` param.
// TODO: implement this once we upgrade to the OpenAI API version that supports this param.
);
return measuredStreamRequest;
}
|
Construct the user prompt for this model.
@param {{attachments: import("../../helpers").Attachment[]}} param0
@returns
|
streamGetChatCompletion
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/openAi/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/openAi/index.js
|
MIT
|
handleStream(response, stream, responseProps) {
return handleDefaultStreamResponseV2(response, stream, responseProps);
}
|
Construct the user prompt for this model.
@param {{attachments: import("../../helpers").Attachment[]}} param0
@returns
|
handleStream
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/openAi/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/openAi/index.js
|
MIT
|
async embedTextInput(textInput) {
return await this.embedder.embedTextInput(textInput);
}
|
Construct the user prompt for this model.
@param {{attachments: import("../../helpers").Attachment[]}} param0
@returns
|
embedTextInput
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/openAi/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/openAi/index.js
|
MIT
|
async embedChunks(textChunks = []) {
return await this.embedder.embedChunks(textChunks);
}
|
Construct the user prompt for this model.
@param {{attachments: import("../../helpers").Attachment[]}} param0
@returns
|
embedChunks
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/openAi/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/openAi/index.js
|
MIT
|
async compressMessages(promptArgs = {}, rawHistory = []) {
const { messageArrayCompressor } = require("../../helpers/chat");
const messageArray = this.constructPrompt(promptArgs);
return await messageArrayCompressor(this, messageArray, rawHistory);
}
|
Construct the user prompt for this model.
@param {{attachments: import("../../helpers").Attachment[]}} param0
@returns
|
compressMessages
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/openAi/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/openAi/index.js
|
MIT
|
get isPerplexityModel() {
return this.model.startsWith("perplexity/");
}
|
Returns true if the model is a Perplexity model.
OpenRouter has support for a lot of models and we have some special handling for Perplexity models
that support in-line citations.
@returns {boolean}
|
isPerplexityModel
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/openRouter/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/openRouter/index.js
|
MIT
|
enrichToken({ token, citations = [] }) {
if (!Array.isArray(citations) || citations.length === 0) return token;
return token.replace(/\[(\d+)\]/g, (match, index) => {
const citationIndex = parseInt(index) - 1;
return citations[citationIndex]
? `[[${index}](${citations[citationIndex]})]`
: match;
});
}
|
Generic formatting of a token for the following use cases:
- Perplexity models that return inline citations in the token text
@param {{token: string, citations: string[]}} options - The token text and citations.
@returns {string} - The formatted token text.
|
enrichToken
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/openRouter/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/openRouter/index.js
|
MIT
|
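Usage sketch for enrichToken above (called on the provider instance); bracketed citation indices become inline Markdown links. The token text and URLs are hypothetical.
enrichToken({
token: "Water boils at 100C at sea level [1], but lower at altitude [2].",
citations: ["https://example.com/boiling", "https://example.com/altitude"],
});
// -> "Water boils at 100C at sea level [[1](https://example.com/boiling)], but lower at altitude [[2](https://example.com/altitude)]."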
log(text, ...args) {
console.log(`\x1b[36m[${this.constructor.name}]\x1b[0m ${text}`, ...args);
}
|
Generic formatting of a token for the following use cases:
- Perplexity models that return inline citations in the token text
@param {{token: string, citations: string[]}} options - The token text and citations.
@returns {string} - The formatted token text.
|
log
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/openRouter/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/openRouter/index.js
|
MIT
|
models() {
if (!fs.existsSync(this.cacheModelPath)) return {};
return safeJsonParse(
fs.readFileSync(this.cacheModelPath, { encoding: "utf-8" }),
{}
);
}
|
OpenRouter has various models that never return `finish_reasons` and thus leave the stream open
which causes issues in subsequent messages. This timeout value forces us to close the stream after
x milliseconds. This value is configurable via the OPENROUTER_TIMEOUT_MS environment variable.
@returns {number} The timeout value in milliseconds (default: 500)
|
models
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/openRouter/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/openRouter/index.js
|
MIT
|
streamingEnabled() {
return "streamGetChatCompletion" in this;
}
|
OpenRouter has various models that never return `finish_reasons` and thus leave the stream open
which causes issues in subsequent messages. This timeout value forces us to close the stream after
x milliseconds. This value is configurable via the OPENROUTER_TIMEOUT_MS environment variable.
@returns {number} The timeout value in milliseconds (default: 500)
|
streamingEnabled
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/openRouter/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/openRouter/index.js
|
MIT
|
static promptWindowLimit(modelName) {
const cacheModelPath = path.resolve(cacheFolder, "models.json");
const availableModels = fs.existsSync(cacheModelPath)
? safeJsonParse(
fs.readFileSync(cacheModelPath, { encoding: "utf-8" }),
{}
)
: {};
return availableModels[modelName]?.maxLength || 4096;
}
|
OpenRouter has various models that never return `finish_reasons` and thus leave the stream open
which causes issues in subsequent messages. This timeout value forces us to close the stream after
x milliseconds. This value is configurable via the OPENROUTER_TIMEOUT_MS environment variable.
@returns {number} The timeout value in milliseconds (default: 500)
|
promptWindowLimit
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/openRouter/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/openRouter/index.js
|
MIT
|
promptWindowLimit() {
const availableModels = this.models();
return availableModels[this.model]?.maxLength || 4096;
}
|
OpenRouter has various models that never return `finish_reasons` and thus leave the stream open
which causes issues in subsequent messages. This timeout value forces us to close the stream after
x milliseconds. This value is configurable via the OPENROUTER_TIMEOUT_MS environment variable.
@returns {number} The timeout value in milliseconds (default: 500)
|
promptWindowLimit
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/openRouter/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/openRouter/index.js
|
MIT
|
async isValidChatCompletionModel(model = "") {
await this.#syncModels();
const availableModels = this.models();
return availableModels.hasOwnProperty(model);
}
|
OpenRouter has various models that never return `finish_reasons` and thus leave the stream open
which causes issues in subsequent messages. This timeout value forces us to close the stream after
x milliseconds. This value is configurable via the OPENROUTER_TIMEOUT_MS environment variable.
@returns {number} The timeout value in milliseconds (default: 500)
|
isValidChatCompletionModel
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/openRouter/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/openRouter/index.js
|
MIT
|
constructPrompt({
systemPrompt = "",
contextTexts = [],
chatHistory = [],
userPrompt = "",
attachments = [],
}) {
const prompt = {
role: "system",
content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
};
return [
prompt,
...formatChatHistory(chatHistory, this.#generateContent),
{
role: "user",
content: this.#generateContent({ userPrompt, attachments }),
},
];
}
|
Parses and prepends reasoning from the response and returns the full text response.
@param {Object} response
@returns {string}
|
constructPrompt
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/openRouter/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/openRouter/index.js
|
MIT
|
async getChatCompletion(messages = null, { temperature = 0.7 }) {
if (!(await this.isValidChatCompletionModel(this.model)))
throw new Error(
`OpenRouter chat: ${this.model} is not valid for chat completion!`
);
const result = await LLMPerformanceMonitor.measureAsyncFunction(
this.openai.chat.completions
.create({
model: this.model,
messages,
temperature,
// This is an OpenRouter specific option that allows us to get the reasoning text
// before the token text.
include_reasoning: true,
})
.catch((e) => {
throw new Error(e.message);
})
);
if (
!result?.output?.hasOwnProperty("choices") ||
result?.output?.choices?.length === 0
)
throw new Error(
`Invalid response body returned from OpenRouter: ${result.output?.error?.message || "Unknown error"} ${result.output?.error?.code || "Unknown code"}`
);
return {
textResponse: this.#parseReasoningFromResponse(result.output.choices[0]),
metrics: {
prompt_tokens: result.output.usage.prompt_tokens || 0,
completion_tokens: result.output.usage.completion_tokens || 0,
total_tokens: result.output.usage.total_tokens || 0,
outputTps: result.output.usage.completion_tokens / result.duration,
duration: result.duration,
},
};
}
|
Parses and prepends reasoning from the response and returns the full text response.
@param {Object} response
@returns {string}
|
getChatCompletion
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/openRouter/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/openRouter/index.js
|
MIT
|
async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
if (!(await this.isValidChatCompletionModel(this.model)))
throw new Error(
`OpenRouter chat: ${this.model} is not valid for chat completion!`
);
const measuredStreamRequest = await LLMPerformanceMonitor.measureStream(
this.openai.chat.completions.create({
model: this.model,
stream: true,
messages,
temperature,
// This is an OpenRouter specific option that allows us to get the reasoning text
// before the token text.
include_reasoning: true,
}),
messages
// We have to manually count the tokens
// OpenRouter has a ton of providers and they all can return slightly differently
// some return chunk.usage on STOP, some do it after stop - it's inconsistent.
// So it is possible reported metrics are inaccurate since we cannot reliably
// catch the metrics before resolving the stream - so we just pretend this functionality
// is not available.
);
return measuredStreamRequest;
}
|
Parses and prepends reasoning from the response and returns the full text response.
@param {Object} response
@returns {string}
|
streamGetChatCompletion
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/openRouter/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/openRouter/index.js
|
MIT
|
handleStream(response, stream, responseProps) {
const timeoutThresholdMs = this.timeout;
const { uuid = uuidv4(), sources = [] } = responseProps;
return new Promise(async (resolve) => {
let fullText = "";
let reasoningText = "";
let lastChunkTime = null; // null until the first token is received.
let pplxCitations = []; // Array of inline citations for Perplexity models (if applicable)
let isPerplexity = this.isPerplexityModel;
// Establish listener to early-abort a streaming response
// in case things go sideways or the user does not like the response.
// We preserve the generated text but continue as if chat was completed
// to preserve previously generated content.
const handleAbort = () => {
stream?.endMeasurement({
completion_tokens: LLMPerformanceMonitor.countTokens(fullText),
});
clientAbortedHandler(resolve, fullText);
};
response.on("close", handleAbort);
// NOTICE: Not all OpenRouter models will return a stop reason
// which keeps the connection open and so the model never finalizes the stream
// like the traditional OpenAI response schema does. So in the case the response stream
// never reaches a formal close state we maintain an interval timer that if we go >=timeoutThresholdMs with
// no new chunks then we kill the stream and assume it to be complete. OpenRouter is quite fast
// so this threshold should permit most responses, but we can adjust `timeoutThresholdMs` if
// we find it is too aggressive.
const timeoutCheck = setInterval(() => {
if (lastChunkTime === null) return;
const now = Number(new Date());
const diffMs = now - lastChunkTime;
if (diffMs >= timeoutThresholdMs) {
console.log(
`OpenRouter stream did not self-close and has been stale for >${timeoutThresholdMs}ms. Closing response stream.`
);
writeResponseChunk(response, {
uuid,
sources,
type: "textResponseChunk",
textResponse: "",
close: true,
error: false,
});
clearInterval(timeoutCheck);
response.removeListener("close", handleAbort);
stream?.endMeasurement({
completion_tokens: LLMPerformanceMonitor.countTokens(fullText),
});
resolve(fullText);
}
}, 500);
try {
for await (const chunk of stream) {
const message = chunk?.choices?.[0];
const token = message?.delta?.content;
const reasoningToken = message?.delta?.reasoning;
lastChunkTime = Number(new Date());
// Some models will return citations (e.g. Perplexity) - we should preserve them for inline citations if applicable.
if (
isPerplexity &&
Array.isArray(chunk?.citations) &&
chunk?.citations?.length !== 0
)
pplxCitations.push(...chunk.citations);
// Reasoning models will always return the reasoning text before the token text.
// can be null or ''
if (reasoningToken) {
const formattedReasoningToken = this.enrichToken({
token: reasoningToken,
citations: pplxCitations,
});
// If the reasoning text is empty (''), we need to initialize it
// and send the first chunk of reasoning text.
if (reasoningText.length === 0) {
writeResponseChunk(response, {
uuid,
sources: [],
type: "textResponseChunk",
textResponse: `<think>${formattedReasoningToken}`,
close: false,
error: false,
});
reasoningText += `<think>${formattedReasoningToken}`;
continue;
} else {
// If the reasoning text is not empty, we need to append the reasoning text
// to the existing reasoning text.
writeResponseChunk(response, {
uuid,
sources: [],
type: "textResponseChunk",
textResponse: formattedReasoningToken,
close: false,
error: false,
});
reasoningText += formattedReasoningToken;
}
}
// If the reasoning text is not empty, but the reasoning token is empty
// and the token text is not empty we need to close the reasoning text and begin sending the token text.
if (!!reasoningText && !reasoningToken && token) {
writeResponseChunk(response, {
uuid,
sources: [],
type: "textResponseChunk",
textResponse: `</think>`,
close: false,
error: false,
});
fullText += `${reasoningText}</think>`;
reasoningText = "";
}
if (token) {
const formattedToken = this.enrichToken({
token,
citations: pplxCitations,
});
fullText += formattedToken;
writeResponseChunk(response, {
uuid,
sources: [],
type: "textResponseChunk",
textResponse: formattedToken,
close: false,
error: false,
});
}
if (message.finish_reason !== null) {
writeResponseChunk(response, {
uuid,
sources,
type: "textResponseChunk",
textResponse: "",
close: true,
error: false,
});
response.removeListener("close", handleAbort);
clearInterval(timeoutCheck);
stream?.endMeasurement({
completion_tokens: LLMPerformanceMonitor.countTokens(fullText),
});
resolve(fullText);
}
}
} catch (e) {
writeResponseChunk(response, {
uuid,
sources,
type: "abort",
textResponse: null,
close: true,
error: e.message,
});
response.removeListener("close", handleAbort);
clearInterval(timeoutCheck);
stream?.endMeasurement({
completion_tokens: LLMPerformanceMonitor.countTokens(fullText),
});
resolve(fullText);
}
});
}
|
Handles the default stream response for a chat.
@param {import("express").Response} response
@param {import('../../helpers/chat/LLMPerformanceMonitor').MonitoredStream} stream
@param {Object} responseProps
@returns {Promise<string>}
|
handleStream
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/openRouter/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/openRouter/index.js
|
MIT
|
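The NOTICE comment in handleStream above explains why a stale-stream watchdog is needed for providers that never send a finish signal. The sketch below is a simplified, hypothetical reduction of that pattern — the helper name, the onStale callback, and the thresholds are illustrative, not code from the repository:

// Minimal sketch of the stale-stream watchdog pattern (illustrative only).
function createStaleStreamWatchdog({ timeoutThresholdMs = 800, intervalMs = 500, onStale }) {
  let lastChunkTime = null;
  const timer = setInterval(() => {
    if (lastChunkTime === null) return; // no chunks yet - keep waiting
    if (Number(new Date()) - lastChunkTime >= timeoutThresholdMs) {
      clearInterval(timer); // stop polling once we decide the stream is done
      onStale(); // finalize the response as if the stream had closed normally
    }
  }, intervalMs);
  return {
    markChunk: () => (lastChunkTime = Number(new Date())), // call for every streamed chunk
    stop: () => clearInterval(timer), // call when the stream closes on its own
  };
}

In handleStream, markChunk corresponds to updating lastChunkTime inside the for-await loop, and stop corresponds to the clearInterval calls made when a finish_reason arrives or an error is thrown.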
handleAbort = () => {
stream?.endMeasurement({
completion_tokens: LLMPerformanceMonitor.countTokens(fullText),
});
clientAbortedHandler(resolve, fullText);
}
|
Handles a client abort of the response stream: ends the stream's
performance measurement and resolves with the text generated so far.
@returns {void}
|
handleAbort
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/openRouter/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/openRouter/index.js
|
MIT
|
handleAbort = () => {
stream?.endMeasurement({
completion_tokens: LLMPerformanceMonitor.countTokens(fullText),
});
clientAbortedHandler(resolve, fullText);
}
|
Handles a client abort of the response stream: ends the stream's
performance measurement and resolves with the text generated so far.
@returns {void}
|
handleAbort
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/openRouter/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/openRouter/index.js
|
MIT
|
async embedTextInput(textInput) {
return await this.embedder.embedTextInput(textInput);
}
|
Embeds a single text input using the configured embedder.
@param {string} textInput
@returns The embedding for the given text input.
|
embedTextInput
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/openRouter/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/openRouter/index.js
|
MIT
|
async embedChunks(textChunks = []) {
return await this.embedder.embedChunks(textChunks);
}
|
Embeds an array of text chunks using the configured embedder.
@param {string[]} textChunks
@returns The embeddings for the given chunks.
|
embedChunks
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/openRouter/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/openRouter/index.js
|
MIT
|
async compressMessages(promptArgs = {}, rawHistory = []) {
const { messageArrayCompressor } = require("../../helpers/chat");
const messageArray = this.constructPrompt(promptArgs);
return await messageArrayCompressor(this, messageArray, rawHistory);
}
|
Compresses the constructed prompt and raw chat history so the final
message array fits within the model's context window.
@param {Object} promptArgs
@param {Array} rawHistory
@returns {Promise<Array>} The compressed message array.
|
compressMessages
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/openRouter/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/openRouter/index.js
|
MIT
|
async function fetchOpenRouterModels() {
return await fetch(`https://openrouter.ai/api/v1/models`, {
method: "GET",
headers: {
"Content-Type": "application/json",
},
})
.then((res) => res.json())
.then(({ data = [] }) => {
const models = {};
data.forEach((model) => {
models[model.id] = {
id: model.id,
name: model.name,
organization:
model.id.split("/")[0].charAt(0).toUpperCase() +
model.id.split("/")[0].slice(1),
maxLength: model.context_length,
};
});
// Cache all response information
if (!fs.existsSync(cacheFolder))
fs.mkdirSync(cacheFolder, { recursive: true });
fs.writeFileSync(
path.resolve(cacheFolder, "models.json"),
JSON.stringify(models),
{
encoding: "utf-8",
}
);
fs.writeFileSync(
path.resolve(cacheFolder, ".cached_at"),
String(Number(new Date())),
{
encoding: "utf-8",
}
);
return models;
})
.catch((e) => {
console.error(e);
return {};
});
}
|
Fetches the list of available OpenRouter models and caches the result
(plus a timestamp) to the local cache folder.
@returns {Promise<Object>} A map of model id to model metadata; an empty object on failure.
|
fetchOpenRouterModels
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/openRouter/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/openRouter/index.js
|
MIT
|
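Because fetchOpenRouterModels writes both models.json and a .cached_at timestamp, a caller can serve the cached list while it is still fresh and only re-fetch once it expires. The helper below is a hypothetical companion sketch — the function name and the one-day TTL are assumptions, not the repository's actual cache logic:

const fs = require("fs");
const path = require("path");

// Hypothetical reader for the cache files written by fetchOpenRouterModels().
function readCachedModels(cacheFolder, maxAgeMs = 24 * 60 * 60 * 1000) {
  const modelsPath = path.resolve(cacheFolder, "models.json");
  const stampPath = path.resolve(cacheFolder, ".cached_at");
  if (!fs.existsSync(modelsPath) || !fs.existsSync(stampPath)) return null; // never cached
  const cachedAt = Number(fs.readFileSync(stampPath, { encoding: "utf-8" }));
  if (!Number.isFinite(cachedAt) || Number(new Date()) - cachedAt > maxAgeMs) return null; // stale
  return JSON.parse(fs.readFileSync(modelsPath, { encoding: "utf-8" }));
}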
enrichToken(token, citations) {
if (!Array.isArray(citations) || citations.length === 0) return token;
return token.replace(/\[(\d+)\]/g, (match, index) => {
const citationIndex = parseInt(index) - 1;
return citations[citationIndex]
? `[[${index}](${citations[citationIndex]})]`
: match;
});
}
|
Enrich a token with citations if available for in-line citations.
@param {string} token - The token to enrich.
@param {Array} citations - The citations to enrich the token with.
@returns {string} The enriched token.
|
enrichToken
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/perplexity/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/perplexity/index.js
|
MIT
|
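For reference, the regex replacement in enrichToken rewrites Perplexity-style numeric markers into markdown links and leaves out-of-range markers untouched. A standalone, runnable illustration of that replacement with placeholder URLs:

// Standalone illustration of the citation rewrite performed by enrichToken().
const citations = ["https://example.com/a", "https://example.com/b"]; // placeholder URLs
const token = "See [1] and [2], but not [9].";
const enriched = token.replace(/\[(\d+)\]/g, (match, index) => {
  const citationIndex = parseInt(index) - 1;
  return citations[citationIndex]
    ? `[[${index}](${citations[citationIndex]})]`
    : match; // marker has no matching citation - leave it as-is
});
console.log(enriched);
// -> "See [[1](https://example.com/a)] and [[2](https://example.com/b)], but not [9]."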
handleStream(response, stream, responseProps) {
const timeoutThresholdMs = 800;
const { uuid = uuidv4(), sources = [] } = responseProps;
let hasUsageMetrics = false;
let pplxCitations = []; // Array of links
let usage = {
completion_tokens: 0,
};
return new Promise(async (resolve) => {
let fullText = "";
let lastChunkTime = null;
const handleAbort = () => {
stream?.endMeasurement(usage);
clientAbortedHandler(resolve, fullText);
};
response.on("close", handleAbort);
const timeoutCheck = setInterval(() => {
if (lastChunkTime === null) return;
const now = Number(new Date());
const diffMs = now - lastChunkTime;
if (diffMs >= timeoutThresholdMs) {
console.log(
`Perplexity stream did not self-close and has been stale for >${timeoutThresholdMs}ms. Closing response stream.`
);
writeResponseChunk(response, {
uuid,
sources,
type: "textResponseChunk",
textResponse: "",
close: true,
error: false,
});
clearInterval(timeoutCheck);
response.removeListener("close", handleAbort);
stream?.endMeasurement(usage);
resolve(fullText);
}
}, 500);
// Now handle the chunks from the streamed response and append to fullText.
try {
for await (const chunk of stream) {
lastChunkTime = Number(new Date());
const message = chunk?.choices?.[0];
const token = message?.delta?.content;
if (Array.isArray(chunk.citations) && chunk.citations.length !== 0) {
pplxCitations = chunk.citations;
}
// If we see usage metrics in the chunk, we can use them directly
// instead of estimating them, but we only want to assign values if
// the response object is the exact same key:value pair we expect.
if (
chunk.hasOwnProperty("usage") && // exists
!!chunk.usage && // is not null
Object.values(chunk.usage).length > 0 // has values
) {
if (chunk.usage.hasOwnProperty("prompt_tokens")) {
usage.prompt_tokens = Number(chunk.usage.prompt_tokens);
}
if (chunk.usage.hasOwnProperty("completion_tokens")) {
hasUsageMetrics = true; // to stop estimating counter
usage.completion_tokens = Number(chunk.usage.completion_tokens);
}
}
if (token) {
let enrichedToken = this.enrichToken(token, pplxCitations);
fullText += enrichedToken;
if (!hasUsageMetrics) usage.completion_tokens++;
writeResponseChunk(response, {
uuid,
sources: [],
type: "textResponseChunk",
textResponse: enrichedToken,
close: false,
error: false,
});
}
if (message?.finish_reason) {
console.log("closing");
writeResponseChunk(response, {
uuid,
sources,
type: "textResponseChunk",
textResponse: "",
close: true,
error: false,
});
response.removeListener("close", handleAbort);
stream?.endMeasurement(usage);
clearInterval(timeoutCheck);
resolve(fullText);
break; // Break streaming when a valid finish_reason is first encountered
}
}
} catch (e) {
console.log(`\x1b[43m\x1b[34m[STREAMING ERROR]\x1b[0m ${e.message}`);
writeResponseChunk(response, {
uuid,
type: "abort",
textResponse: null,
sources: [],
close: true,
error: e.message,
});
stream?.endMeasurement(usage);
clearInterval(timeoutCheck);
resolve(fullText); // Return what we currently have - if anything.
}
});
}
|
Handles the streamed chat response for Perplexity, including in-line
citation enrichment and a stale-stream timeout.
@param {import("express").Response} response
@param {import('../../helpers/chat/LLMPerformanceMonitor').MonitoredStream} stream
@param {Object} responseProps
@returns {Promise<string>}
|
handleStream
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/perplexity/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/perplexity/index.js
|
MIT
|
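The usage handling in handleStream above prefers provider-reported token counts and only estimates completion tokens until real metrics arrive. The sketch below is a hypothetical reduction of that fallback — the helper name and shape are assumptions, not repository code:

// Reduced sketch of the usage fallback: prefer reported counts, otherwise estimate.
function makeUsageTracker() {
  let hasUsageMetrics = false;
  const usage = { completion_tokens: 0 };
  return {
    onChunk(chunk) {
      if (!chunk?.usage || Object.values(chunk.usage).length === 0) return;
      if ("prompt_tokens" in chunk.usage)
        usage.prompt_tokens = Number(chunk.usage.prompt_tokens);
      if ("completion_tokens" in chunk.usage) {
        hasUsageMetrics = true; // stop estimating once real metrics arrive
        usage.completion_tokens = Number(chunk.usage.completion_tokens);
      }
    },
    onToken() {
      if (!hasUsageMetrics) usage.completion_tokens++; // rough one-token-per-chunk estimate
    },
    current: () => usage,
  };
}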
handleAbort = () => {
stream?.endMeasurement(usage);
clientAbortedHandler(resolve, fullText);
}
|
Handles a client abort of the response stream: ends the stream's
performance measurement and resolves with the text generated so far.
@returns {void}
|
handleAbort
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/perplexity/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/perplexity/index.js
|
MIT
|
handleAbort = () => {
stream?.endMeasurement(usage);
clientAbortedHandler(resolve, fullText);
}
|
Handles a client abort of the response stream: ends the stream's
performance measurement and resolves with the text generated so far.
@returns {void}
|
handleAbort
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/perplexity/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/perplexity/index.js
|
MIT
|
async embedTextInput(textInput) {
return await this.embedder.embedTextInput(textInput);
}
|
Embeds a single text input using the configured embedder.
@param {string} textInput
@returns The embedding for the given text input.
|
embedTextInput
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/perplexity/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/perplexity/index.js
|
MIT
|
async embedChunks(textChunks = []) {
return await this.embedder.embedChunks(textChunks);
}
|
Embeds an array of text chunks using the configured embedder.
@param {string[]} textChunks
@returns The embeddings for the given chunks.
|
embedChunks
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/perplexity/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/perplexity/index.js
|
MIT
|
async compressMessages(promptArgs = {}, rawHistory = []) {
const { messageArrayCompressor } = require("../../helpers/chat");
const messageArray = this.constructPrompt(promptArgs);
return await messageArrayCompressor(this, messageArray, rawHistory);
}
|
Compresses the constructed prompt and raw chat history so the final
message array fits within the model's context window.
@param {Object} promptArgs
@param {Array} rawHistory
@returns {Promise<Array>} The compressed message array.
|
compressMessages
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/perplexity/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/perplexity/index.js
|
MIT
|
constructPrompt({
systemPrompt = "",
contextTexts = [],
chatHistory = [],
userPrompt = "",
// attachments = [], - not supported
}) {
const prompt = {
role: "system",
content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
};
return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
}
|
Constructs the system and user message array for this model.
Attachments are not supported by this provider and are ignored.
@param {{systemPrompt: string, contextTexts: string[], chatHistory: object[], userPrompt: string}} param0
@returns {object[]} The chat message array.
|
constructPrompt
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/ppio/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/ppio/index.js
|
MIT
|
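With sample inputs, the constructPrompt method above yields a plain OpenAI-style message array in which prior chat history is passed through unchanged and attachments are ignored. The values below are illustrative only:

// Illustrative shape of the array returned by constructPrompt() (sample values only).
const messages = [
  // systemPrompt with the formatted context texts appended by #appendContext()
  { role: "system", content: "You are a helpful assistant. ...context snippets..." },
  // prior chatHistory entries are spread in unchanged
  { role: "user", content: "Earlier question" },
  { role: "assistant", content: "Earlier answer" },
  // the current turn; attachments are not supported by this provider
  { role: "user", content: "What does the context say about pricing?" },
];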
async getChatCompletion(messages = null, { temperature = 0.7 }) {
if (!(await this.isValidChatCompletionModel(this.model)))
throw new Error(
`PPIO chat: ${this.model} is not valid for chat completion!`
);
const result = await LLMPerformanceMonitor.measureAsyncFunction(
this.openai.chat.completions
.create({
model: this.model,
messages,
temperature,
})
.catch((e) => {
throw new Error(e.message);
})
);
if (
!Object.prototype.hasOwnProperty.call(result.output, "choices") ||
result.output.choices.length === 0
)
return null;
return {
textResponse: result.output.choices[0].message.content,
metrics: {
prompt_tokens: result.output.usage.prompt_tokens || 0,
completion_tokens: result.output.usage.completion_tokens || 0,
total_tokens: result.output.usage.total_tokens || 0,
outputTps: result.output.usage.completion_tokens / result.duration,
duration: result.duration,
},
};
}
|
Sends a non-streaming chat completion request and returns the text
response along with basic performance metrics.
@param {object[]} messages
@param {{temperature: number}} options
@returns {Promise<{textResponse: string, metrics: object}|null>}
|
getChatCompletion
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/ppio/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/ppio/index.js
|
MIT
|
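The metrics block above derives tokens-per-second from the monitored call duration. Assuming duration is reported in seconds (which the tokens-per-second name implies), the arithmetic works out as in this small sketch with made-up numbers:

// Worked example of the outputTps calculation (sample values; duration assumed to be in seconds).
const usage = { prompt_tokens: 350, completion_tokens: 120, total_tokens: 470 };
const duration = 2.5; // as measured around the chat completion call
const outputTps = usage.completion_tokens / duration; // 120 / 2.5 = 48 tokens per second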
async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
if (!(await this.isValidChatCompletionModel(this.model)))
throw new Error(
`PPIO chat: ${this.model} is not valid for chat completion!`
);
const measuredStreamRequest = await LLMPerformanceMonitor.measureStream(
this.openai.chat.completions.create({
model: this.model,
stream: true,
messages,
temperature,
}),
messages
);
return measuredStreamRequest;
}
|
Starts a streaming chat completion request wrapped with performance monitoring.
@param {object[]} messages
@param {{temperature: number}} options
@returns {Promise<import('../../helpers/chat/LLMPerformanceMonitor').MonitoredStream>}
|
streamGetChatCompletion
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/ppio/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/ppio/index.js
|
MIT
|
handleStream(response, stream, responseProps) {
return handleDefaultStreamResponseV2(response, stream, responseProps);
}
|
Handles the default stream response for a chat.
@param {import("express").Response} response
@param {import('../../helpers/chat/LLMPerformanceMonitor').MonitoredStream} stream
@param {Object} responseProps
@returns {Promise<string>}
|
handleStream
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/ppio/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/ppio/index.js
|
MIT
|
async embedTextInput(textInput) {
return await this.embedder.embedTextInput(textInput);
}
|
Embeds a single text input using the configured embedder.
@param {string} textInput
@returns The embedding for the given text input.
|
embedTextInput
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/ppio/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/ppio/index.js
|
MIT
|
async embedChunks(textChunks = []) {
return await this.embedder.embedChunks(textChunks);
}
|
Embeds an array of text chunks using the configured embedder.
@param {string[]} textChunks
@returns The embeddings for the given chunks.
|
embedChunks
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/ppio/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/ppio/index.js
|
MIT
|
async compressMessages(promptArgs = {}, rawHistory = []) {
const { messageArrayCompressor } = require("../../helpers/chat");
const messageArray = this.constructPrompt(promptArgs);
return await messageArrayCompressor(this, messageArray, rawHistory);
}
|
Compresses the constructed prompt and raw chat history so the final
message array fits within the model's context window.
@param {Object} promptArgs
@param {Array} rawHistory
@returns {Promise<Array>} The compressed message array.
|
compressMessages
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/ppio/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/ppio/index.js
|
MIT
|
async function fetchPPIOModels() {
return await fetch(`https://api.ppinfra.com/v3/openai/models`, {
method: "GET",
headers: {
"Content-Type": "application/json",
Authorization: `Bearer ${process.env.PPIO_API_KEY}`,
},
})
.then((res) => res.json())
.then(({ data = [] }) => {
const models = {};
data.forEach((model) => {
const organization = model.id?.split("/")?.[0] || "PPIO";
models[model.id] = {
id: model.id,
name: model.display_name || model.title || model.id,
organization,
maxLength: model.context_size || 4096,
};
});
if (!fs.existsSync(cacheFolder))
fs.mkdirSync(cacheFolder, { recursive: true });
fs.writeFileSync(
path.resolve(cacheFolder, "models.json"),
JSON.stringify(models),
{
encoding: "utf-8",
}
);
fs.writeFileSync(
path.resolve(cacheFolder, ".cached_at"),
String(Number(new Date())),
{
encoding: "utf-8",
}
);
return models;
})
.catch((e) => {
console.error(e);
return {};
});
}
|
Fetches the list of available PPIO models (authenticated with PPIO_API_KEY)
and caches the result (plus a timestamp) to the local cache folder.
@returns {Promise<Object>} A map of model id to model metadata; an empty object on failure.
|
fetchPPIOModels
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/ppio/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/ppio/index.js
|
MIT
|
constructPrompt({
systemPrompt = "",
contextTexts = [],
chatHistory = [],
userPrompt = "",
attachments = [],
}) {
const prompt = {
role: "system",
content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
};
return [
prompt,
...formatChatHistory(chatHistory, this.#generateContent),
{
role: "user",
content: this.#generateContent({ userPrompt, attachments }),
},
];
}
|
Construct the user prompt for this model.
@param {{attachments: import("../../helpers").Attachment[]}} param0
@returns
|
constructPrompt
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/textGenWebUI/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/textGenWebUI/index.js
|
MIT
|
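When attachments are present, the #generateContent helper referenced above (not shown in this excerpt) conventionally produces an OpenAI-style multi-part content array. The exact shape below is an assumption based on that convention, not code from this file:

// Assumed multi-part user message when attachments exist (OpenAI vision-style convention).
const userMessage = {
  role: "user",
  content: [
    { type: "text", text: "What is in this image?" },
    { type: "image_url", image_url: { url: "data:image/png;base64,..." } }, // attachment content
  ],
};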
async getChatCompletion(messages = null, { temperature = 0.7 }) {
const result = await LLMPerformanceMonitor.measureAsyncFunction(
this.openai.chat.completions
.create({
model: this.model,
messages,
temperature,
})
.catch((e) => {
throw new Error(e.message);
})
);
if (
!result.output.hasOwnProperty("choices") ||
result.output.choices.length === 0
)
return null;
return {
textResponse: result.output.choices[0].message.content,
metrics: {
prompt_tokens: result.output.usage?.prompt_tokens || 0,
completion_tokens: result.output.usage?.completion_tokens || 0,
total_tokens: result.output.usage?.total_tokens || 0,
outputTps: result.output.usage?.completion_tokens / result.duration,
duration: result.duration,
},
};
}
|
Sends a non-streaming chat completion request and returns the text
response along with basic performance metrics.
@param {object[]} messages
@param {{temperature: number}} options
@returns {Promise<{textResponse: string, metrics: object}|null>}
|
getChatCompletion
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/textGenWebUI/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/textGenWebUI/index.js
|
MIT
|
async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
const measuredStreamRequest = await LLMPerformanceMonitor.measureStream(
this.openai.chat.completions.create({
model: this.model,
stream: true,
messages,
temperature,
}),
messages
);
return measuredStreamRequest;
}
|
Starts a streaming chat completion request wrapped with performance monitoring.
@param {object[]} messages
@param {{temperature: number}} options
@returns {Promise<import('../../helpers/chat/LLMPerformanceMonitor').MonitoredStream>}
|
streamGetChatCompletion
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/textGenWebUI/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/textGenWebUI/index.js
|
MIT
|
handleStream(response, stream, responseProps) {
return handleDefaultStreamResponseV2(response, stream, responseProps);
}
|
Handles the default stream response for a chat.
@param {import("express").Response} response
@param {import('../../helpers/chat/LLMPerformanceMonitor').MonitoredStream} stream
@param {Object} responseProps
@returns {Promise<string>}
|
handleStream
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/textGenWebUI/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/textGenWebUI/index.js
|
MIT
|
async embedTextInput(textInput) {
return await this.embedder.embedTextInput(textInput);
}
|
Embeds a single text input using the configured embedder.
@param {string} textInput
@returns The embedding for the given text input.
|
embedTextInput
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/textGenWebUI/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/textGenWebUI/index.js
|
MIT
|
async embedChunks(textChunks = []) {
return await this.embedder.embedChunks(textChunks);
}
|
Embeds an array of text chunks using the configured embedder.
@param {string[]} textChunks
@returns The embeddings for the given chunks.
|
embedChunks
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/textGenWebUI/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/textGenWebUI/index.js
|
MIT
|
async compressMessages(promptArgs = {}, rawHistory = []) {
const { messageArrayCompressor } = require("../../helpers/chat");
const messageArray = this.constructPrompt(promptArgs);
return await messageArrayCompressor(this, messageArray, rawHistory);
}
|
Compresses the constructed prompt and raw chat history so the final
message array fits within the model's context window.
@param {Object} promptArgs
@param {Array} rawHistory
@returns {Promise<Array>} The compressed message array.
|
compressMessages
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/textGenWebUI/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/textGenWebUI/index.js
|
MIT
|
constructPrompt({
systemPrompt = "",
contextTexts = [],
chatHistory = [],
userPrompt = "",
attachments = [], // This is the specific attachment for only this prompt
}) {
const prompt = {
role: "system",
content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
};
return [
prompt,
...formatChatHistory(chatHistory, this.#generateContent),
{
role: "user",
content: this.#generateContent({ userPrompt, attachments }),
},
];
}
|
Construct the user prompt for this model.
@param {{attachments: import("../../helpers").Attachment[]}} param0
@returns
|
constructPrompt
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/xai/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/xai/index.js
|
MIT
|
async getChatCompletion(messages = null, { temperature = 0.7 }) {
if (!this.isValidChatCompletionModel(this.model))
throw new Error(
`xAI chat: ${this.model} is not valid for chat completion!`
);
const result = await LLMPerformanceMonitor.measureAsyncFunction(
this.openai.chat.completions
.create({
model: this.model,
messages,
temperature,
})
.catch((e) => {
throw new Error(e.message);
})
);
if (
!result.output.hasOwnProperty("choices") ||
result.output.choices.length === 0
)
return null;
return {
textResponse: result.output.choices[0].message.content,
metrics: {
prompt_tokens: result.output.usage.prompt_tokens || 0,
completion_tokens: result.output.usage.completion_tokens || 0,
total_tokens: result.output.usage.total_tokens || 0,
outputTps: result.output.usage.completion_tokens / result.duration,
duration: result.duration,
},
};
}
|
Sends a non-streaming chat completion request and returns the text
response along with basic performance metrics.
@param {object[]} messages
@param {{temperature: number}} options
@returns {Promise<{textResponse: string, metrics: object}|null>}
|
getChatCompletion
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/xai/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/xai/index.js
|
MIT
|
async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
if (!this.isValidChatCompletionModel(this.model))
throw new Error(
`xAI chat: ${this.model} is not valid for chat completion!`
);
const measuredStreamRequest = await LLMPerformanceMonitor.measureStream(
this.openai.chat.completions.create({
model: this.model,
stream: true,
messages,
temperature,
}),
messages,
false
);
return measuredStreamRequest;
}
|
Starts a streaming chat completion request wrapped with performance monitoring.
@param {object[]} messages
@param {{temperature: number}} options
@returns {Promise<import('../../helpers/chat/LLMPerformanceMonitor').MonitoredStream>}
|
streamGetChatCompletion
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/xai/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/xai/index.js
|
MIT
|
handleStream(response, stream, responseProps) {
return handleDefaultStreamResponseV2(response, stream, responseProps);
}
|
Handles the default stream response for a chat.
@param {import("express").Response} response
@param {import('../../helpers/chat/LLMPerformanceMonitor').MonitoredStream} stream
@param {Object} responseProps
@returns {Promise<string>}
|
handleStream
|
javascript
|
Mintplex-Labs/anything-llm
|
server/utils/AiProviders/xai/index.js
|
https://github.com/Mintplex-Labs/anything-llm/blob/master/server/utils/AiProviders/xai/index.js
|
MIT
|