import { OpenAIStream, HuggingFaceStream, StreamingTextResponse } from 'ai';
import { experimental_buildLlama2Prompt } from 'ai/prompts';
import { Configuration, OpenAIApi } from 'openai-edge';
import { HfInference } from '@huggingface/inference';

export const runtime = 'edge';

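/**
 * Streams a chat completion from either OpenAI (or any OpenAI-compatible
 * endpoint reachable at `url`) or a Hugging Face hosted model.
 *
 * Expected JSON body:
 * - messages: chat messages as { role, content } objects
 * - aiProvider: 'openai' (default) or a Hugging Face model id
 * - systemMessage: prepended as the system prompt when one isn't present
 * - url: optional base URL of an OpenAI-compatible API
 */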
export default async function handler(req: Request) {
  let { messages, aiProvider = 'openai', systemMessage, url } = await req.json();

  // OpenAI client; when a custom `url` is provided, point the client at that
  // OpenAI-compatible endpoint instead of the default api.openai.com.
  const openaiConfig = new Configuration({
    apiKey: process.env.OPENAI_API_KEY,
    basePath: url ? url : undefined,
  });
  const openai = new OpenAIApi(openaiConfig);

  // Hugging Face Inference client, used for any non-OpenAI provider.
  const hf = new HfInference(process.env.HUGGINGFACE_API_KEY);

  // Prepend the system message if it's not already there.
  if (messages.length === 0 || messages[0].role !== 'system') {
    messages = [{ role: 'system', content: systemMessage }, ...messages];
  }

  if (aiProvider === 'openai') {
    // Stream the completion from OpenAI (or the endpoint configured via `url`).
    const response = await openai.createChatCompletion({
      model: 'gpt-4',
      stream: true,
      messages,
    });
    const stream = OpenAIStream(response);
    return new StreamingTextResponse(stream);
  } else {
    // Treat any other `aiProvider` value as a Hugging Face model id.
    const response = hf.textGenerationStream({
      // @ts-ignore
      model: aiProvider,
      inputs: experimental_buildLlama2Prompt(messages),
      parameters: {
        repetition_penalty: 1,
        return_full_text: false,
      },
    });
    const stream = HuggingFaceStream(response);
    return new StreamingTextResponse(stream);
  }
}
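
// Illustrative client call (a sketch, not part of this module): the route path
// '/api/chat' and the Hugging Face model id below are assumptions; adjust them
// to match your deployment.
//
// const res = await fetch('/api/chat', {
//   method: 'POST',
//   body: JSON.stringify({
//     aiProvider: 'meta-llama/Llama-2-7b-chat-hf', // or 'openai' / omit for OpenAI
//     systemMessage: 'You are a helpful assistant.',
//     messages: [{ role: 'user', content: 'Hello!' }],
//   }),
// });
// // res.body is the text stream produced by StreamingTextResponse.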