import { HuggingFaceStream, OpenAIStream, StreamingTextResponse } from 'ai';
import { experimental_buildLlama2Prompt } from 'ai/prompts';
import { Configuration, OpenAIApi } from 'openai-edge';
import { HfInference } from '@huggingface/inference';
import { LLMStream } from '../../../utils/llm';

// Run on the Edge runtime so responses can be streamed without buffering
export const runtime = 'edge';

// App Router route handlers must be named HTTP-method exports, not default exports
export async function POST(req: Request) {
    let { messages, aiProvider = 'openai', systemMessage, url } = await req.json();

    // Set up clients for OpenAI (optionally against a custom base URL) and Hugging Face
    const openaiConfig = new Configuration({
        apiKey: process.env.OPENAI_API_KEY
    });
    const openai = new OpenAIApi(openaiConfig, url || undefined);
    const hf = new HfInference(process.env.HUGGINGFACE_API_KEY);

    // Prepend the system message if one was provided and isn't already present
    if (systemMessage && (messages.length === 0 || messages[0].role !== 'system')) {
        messages = [{
            role: 'system',
            content: systemMessage
        }, ...messages];
    }
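    // A caller-supplied url selects a custom model endpoint, handled by the local LLMStream util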
    if (url) {
        const stream = await LLMStream(url, messages);
        return new StreamingTextResponse(stream);
    } else if (aiProvider === 'openai') {
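        // Request a streaming chat completion from OpenAI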
        const response = await openai.createChatCompletion({
            model: 'gpt-4o-mini',
            stream: true,
            messages
        });
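        // Adapt OpenAI's streaming response into a text ReadableStream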
        const stream = OpenAIStream(response);
        return new StreamingTextResponse(stream);
    } else {
        // Any other aiProvider value is treated as a Hugging Face model id;
        // experimental_buildLlama2Prompt formats messages for Llama 2-style chat models
        const response = hf.textGenerationStream({
            model: aiProvider,
            inputs: experimental_buildLlama2Prompt(messages),
            parameters: {
                repetition_penalty: 1,
                return_full_text: false
            }
        });

        // Adapt the token generator into a ReadableStream for the response
        const stream = HuggingFaceStream(response);
        return new StreamingTextResponse(stream);
    }
}