"use server";
import { AutoTokenizer } from "@xenova/transformers";
import { HfInference } from "@huggingface/inference";
import { formatInformations, transformForInference } from "@/utils/roast";
import { FormProps } from "@/components/form";
import prisma from "@/utils/prisma";
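
// Roasts are generated with Llama 3.3 70B Instruct via the Hugging Face Inference API.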
const MODEL_ID = "meta-llama/Llama-3.3-70B-Instruct";
export async function roast({ username, language }: FormProps) {
  // Validate input before making any network calls.
  if (!username) {
    return { error: "Please provide a valid username", status: 400 };
  }
  const userResponse = await fetch(
    `https://huggingface.co/api/users/${username}/overview`
  );
  const user = await userResponse.json();
  if (!user || user.error) {
    return {
      error: user.error ?? "Something went wrong, please retry.",
      status: 404,
    };
  }
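
  // Pull the user's Spaces, models, and collections from the Hub API in parallel,
  // sorted by likes, downloads, and upvotes respectively.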
  const [spacesResponse, modelsResponse, collectionsResponse] =
    await Promise.all([
      fetch(
        `https://huggingface.co/api/spaces?author=${username}&sort=likes&limit=300&full=false`
      ),
      fetch(
        `https://huggingface.co/api/models?author=${username}&sort=downloads&limit=300&full=false`
      ),
      fetch(
        `https://huggingface.co/api/collections?owner=${username}&limit=100&sort=upvotes&full=false`
      ),
    ]);
  const [spaces, models, collections] = await Promise.all([
    spacesResponse.json(),
    modelsResponse.json(),
    collectionsResponse.json(),
  ]);
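
  // Aggregate community engagement: total likes across Spaces and models,
  // total upvotes across collections.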
  const [spacesLikes, modelsLikes] = [spaces, models].map((items) =>
    items.reduce((acc: number, item: any) => acc + item.likes, 0)
  );
  const collectionsUpvotes = collections?.reduce(
    (acc: number, item: any) => acc + item.upvotes,
    0
  );
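
  // Condense the profile and aggregated stats into prompt context
  // (formatInformations comes from @/utils/roast).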
  const datas = formatInformations(
    user,
    spaces,
    models,
    collections,
    spacesLikes,
    modelsLikes,
    collectionsUpvotes
  );
  const chat = transformForInference(
    datas,
    language,
    user.fullname ?? username
  );
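
  // The Inference API client authenticates with HF_ACCESS_TOKEN; since this is a
  // server action, the token never reaches the client.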
  const hf = new HfInference(process.env.HF_ACCESS_TOKEN);
  const tokenizer = await AutoTokenizer.from_pretrained(
    "philschmid/meta-llama-3-tokenizer"
  );
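
  // Render the chat messages into a single prompt string. The philschmid repo is a
  // standalone copy of the Llama 3 tokenizer that works with transformers.js, so the
  // chat template can be applied locally; add_generation_prompt appends the assistant
  // header so the model starts its reply.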
  const formattedPrompt = tokenizer.apply_chat_template(chat, {
    tokenize: false,
    add_generation_prompt: true,
  });
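
  // use_cache: false asks the Inference API for a fresh completion instead of a
  // cached one, so repeated roasts of the same profile can differ.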
  const res = await hf.textGeneration(
    {
      model: MODEL_ID,
      inputs: formattedPrompt as string,
      parameters: {
        return_full_text: false,
        max_new_tokens: 1024,
        // Llama 3 end-of-turn / end-of-text markers.
        stop_sequences: ["<|eot_id|>", "<|end_of_text|>"],
      },
    },
    {
      use_cache: false,
    }
  );
  return {
    data: res.generated_text,
  };
}
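
// Fetch a previously saved roast (stored as a Prisma `quote` record) by id.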
export async function getRoast({ id }: { id: string }) {
  const roast = await prisma.quote.findUnique({
    where: {
      id,
    },
  });
  if (!roast) {
    return {
      error: "Roast not found",
      status: 404,
    };
  }
  return {
    data: roast,
  };
}