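// Server endpoint: fetches conversational text-generation models from the
// Hugging Face Hub API, enriches each one with its tokenizer_config.json, and
// returns the merged list as JSON.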
import type { Model, ModelWithTokenizer } from "$lib/types";
import { json } from "@sveltejs/kit";
import type { RequestHandler } from "./$types";
import { dev } from "$app/environment";

// Module-level cache; it lives for the lifetime of the server process.
let cache: ModelWithTokenizer[] | undefined;

// Browser-like navigation headers, shared by both Hub requests below.
const hubRequestHeaders = {
	"Upgrade-Insecure-Requests": "1",
	"Sec-Fetch-Dest": "document",
	"Sec-Fetch-Mode": "navigate",
	"Sec-Fetch-Site": "none",
	"Sec-Fetch-User": "?1",
	"Priority": "u=0, i",
	"Pragma": "no-cache",
	"Cache-Control": "no-cache",
};

export const GET: RequestHandler = async ({ fetch }) => {
	// In dev, reuse the cached list instead of re-fetching on every reload.
	if (cache?.length && dev) {
		console.log("Skipping load, using in-memory cache");
		return json(cache);
	}

	const apiUrl =
		"https://huggingface.co/api/models?pipeline_tag=text-generation&filter=conversational&inference_provider=all&limit=100&expand[]=inferenceProviderMapping&expand[]=config&expand[]=library_name&expand[]=pipeline_tag&expand[]=tags&expand[]=mask_token&expand[]=trendingScore";
	const res = await fetch(apiUrl, {
		credentials: "include",
		headers: hubRequestHeaders,
		method: "GET",
		mode: "cors",
	});
	if (!res.ok) {
		console.error("Error fetching warm models", res.status, res.statusText);
		// Return the same shape as the success path (a bare array) so callers
		// don't have to special-case the error response.
		return json([]);
	}

	const compatibleModels: Model[] = await res.json();
	// Sort case-insensitively by model id for a stable, predictable order.
	compatibleModels.sort((a, b) => a.id.toLowerCase().localeCompare(b.id.toLowerCase()));

	// Fetch each model's tokenizer config in parallel; failures resolve to null.
	const promises = compatibleModels.map(async model => {
		const configUrl = `https://huggingface.co/${model.id}/raw/main/tokenizer_config.json`;
		const res = await fetch(configUrl, {
			credentials: "include",
			headers: hubRequestHeaders,
			method: "GET",
			mode: "cors",
		});
		if (!res.ok) {
			// console.error(`Error fetching tokenizer file for ${model.id}`, res.status, res.statusText);
			return null; // Ignore failed requests by returning null
		}
		const tokenizerConfig = await res.json();
		return { ...model, tokenizerConfig } satisfies ModelWithTokenizer;
	});

	// The explicit type predicate narrows (ModelWithTokenizer | null)[] down to
	// ModelWithTokenizer[]; TypeScript only infers this from a bare
	// `model !== null` check as of 5.5.
	const models: ModelWithTokenizer[] = (await Promise.all(promises)).filter(
		(model): model is ModelWithTokenizer => model !== null
	);
	cache = models;
	return json(cache);
};
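
// A minimal client-side usage sketch (the route path below is an assumption;
// substitute whatever path this +server.ts file actually lives at):
//
//   const res = await fetch("/api/models");                 // hypothetical path
//   const models: ModelWithTokenizer[] = await res.json();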