Thomas G. Lopes
committed on
Commit · 5853d12
Parent(s): 189caf9
minor improvements
- src/lib/stores/session.ts +2 -1
- src/lib/utils/json.ts +7 -0
- src/routes/+page.server.ts +8 -0
src/lib/stores/session.ts CHANGED

@@ -9,6 +9,7 @@ import {
 	type ModelWithTokenizer,
 	type Session,
 } from "$lib/types";
+import { safeParse } from "$lib/utils/json";
 import { getTrending } from "$lib/utils/model";
 import { get, writable } from "svelte/store";
 import typia from "typia";
@@ -64,7 +65,7 @@ function createSessionStore() {
 	if (browser) {
 		const savedData = localStorage.getItem(LOCAL_STORAGE_KEY);
 		if (savedData) {
-			const parsed = JSON.parse(savedData);
+			const parsed = safeParse(savedData);
 			const res = typia.validate<Session>(parsed);
 			if (res.success) savedSession = parsed;
 			else localStorage.setItem(LOCAL_STORAGE_KEY, JSON.stringify(savedSession));
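
Note on the session.ts change: swapping the bare JSON.parse for safeParse means corrupt localStorage contents now reach the typia check as null and get overwritten with the default session, instead of throwing during store creation. A minimal sketch of the guarded failure mode, assuming a browser context; the key name below is illustrative, not the app's LOCAL_STORAGE_KEY:

import { safeParse } from "$lib/utils/json";

const KEY = "demo-session"; // illustrative key, not the real constant
localStorage.setItem(KEY, "{not valid json");

// JSON.parse(localStorage.getItem(KEY)!); // would throw a SyntaxError
safeParse(localStorage.getItem(KEY)!); // returns null; validation then fails cleanly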
src/lib/utils/json.ts ADDED

@@ -0,0 +1,7 @@
+export function safeParse(str: string): any {
+	try {
+		return JSON.parse(str);
+	} catch {
+		return null;
+	}
+}
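
Usage sketch for the new helper: safeParse mirrors JSON.parse on valid input and converts the SyntaxError on invalid input into a null return, so callers can branch on the result instead of wrapping try/catch themselves:

import { safeParse } from "$lib/utils/json";

safeParse('{"a": 1}'); // => { a: 1 }
safeParse("not json"); // => null rather than throwing
safeParse("null");     // => null as well, so a null result is ambiguous by design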
src/routes/+page.server.ts CHANGED

@@ -2,7 +2,14 @@ import { env } from "$env/dynamic/private";
 import type { Model, ModelWithTokenizer } from "$lib/types";
 import type { PageServerLoad } from "./$types";
 
+let cache: { models: ModelWithTokenizer[] } | undefined;
+
 export const load: PageServerLoad = async ({ fetch }) => {
+	if (cache) {
+		console.log("Skipping load, using in memory cache");
+		return cache;
+	}
+
 	const apiUrl =
 		"https://huggingface.co/api/models?pipeline_tag=text-generation&filter=conversational&inference_provider=all&limit=100&expand[]=inferenceProviderMapping&expand[]=config&expand[]=library_name&expand[]=pipeline_tag&expand[]=tags&expand[]=mask_token&expand[]=trendingScore";
 	const HF_TOKEN = env.HF_TOKEN;
@@ -36,5 +43,6 @@ export const load: PageServerLoad = async ({ fetch }) => {
 
 	const models: ModelWithTokenizer[] = (await Promise.all(promises)).filter(model => model !== null);
 
+	cache = { models };
 	return { models };
 };
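
Note on the caching change: the module-level cache variable lives for the lifetime of the server process and is shared across all requests, so the Hugging Face models API is hit at most once per deployment; there is no invalidation, and a restart is needed to refresh the list. A hypothetical TTL variant, in case staleness becomes a concern; the names and the ten-minute window are illustrative, not part of the commit:

type CacheEntry<T> = { value: T; storedAt: number };

let entry: CacheEntry<unknown> | undefined;
const TTL_MS = 10 * 60 * 1000; // assumed freshness window

function readCache<T>(): T | undefined {
	// Serve the cached value only while it is still fresh.
	if (entry && Date.now() - entry.storedAt < TTL_MS) return entry.value as T;
	return undefined;
}

function writeCache<T>(value: T): void {
	entry = { value, storedAt: Date.now() };
}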