Thomas G. Lopes committed on
Commit
058d10c
·
1 Parent(s): 2dd779d

get imgt2t models; render eye icon on them; add upload image icon

Browse files
src/lib/components/debug-menu.svelte CHANGED
@@ -8,6 +8,7 @@
8
  import { showQuotaModal } from "./quota-modal.svelte";
9
  import type { ToastData } from "./toaster.svelte.js";
10
  import { addToast } from "./toaster.svelte.js";
 
11
 
12
  let innerWidth = $state<number>();
13
  let innerHeight = $state<number>();
@@ -31,6 +32,12 @@
31
  console.log(session.$);
32
  },
33
  },
 
 
 
 
 
 
34
  {
35
  label: "Test prompt",
36
  cb: async () => {
 
8
  import { showQuotaModal } from "./quota-modal.svelte";
9
  import type { ToastData } from "./toaster.svelte.js";
10
  import { addToast } from "./toaster.svelte.js";
11
+ import { models } from "$lib/state/models.svelte";
12
 
13
  let innerWidth = $state<number>();
14
  let innerHeight = $state<number>();
 
32
  console.log(session.$);
33
  },
34
  },
35
+ {
36
+ label: "Log models to console",
37
+ cb: () => {
38
+ console.log(models.all);
39
+ },
40
+ },
41
  {
42
  label: "Test prompt",
43
  cb: async () => {
src/lib/components/inference-playground/message.svelte CHANGED
@@ -1,6 +1,8 @@
1
  <script lang="ts">
2
  import { TextareaAutosize } from "$lib/spells/textarea-autosize.svelte.js";
3
  import type { ConversationMessage } from "$lib/types.js";
 
 
4
 
5
  type Props = {
6
  content: ConversationMessage["content"];
@@ -20,28 +22,62 @@
20
  </script>
21
 
22
  <div
23
- class=" group/message group grid grid-cols-[1fr_2.5rem] items-start gap-2 border-b px-3.5 pt-4 pb-6 hover:bg-gray-100/70 @-2xl:grid-cols-[130px_1fr_2.5rem] @2xl:grid-rows-1 @2xl:gap-4 @2xl:px-6 dark:border-gray-800 dark:hover:bg-gray-800/30"
 
24
  class:pointer-events-none={loading}
25
  >
26
- <div class="col-span-2 pt-3 pb-1 text-sm font-semibold uppercase @2xl:col-span-1 @2xl:pb-2">
27
  {role}
28
  </div>
29
- <!-- svelte-ignore a11y_autofocus -->
30
- <!-- svelte-ignore a11y_positive_tabindex -->
31
- <textarea
32
- bind:this={element}
33
- {autofocus}
34
- bind:value={content}
35
- placeholder="Enter {role} message"
36
- class="resize-none overflow-hidden rounded-sm bg-transparent px-2 py-2.5 ring-gray-100 outline-none group-hover/message:ring-3 hover:bg-white focus:bg-white focus:ring-3 @2xl:px-3 dark:ring-gray-600 dark:hover:bg-gray-900 dark:focus:bg-gray-900"
37
- rows="1"
38
- tabindex="2"
39
- ></textarea>
40
- <button
41
- tabindex="0"
42
- onclick={onDelete}
43
- type="button"
44
- class="mt-1.5 size-8 rounded-lg border border-gray-200 bg-white text-xs font-medium text-gray-900 group-hover/message:block hover:bg-gray-100 hover:text-blue-700 focus:z-10 focus:ring-4 focus:ring-gray-100 focus:outline-hidden sm:hidden dark:border-gray-600 dark:bg-gray-800 dark:text-gray-400 dark:hover:bg-gray-700 dark:hover:text-white dark:focus:ring-gray-700"
45
- >✕</button
46
- >
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
47
  </div>
 
1
  <script lang="ts">
2
  import { TextareaAutosize } from "$lib/spells/textarea-autosize.svelte.js";
3
  import type { ConversationMessage } from "$lib/types.js";
4
+ import Tooltip from "$lib/components/tooltip.svelte";
5
+ import IconImage from "~icons/carbon/image-reference";
6
 
7
  type Props = {
8
  content: ConversationMessage["content"];
 
22
  </script>
23
 
24
  <div
25
+ class="group/message group flex flex-col items-start gap-x-4 gap-y-2 border-b px-3.5 pt-4 pb-6 hover:bg-gray-100/70
26
+ @2xl:flex-row @2xl:px-6 dark:border-gray-800 dark:hover:bg-gray-800/30"
27
  class:pointer-events-none={loading}
28
  >
29
+ <div class="pt-3 text-sm font-semibold uppercase @2xl:basis-[130px]">
30
  {role}
31
  </div>
32
+ <div class="flex w-full items-center gap-4">
33
+ <!-- svelte-ignore a11y_autofocus -->
34
+ <!-- svelte-ignore a11y_positive_tabindex -->
35
+ <textarea
36
+ bind:this={element}
37
+ {autofocus}
38
+ bind:value={content}
39
+ placeholder="Enter {role} message"
40
+ class="grow resize-none overflow-hidden rounded-lg bg-transparent px-2 py-2.5 ring-gray-100 outline-none group-hover/message:ring-3 hover:bg-white focus:bg-white focus:ring-3 @2xl:px-3 dark:ring-gray-600 dark:hover:bg-gray-900 dark:focus:bg-gray-900"
41
+ rows="1"
42
+ tabindex="2"
43
+ ></textarea>
44
+
45
+ <Tooltip openDelay={250}>
46
+ {#snippet trigger(tooltip)}
47
+ <button
48
+ tabindex="0"
49
+ onclick={onDelete}
50
+ type="button"
51
+ class="grid size-8 place-items-center rounded-lg border border-gray-200 bg-white text-xs font-medium text-gray-900
52
+ group-focus-within/message:visible group-hover/message:visible hover:bg-gray-100
53
+ hover:text-blue-700 focus:z-10 focus:ring-4
54
+ focus:ring-gray-100 focus:outline-hidden sm:invisible dark:border-gray-600 dark:bg-gray-800
55
+ dark:text-gray-400 dark:hover:bg-gray-700 dark:hover:text-white dark:focus:ring-gray-700"
56
+ {...tooltip.trigger}
57
+ >
58
+ <IconImage />
59
+ </button>
60
+ {/snippet}
61
+ Add image
62
+ </Tooltip>
63
+
64
+ <Tooltip>
65
+ {#snippet trigger(tooltip)}
66
+ <button
67
+ tabindex="0"
68
+ onclick={onDelete}
69
+ type="button"
70
+ class="size-8 rounded-lg border border-gray-200 bg-white text-xs font-medium text-gray-900
71
+ group-focus-within/message:visible group-hover/message:visible hover:bg-gray-100
72
+ hover:text-blue-700 focus:z-10 focus:ring-4
73
+ focus:ring-gray-100 focus:outline-hidden sm:invisible dark:border-gray-600 dark:bg-gray-800
74
+ dark:text-gray-400 dark:hover:bg-gray-700 dark:hover:text-white dark:focus:ring-gray-700"
75
+ {...tooltip.trigger}
76
+ >
77
+
78
+ </button>
79
+ {/snippet}
80
+ Delete
81
+ </Tooltip>
82
+ </div>
83
  </div>
src/lib/components/inference-playground/model-selector-modal.svelte CHANGED
@@ -9,6 +9,7 @@
9
  import { watch } from "runed";
10
  import IconSearch from "~icons/carbon/search";
11
  import IconStar from "~icons/carbon/star";
 
12
 
13
  interface Props {
14
  onModelSelect?: (model: string) => void;
@@ -145,6 +146,11 @@
145
  class="mx-1 text-gray-300 dark:text-gray-700">/</span
146
  ><span class="text-black dark:text-white">{modelName}</span></span
147
  >
 
 
 
 
 
148
  </button>
149
  {/snippet}
150
  {#if trending.length > 0}
 
9
  import { watch } from "runed";
10
  import IconSearch from "~icons/carbon/search";
11
  import IconStar from "~icons/carbon/star";
12
+ import IconEye from "~icons/carbon/view";
13
 
14
  interface Props {
15
  onModelSelect?: (model: string) => void;
 
146
  class="mx-1 text-gray-300 dark:text-gray-700">/</span
147
  ><span class="text-black dark:text-white">{modelName}</span></span
148
  >
149
+ {#if model.pipeline_tag === "image-text-to-text"}
150
+ <div class="lucide lucide-star ml-auto size-4 text-white">
151
+ <IconEye />
152
+ </div>
153
+ {/if}
154
  </button>
155
  {/snippet}
156
  {#if trending.length > 0}
src/lib/components/tooltip.svelte ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <script lang="ts">
2
+ import { Tooltip, type TooltipProps } from "melt/builders";
3
+ import { type ComponentProps, type Extracted } from "melt";
4
+ import type { Snippet } from "svelte";
5
+
6
+ interface Props {
7
+ children: Snippet;
8
+ trigger: Snippet<[Tooltip]>;
9
+ placement?: NonNullable<Extracted<TooltipProps["computePositionOptions"]>>["placement"];
10
+ openDelay?: ComponentProps<TooltipProps>["openDelay"];
11
+ }
12
+ const { children, trigger, placement = "top", openDelay }: Props = $props();
13
+
14
+ const tooltip = new Tooltip({
15
+ forceVisible: true,
16
+ computePositionOptions: () => ({ placement }),
17
+ openDelay: () => openDelay,
18
+ });
19
+ </script>
20
+
21
+ {@render trigger(tooltip)}
22
+
23
+ <div {...tooltip.content} class="rounded-xl bg-white p-0 shadow-xl dark:bg-gray-700">
24
+ <div {...tooltip.arrow} class="rounded-tl"></div>
25
+ <p class="px-4 py-1 text-gray-700 dark:text-white">{@render children()}</p>
26
+ </div>
27
+
28
+ <style>
29
+ [data-melt-tooltip-content] {
30
+ border: 0;
31
+
32
+ position: absolute;
33
+ pointer-events: none;
34
+ opacity: 0;
35
+
36
+ transform: scale(0.9);
37
+
38
+ transition: 0.3s;
39
+ transition-property: opacity, transform;
40
+ }
41
+
42
+ [data-melt-tooltip-content][data-open] {
43
+ pointer-events: auto;
44
+ opacity: 1;
45
+
46
+ transform: scale(1);
47
+ }
48
+ </style>
src/lib/types.ts CHANGED
@@ -169,6 +169,7 @@ export enum LibraryName {
169
 
170
  export enum PipelineTag {
171
  TextGeneration = "text-generation",
 
172
  }
173
 
174
  export type MaybeGetter<T> = T | (() => T);
 
169
 
170
  export enum PipelineTag {
171
  TextGeneration = "text-generation",
172
+ ImageTextToText = "image-text-to-text",
173
  }
174
 
175
  export type MaybeGetter<T> = T | (() => T);
src/routes/+layout.svelte CHANGED
@@ -3,6 +3,7 @@
3
  import Prompts from "$lib/components/prompts.svelte";
4
  import QuotaModal from "$lib/components/quota-modal.svelte";
5
  import "../app.css";
 
6
  interface Props {
7
  children?: import("svelte").Snippet;
8
  }
 
3
  import Prompts from "$lib/components/prompts.svelte";
4
  import QuotaModal from "$lib/components/quota-modal.svelte";
5
  import "../app.css";
6
+
7
  interface Props {
8
  children?: import("svelte").Snippet;
9
  }
src/routes/api/models/+server.ts CHANGED
@@ -5,68 +5,117 @@ import { dev } from "$app/environment";
5
 
6
  let cache: ModelWithTokenizer[] | undefined;
7
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8
  export const GET: RequestHandler = async ({ fetch }) => {
9
  if (cache?.length && dev) {
10
  console.log("Skipping load, using in memory cache");
11
  return json(cache);
12
  }
13
 
14
- const apiUrl =
15
- "https://huggingface.co/api/models?pipeline_tag=text-generation&filter=conversational&inference_provider=all&limit=100&expand[]=inferenceProviderMapping&expand[]=config&expand[]=library_name&expand[]=pipeline_tag&expand[]=tags&expand[]=mask_token&expand[]=trendingScore";
16
-
17
- const res = await fetch(apiUrl, {
18
- credentials: "include",
19
- headers: {
20
- "Upgrade-Insecure-Requests": "1",
21
- "Sec-Fetch-Dest": "document",
22
- "Sec-Fetch-Mode": "navigate",
23
- "Sec-Fetch-Site": "none",
24
- "Sec-Fetch-User": "?1",
25
- "Priority": "u=0, i",
26
- "Pragma": "no-cache",
27
- "Cache-Control": "no-cache",
28
- },
29
- method: "GET",
30
- mode: "cors",
31
- });
32
-
33
- if (!res.ok) {
34
- console.error(`Error fetching warm models`, res.status, res.statusText);
35
- return json({ models: [] });
36
- }
37
 
38
- const compatibleModels: Model[] = await res.json();
39
- compatibleModels.sort((a, b) => a.id.toLowerCase().localeCompare(b.id.toLowerCase()));
40
-
41
- const promises = compatibleModels.map(async model => {
42
- const configUrl = `https://huggingface.co/${model.id}/raw/main/tokenizer_config.json`;
43
- const res = await fetch(configUrl, {
44
- credentials: "include",
45
- headers: {
46
- "Upgrade-Insecure-Requests": "1",
47
- "Sec-Fetch-Dest": "document",
48
- "Sec-Fetch-Mode": "navigate",
49
- "Sec-Fetch-Site": "none",
50
- "Sec-Fetch-User": "?1",
51
- "Priority": "u=0, i",
52
- "Pragma": "no-cache",
53
- "Cache-Control": "no-cache",
54
- },
55
- method: "GET",
56
- mode: "cors",
57
- });
58
 
59
- if (!res.ok) {
60
- // console.error(`Error fetching tokenizer file for ${model.id}`, res.status, res.statusText);
61
- return null; // Ignore failed requests by returning null
 
 
 
62
  }
63
 
64
- const tokenizerConfig = await res.json();
65
- return { ...model, tokenizerConfig } satisfies ModelWithTokenizer;
66
- });
 
 
 
 
 
 
67
 
68
- const models: ModelWithTokenizer[] = (await Promise.all(promises)).filter(model => model !== null);
69
- cache = models;
 
 
 
 
 
 
 
70
 
71
- return json(cache);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
72
  };
 
5
 
6
  let cache: ModelWithTokenizer[] | undefined;
7
 
8
+ const headers: HeadersInit = {
9
+ "Upgrade-Insecure-Requests": "1",
10
+ "Sec-Fetch-Dest": "document",
11
+ "Sec-Fetch-Mode": "navigate",
12
+ "Sec-Fetch-Site": "none",
13
+ "Sec-Fetch-User": "?1",
14
+ "Priority": "u=0, i",
15
+ "Pragma": "no-cache",
16
+ "Cache-Control": "no-cache",
17
+ };
18
+
19
+ const requestInit: RequestInit = {
20
+ credentials: "include",
21
+ headers,
22
+ method: "GET",
23
+ mode: "cors",
24
+ };
25
+
26
+ interface ApiQueryParams {
27
+ pipeline_tag?: "text-generation" | "image-text-to-text";
28
+ filter: string;
29
+ inference_provider: string;
30
+ limit: number;
31
+ expand: string[];
32
+ }
33
+
34
+ const queryParams: ApiQueryParams = {
35
+ filter: "conversational",
36
+ inference_provider: "all",
37
+ limit: 100,
38
+ expand: ["inferenceProviderMapping", "config", "library_name", "pipeline_tag", "tags", "mask_token", "trendingScore"],
39
+ };
40
+
41
+ const baseUrl = "https://huggingface.co/api/models";
42
+
43
+ function buildApiUrl(params: ApiQueryParams): string {
44
+ const url = new URL(baseUrl);
45
+ // Add simple params
46
+ Object.entries(params).forEach(([key, value]) => {
47
+ if (!Array.isArray(value)) {
48
+ url.searchParams.append(key, String(value));
49
+ }
50
+ });
51
+ // Handle array params specially
52
+ params.expand.forEach(item => {
53
+ url.searchParams.append("expand[]", item);
54
+ });
55
+ return url.toString();
56
+ }
57
+
58
  export const GET: RequestHandler = async ({ fetch }) => {
59
  if (cache?.length && dev) {
60
  console.log("Skipping load, using in memory cache");
61
  return json(cache);
62
  }
63
 
64
+ try {
65
+ // Fetch both types of models in parallel
66
+ const textGenPromise = fetch(buildApiUrl({ ...queryParams, pipeline_tag: "text-generation" }), requestInit);
67
+ const imgText2TextPromise = fetch(buildApiUrl({ ...queryParams, pipeline_tag: "image-text-to-text" }), requestInit);
68
+ const [textGenResponse, imgText2TextResponse] = await Promise.all([textGenPromise, imgText2TextPromise]);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
69
 
70
+ if (!textGenResponse.ok) {
71
+ console.error(`Error fetching text-generation models`, textGenResponse.status, textGenResponse.statusText);
72
+ }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
73
 
74
+ if (!imgText2TextResponse.ok) {
75
+ console.error(
76
+ `Error fetching image-text-to-text models`,
77
+ imgText2TextResponse.status,
78
+ imgText2TextResponse.statusText
79
+ );
80
  }
81
 
82
+ // Parse the responses
83
+ const textGenModels: Model[] = textGenResponse.ok ? await textGenResponse.json() : [];
84
+ const imgText2TextModels: Model[] = imgText2TextResponse.ok ? await imgText2TextResponse.json() : [];
85
+
86
+ // Combine the models
87
+ const compatibleModels: Model[] = [...textGenModels, ...imgText2TextModels];
88
+
89
+ // Sort the models
90
+ compatibleModels.sort((a, b) => a.id.toLowerCase().localeCompare(b.id.toLowerCase()));
91
 
92
+ // Fetch tokenizer configs for each model
93
+ const promises = compatibleModels.map(async model => {
94
+ const configUrl = `https://huggingface.co/${model.id}/raw/main/tokenizer_config.json`;
95
+ const res = await fetch(configUrl, {
96
+ credentials: "include",
97
+ headers,
98
+ method: "GET",
99
+ mode: "cors",
100
+ });
101
 
102
+ if (!res.ok) {
103
+ // console.error(`Error fetching tokenizer file for ${model.id}`, res.status, res.statusText);
104
+ return null; // Ignore failed requests by returning null
105
+ }
106
+
107
+ const tokenizerConfig = await res.json();
108
+ return { ...model, tokenizerConfig } satisfies ModelWithTokenizer;
109
+ });
110
+
111
+ const models: ModelWithTokenizer[] = (await Promise.all(promises)).filter(
112
+ (model): model is ModelWithTokenizer => model !== null
113
+ );
114
+ cache = models;
115
+
116
+ return json(cache);
117
+ } catch (error) {
118
+ console.error("Error fetching models:", error);
119
+ return json([]);
120
+ }
121
  };