machineuser committed · commit 02f87b5 · 1 parent: f108317

Sync widgets demo
Files changed:
- packages/tasks/src/model-data.ts +1 -1
- packages/widgets/src/lib/components/InferenceWidget/shared/WidgetHeader/WidgetHeader.svelte +1 -1
- packages/widgets/src/lib/components/InferenceWidget/shared/WidgetInfo/WidgetInfo.svelte +10 -10
- packages/widgets/src/lib/components/InferenceWidget/shared/WidgetState/WidgetState.svelte +3 -3
- packages/widgets/src/lib/components/InferenceWidget/shared/helpers.ts +2 -2
packages/tasks/src/model-data.ts
CHANGED
@@ -80,7 +80,7 @@ export interface ModelData {
 	 */
 	widgetData?: WidgetExample[] | undefined;
 	/**
-	 * Parameters that will be used by the widget when calling Inference
+	 * Parameters that will be used by the widget when calling Inference API (serverless)
 	 * https://huggingface.co/docs/api-inference/detailed_parameters
 	 *
 	 * can be set in the model card metadata (under `inference/parameters`)
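For reference, the `inference/parameters` metadata that this doc comment points at reaches the widget code through the model data object (see `model?.cardData?.inference` in helpers.ts below). A minimal sketch of that shape, with a hypothetical repo id and illustrative parameter values:

	// Model card YAML such as:
	//   inference:
	//     parameters:
	//       temperature: 0.7
	// surfaces on the model data object roughly as:
	const model = {
		id: "user/my-model", // hypothetical repo id
		cardData: {
			inference: {
				parameters: { temperature: 0.7, max_new_tokens: 50 }, // illustrative values
			},
		},
	};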
packages/widgets/src/lib/components/InferenceWidget/shared/WidgetHeader/WidgetHeader.svelte
CHANGED
@@ -53,7 +53,7 @@
 	<div class="flex items-center text-lg">
 		{#if !isDisabled}
 			<IconLightning classNames="-ml-1 mr-1 text-yellow-500" />
-			Inference
+			Inference API
 		{:else}
 			Inference Examples
 		{/if}
packages/widgets/src/lib/components/InferenceWidget/shared/WidgetInfo/WidgetInfo.svelte
CHANGED
@@ -17,18 +17,18 @@
 	$: modelTooBig = $modelLoadStates[model.id]?.state === "TooBig";

 	const state = {
-		[LoadState.Loadable]: "This model can be loaded on Inference
-		[LoadState.Loaded]: "This model is currently loaded and running on Inference
+		[LoadState.Loadable]: "This model can be loaded on Inference API (serverless).",
+		[LoadState.Loaded]: "This model is currently loaded and running on Inference API (serverless).",
 		[LoadState.TooBig]:
-			"Model is too large to load onto on Inference
-		[LoadState.Error]: "⚠️ This model could not be loaded on Inference
+			"Model is too large to load onto on Inference API (serverless). To try the model, launch it on Inference Endpoints (dedicated) instead.",
+		[LoadState.Error]: "⚠️ This model could not be loaded on Inference API (serverless). ⚠️",
 	} as const;

 	const azureState = {
 		[LoadState.Loadable]: "This model can be loaded loaded on AzureML Managed Endpoint",
 		[LoadState.Loaded]: "This model is loaded and running on AzureML Managed Endpoint",
 		[LoadState.TooBig]:
-			"Model is too large to load onto on Inference
+			"Model is too large to load onto on Inference API (serverless). To try the model, launch it on Inference Endpoints (dedicated) instead.",
 		[LoadState.Error]: "⚠️ This model could not be loaded.",
 	} as const;

@@ -62,10 +62,10 @@
 	{:else if (model.inference === InferenceDisplayability.Yes || model.pipeline_tag === "reinforcement-learning") && !modelTooBig}
 		{@html getStatusReport($modelLoadStates[model.id], state)}
 	{:else if model.inference === InferenceDisplayability.ExplicitOptOut}
-		<span class="text-sm text-gray-500">Inference
+		<span class="text-sm text-gray-500">Inference API (serverless) has been turned off for this model.</span>
 	{:else if model.inference === InferenceDisplayability.CustomCode}
 		<span class="text-sm text-gray-500"
-			>Inference
+			>Inference API (serverless) does not yet support model repos that contain custom code.</span
 		>
 	{:else if model.inference === InferenceDisplayability.LibraryNotDetected}
 		<span class="text-sm text-gray-500">
@@ -83,11 +83,11 @@
 		</span>
 	{:else if model.inference === InferenceDisplayability.PipelineLibraryPairNotSupported}
 		<span class="text-sm text-gray-500">
-			Inference
+			Inference API (serverless) does not yet support {model.library_name} models for this pipeline type.
 		</span>
 	{:else if modelTooBig}
 		<span class="text-sm text-gray-500">
-			Model is too large to load in Inference
+			Model is too large to load in Inference API (serverless). To try the model, launch it on <a
 				class="underline"
 				href="https://ui.endpoints.huggingface.co/new?repository={encodeURIComponent(model.id)}"
 				>Inference Endpoints (dedicated)</a
@@ -97,7 +97,7 @@
 	{:else}
 		<!-- added as a failsafe but this case cannot currently happen -->
 		<span class="text-sm text-gray-500">
-			Inference
+			Inference API (serverless) is disabled for an unknown reason. Please open a
 			<a class="color-inherit underline" href="/{model.id}/discussions/new">Discussion in the Community tab</a>.
 		</span>
 	{/if}
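The `state` map above feeds `getStatusReport`, whose implementation is not part of this diff. A minimal sketch of the lookup it presumably performs, with local stand-ins for the repo's `LoadState` enum and load-state entries:

	// Stand-in for the repo's LoadState enum; this diff only shows its member names.
	type LoadState = "Loadable" | "Loaded" | "TooBig" | "Error";

	function statusMessage(
		loadState: { state: LoadState } | undefined,
		messages: Record<LoadState, string>
	): string {
		// Assumption: fall back to the Loadable message while no state has been fetched yet.
		return messages[loadState?.state ?? "Loadable"];
	}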
packages/widgets/src/lib/components/InferenceWidget/shared/WidgetState/WidgetState.svelte
CHANGED
@@ -5,13 +5,13 @@
 <div class="blankslate">
 	<div class="subtitle text-xs text-gray-500">
 		<div class="loaded mt-2 {currentState !== 'loaded' ? 'hidden' : ''}">
-			This model is currently loaded and running on Inference
+			This model is currently loaded and running on Inference API (serverless).
 		</div>
 		<div class="error mt-2 {currentState !== 'error' ? 'hidden' : ''}">
-			⚠️ This model could not be loaded in Inference
+			⚠️ This model could not be loaded in Inference API (serverless). ⚠️
 		</div>
 		<div class="unknown mt-2 {currentState !== 'unknown' ? 'hidden' : ''}">
-			This model can be loaded in Inference
+			This model can be loaded in Inference API (serverless).
 		</div>
 	</div>
 </div>
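All three status lines stay in the markup; visibility is driven by the Tailwind `hidden` class, which every `<div>` receives except the one matching `currentState`. A sketch of that class computation, assuming `currentState` only takes the three values used above:

	type WidgetState = "loaded" | "error" | "unknown"; // the values used in the markup above

	// Mirrors the `{currentState !== '…' ? 'hidden' : ''}` expressions in the template:
	// every section except the active one gets the `hidden` class.
	function stateClass(section: WidgetState, currentState: WidgetState): string {
		return currentState !== section ? "hidden" : "";
	}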
packages/widgets/src/lib/components/InferenceWidget/shared/helpers.ts
CHANGED
@@ -84,7 +84,7 @@ export async function callInferenceApi<T>(
 	requestBody: Record<string, unknown>,
 	apiToken = "",
 	outputParsingFn: (x: unknown) => T,
-	waitForModel = false, // If true, the server will only respond once the model has been loaded on Inference
+	waitForModel = false, // If true, the server will only respond once the model has been loaded on Inference API (serverless)
 	includeCredentials = false,
 	isOnLoadCall = false, // If true, the server will try to answer from cache and not do anything if not
 	useCache = true
@@ -184,7 +184,7 @@ export async function getModelLoadInfo(
 	}
 }

-// Extend requestBody with user supplied parameters for Inference
+// Extend requestBody with user supplied parameters for Inference API (serverless)
 export function addInferenceParameters(requestBody: Record<string, unknown>, model: ModelData): void {
 	const inference = model?.cardData?.inference;
 	if (typeof inference === "object") {
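From the lines visible here, `addInferenceParameters` reads `model.cardData.inference` and, when it is an object, copies the user-supplied parameters into the request body sent to Inference API (serverless). A hedged usage sketch; the exact merged shape is an assumption, since only the signature and the `typeof inference === "object"` guard appear in this diff:

	const requestBody: Record<string, unknown> = { inputs: "Hello" };
	addInferenceParameters(requestBody, model); // model as sketched for model-data.ts above
	// requestBody now also carries the card's parameters, roughly:
	// { inputs: "Hello", parameters: { temperature: 0.7, max_new_tokens: 50 } }

As for `callInferenceApi`, its `waitForModel` and `useCache` options presumably map to the `x-wait-for-model` and `x-use-cache` request headers of the hosted API (see https://huggingface.co/docs/api-inference).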