<script lang="ts">
	import { InferenceDisplayability } from "@huggingface/tasks";

	import { type WidgetProps, type ModelLoadInfo, LoadState, ComputeType } from "../types.js";
	import WidgetModelLoading from "../WidgetModelLoading/WidgetModelLoading.svelte";
	import IconAzureML from "../../../Icons/IconAzureML.svelte";
	import IconInfo from "../../../Icons/IconInfo.svelte";
	import { modelLoadStates } from "../../stores.js";
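
	// Props supplied by the parent widget.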
	export let model: WidgetProps["model"];
	export let computeTime: string = "";
	export let error: string = "";
	export let modelLoading = {
		isLoading: false,
		estimatedTime: 0,
	};
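
	// True when the shared load-state store reports this model as too big for the serverless Inference API.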
	$: modelTooBig = $modelLoadStates[model.id]?.state === "TooBig";

	const state = {
		[LoadState.Loadable]: "This model can be loaded on Inference API (serverless).",
		[LoadState.Loaded]: "This model is currently loaded and running on Inference API (serverless).",
		[LoadState.TooBig]:
			"Model is too large to load on Inference API (serverless). To try the model, launch it on Inference Endpoints (dedicated) instead.",
		[LoadState.Error]: "⚠️ This model could not be loaded on Inference API (serverless). ⚠️",
	} as const;

	const azureState = {
		[LoadState.Loadable]: "This model can be loaded on AzureML Managed Endpoint.",
		[LoadState.Loaded]: "This model is loaded and running on AzureML Managed Endpoint.",
		[LoadState.TooBig]:
			"Model is too large to load on Inference API (serverless). To try the model, launch it on Inference Endpoints (dedicated) instead.",
		[LoadState.Error]: "⚠️ This model could not be loaded.",
	} as const;
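
	/**
	 * Resolves the user-facing status message for the model's current load state.
	 * Assumes ModelLoadInfo (from ../types.js) exposes at least a `state: LoadState`
	 * field (and, per the template below, an optional `compute_type`).
	 * Returns a fallback string when the store has no entry for this model yet.
	 */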
	function getStatusReport(modelLoadInfo: ModelLoadInfo | undefined, statuses: Record<LoadState, string>): string {
		if (!modelLoadInfo) {
			return "Model state unknown";
		}
		return statuses[modelLoadInfo.state];
	}
</script>
<div class="mt-2">
<div class="text-xs text-gray-400">
		{#if model.id === "bigscience/bloom"}
			<div class="flex items-baseline">
				<div class="flex items-center whitespace-nowrap text-gray-700">
					<IconAzureML classNames="mr-1 flex-none" /> Powered by
					<a
						class="underline hover:text-gray-800"
						href="https://azure.microsoft.com/products/machine-learning"
						target="_blank">AzureML</a
					>
				</div>
				<div class="border-dotted mx-2 flex flex-1 -translate-y-px border-b border-gray-100" />
				<div>
					{@html getStatusReport($modelLoadStates[model.id], azureState)}
				</div>
			</div>
		{:else if computeTime}
			Computation time on {$modelLoadStates[model.id]?.compute_type ?? ComputeType.CPU}: {computeTime}
		{:else if (model.inference === InferenceDisplayability.Yes || model.pipeline_tag === "reinforcement-learning") && !modelTooBig}
			{@html getStatusReport($modelLoadStates[model.id], state)}
		{:else if model.inference === InferenceDisplayability.ExplicitOptOut}
			<span class="text-sm text-gray-500">Inference API (serverless) has been turned off for this model.</span>
		{:else if model.inference === InferenceDisplayability.CustomCode}
			<span class="text-sm text-gray-500"
				>Inference API (serverless) does not yet support model repos that contain custom code.</span
			>
		{:else if model.inference === InferenceDisplayability.LibraryNotDetected}
			<span class="text-sm text-gray-500">
				Unable to determine this model's library. Check the
				<a class="color-inherit" href="/docs/hub/model-cards#specifying-a-library">
					docs <IconInfo classNames="inline" />
				</a>.
			</span>
		{:else if model.inference === InferenceDisplayability.PipelineNotDetected}
			<span class="text-sm text-gray-500">
				Unable to determine this model's pipeline type. Check the
				<a class="color-inherit" href="/docs/hub/models-widgets#enabling-a-widget">
					docs <IconInfo classNames="inline" />
				</a>.
			</span>
		{:else if model.inference === InferenceDisplayability.PipelineLibraryPairNotSupported}
			<span class="text-sm text-gray-500">
				Inference API (serverless) does not yet support {model.library_name} models for this pipeline type.
			</span>
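		<!-- A model can pass the inference checks above yet still be rejected for size, so TooBig gets its own branch -->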
		{:else if modelTooBig}
			<span class="text-sm text-gray-500">
				Model is too large to load in Inference API (serverless). To try the model, launch it on <a
					class="underline"
					href="https://ui.endpoints.huggingface.co/new?repository={encodeURIComponent(model.id)}"
					>Inference Endpoints (dedicated)</a
				>
				instead.
			</span>
		{:else}
			<!-- added as a failsafe but this case cannot currently happen -->
			<span class="text-sm text-gray-500">
				Inference API (serverless) is disabled for an unknown reason. Please open a
				<a class="color-inherit underline" href="/{model.id}/discussions/new">Discussion in the Community tab</a>.
			</span>
		{/if}
	</div>
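
	<!-- Error message and model-loading progress, rendered below the status line -->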
	{#if error}
		<div class="alert alert-error mt-3">{error}</div>
	{/if}

	{#if modelLoading.isLoading}
		<WidgetModelLoading estimatedTime={modelLoading.estimatedTime} />
	{/if}
</div>