Spaces:
Running
on
CPU Upgrade
Running
on
CPU Upgrade
machineuser
committed on
Commit
·
bbc4b1e
1
Parent(s):
9d298eb
Sync widgets demo
Browse files
packages/tasks/src/pipelines.ts
CHANGED
|
@@ -435,6 +435,20 @@ export const PIPELINE_DATA = {
|
|
| 435 |
},
|
| 436 |
"image-to-image": {
|
| 437 |
name: "Image-to-Image",
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 438 |
modality: "cv",
|
| 439 |
color: "indigo",
|
| 440 |
},
|
|
|
|
| 435 |
},
|
| 436 |
"image-to-image": {
|
| 437 |
name: "Image-to-Image",
|
| 438 |
+
subtasks: [
|
| 439 |
+
{
|
| 440 |
+
type: "image-inpainting",
|
| 441 |
+
name: "Image Inpainting",
|
| 442 |
+
},
|
| 443 |
+
{
|
| 444 |
+
type: "image-colorization",
|
| 445 |
+
name: "Image Colorization",
|
| 446 |
+
},
|
| 447 |
+
{
|
| 448 |
+
type: "super-resolution",
|
| 449 |
+
name: "Super Resolution",
|
| 450 |
+
},
|
| 451 |
+
],
|
| 452 |
modality: "cv",
|
| 453 |
color: "indigo",
|
| 454 |
},
|
packages/tasks/src/tasks/index.ts
CHANGED
|
@@ -40,18 +40,18 @@ import type { ModelLibraryKey } from "../model-libraries";
|
|
| 40 |
* Model libraries compatible with each ML task
|
| 41 |
*/
|
| 42 |
export const TASKS_MODEL_LIBRARIES: Record<PipelineType, ModelLibraryKey[]> = {
|
| 43 |
-
"audio-classification": ["speechbrain", "transformers"],
|
| 44 |
"audio-to-audio": ["asteroid", "speechbrain"],
|
| 45 |
"automatic-speech-recognition": ["espnet", "nemo", "speechbrain", "transformers", "transformers.js"],
|
| 46 |
conversational: ["transformers"],
|
| 47 |
-
"depth-estimation": ["transformers"],
|
| 48 |
-
"document-question-answering": ["transformers"],
|
| 49 |
"feature-extraction": ["sentence-transformers", "transformers", "transformers.js"],
|
| 50 |
"fill-mask": ["transformers", "transformers.js"],
|
| 51 |
"graph-ml": ["transformers"],
|
| 52 |
"image-classification": ["keras", "timm", "transformers", "transformers.js"],
|
| 53 |
"image-segmentation": ["transformers", "transformers.js"],
|
| 54 |
-
"image-to-image": ["diffusers"],
|
| 55 |
"image-to-text": ["transformers.js"],
|
| 56 |
"image-to-video": ["diffusers"],
|
| 57 |
"video-classification": ["transformers"],
|
|
@@ -73,8 +73,8 @@ export const TASKS_MODEL_LIBRARIES: Record<PipelineType, ModelLibraryKey[]> = {
|
|
| 73 |
"text-generation": ["transformers", "transformers.js"],
|
| 74 |
"text-retrieval": [],
|
| 75 |
"text-to-image": ["diffusers"],
|
| 76 |
-
"text-to-speech": ["espnet", "tensorflowtts", "transformers"],
|
| 77 |
-
"text-to-audio": ["transformers"],
|
| 78 |
"text-to-video": ["diffusers"],
|
| 79 |
"text2text-generation": ["transformers", "transformers.js"],
|
| 80 |
"time-series-forecasting": [],
|
|
@@ -89,11 +89,11 @@ export const TASKS_MODEL_LIBRARIES: Record<PipelineType, ModelLibraryKey[]> = {
|
|
| 89 |
],
|
| 90 |
translation: ["transformers", "transformers.js"],
|
| 91 |
"unconditional-image-generation": ["diffusers"],
|
| 92 |
-
"visual-question-answering": ["transformers"],
|
| 93 |
"voice-activity-detection": [],
|
| 94 |
"zero-shot-classification": ["transformers", "transformers.js"],
|
| 95 |
"zero-shot-image-classification": ["transformers", "transformers.js"],
|
| 96 |
-
"zero-shot-object-detection": ["transformers"],
|
| 97 |
};
|
| 98 |
|
| 99 |
/**
|
|
|
|
| 40 |
* Model libraries compatible with each ML task
|
| 41 |
*/
|
| 42 |
export const TASKS_MODEL_LIBRARIES: Record<PipelineType, ModelLibraryKey[]> = {
|
| 43 |
+
"audio-classification": ["speechbrain", "transformers", "transformers.js"],
|
| 44 |
"audio-to-audio": ["asteroid", "speechbrain"],
|
| 45 |
"automatic-speech-recognition": ["espnet", "nemo", "speechbrain", "transformers", "transformers.js"],
|
| 46 |
conversational: ["transformers"],
|
| 47 |
+
"depth-estimation": ["transformers", "transformers.js"],
|
| 48 |
+
"document-question-answering": ["transformers", "transformers.js"],
|
| 49 |
"feature-extraction": ["sentence-transformers", "transformers", "transformers.js"],
|
| 50 |
"fill-mask": ["transformers", "transformers.js"],
|
| 51 |
"graph-ml": ["transformers"],
|
| 52 |
"image-classification": ["keras", "timm", "transformers", "transformers.js"],
|
| 53 |
"image-segmentation": ["transformers", "transformers.js"],
|
| 54 |
+
"image-to-image": ["diffusers", "transformers.js"],
|
| 55 |
"image-to-text": ["transformers.js"],
|
| 56 |
"image-to-video": ["diffusers"],
|
| 57 |
"video-classification": ["transformers"],
|
|
|
|
| 73 |
"text-generation": ["transformers", "transformers.js"],
|
| 74 |
"text-retrieval": [],
|
| 75 |
"text-to-image": ["diffusers"],
|
| 76 |
+
"text-to-speech": ["espnet", "tensorflowtts", "transformers", "transformers.js"],
|
| 77 |
+
"text-to-audio": ["transformers", "transformers.js"],
|
| 78 |
"text-to-video": ["diffusers"],
|
| 79 |
"text2text-generation": ["transformers", "transformers.js"],
|
| 80 |
"time-series-forecasting": [],
|
|
|
|
| 89 |
],
|
| 90 |
translation: ["transformers", "transformers.js"],
|
| 91 |
"unconditional-image-generation": ["diffusers"],
|
| 92 |
+
"visual-question-answering": ["transformers", "transformers.js"],
|
| 93 |
"voice-activity-detection": [],
|
| 94 |
"zero-shot-classification": ["transformers", "transformers.js"],
|
| 95 |
"zero-shot-image-classification": ["transformers", "transformers.js"],
|
| 96 |
+
"zero-shot-object-detection": ["transformers", "transformers.js"],
|
| 97 |
};
|
| 98 |
|
| 99 |
/**
|