machineuser committed · Commit 5852013 · Parent: 4e75983

Sync widgets demo
Files changed:
- packages/tasks/src/tasks/depth-estimation/data.ts +8 -4
- packages/tasks/src/tasks/mask-generation/data.ts +42 -5
- packages/tasks/src/tasks/text-generation/data.ts +22 -38
- packages/tasks/src/tasks/text-to-image/data.ts +6 -2
- packages/tasks/src/tasks/zero-shot-image-classification/data.ts +8 -5
- packages/tasks/src/tasks/zero-shot-object-detection/data.ts +6 -1
packages/tasks/src/tasks/depth-estimation/data.ts
CHANGED
@@ -28,8 +28,8 @@ const taskData: TaskDataCustom = {
       id: "Intel/dpt-large",
     },
     {
-      description: "Strong Depth Estimation model trained on …
-      id: "…
+      description: "Strong Depth Estimation model trained on a big compilation of datasets.",
+      id: "LiheYoung/depth-anything-large-hf",
     },
     {
       description: "A strong monocular depth estimation model.",
@@ -42,8 +42,12 @@ const taskData: TaskDataCustom = {
       id: "radames/dpt-depth-estimation-3d-voxels",
     },
     {
-      description: "An application …
-      id: "…
+      description: "An application to compare the outputs of different depth estimation models.",
+      id: "LiheYoung/Depth-Anything",
+    },
+    {
+      description: "An application to try state-of-the-art depth estimation.",
+      id: "merve/compare_depth_models",
     },
   ],
   summary: "Depth estimation is the task of predicting depth of the objects present in an image.",
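For context, the Depth Anything checkpoint added above can be queried over the serverless Inference API. A minimal TypeScript sketch follows, assuming Node 18+ (global fetch, ESM top-level await), an HF_TOKEN environment variable, and, as a further assumption on our part, that the depth-estimation endpoint returns the predicted depth map as image bytes:

    // Hedged sketch: send an image to the Inference API, save the returned depth map.
    import { readFile, writeFile } from "node:fs/promises";

    const MODEL = "LiheYoung/depth-anything-large-hf";
    const image = await readFile("room.jpg"); // any local test image

    const res = await fetch(`https://api-inference.huggingface.co/models/${MODEL}`, {
      method: "POST",
      headers: { Authorization: `Bearer ${process.env.HF_TOKEN}` },
      body: image,
    });
    if (!res.ok) throw new Error(`Inference API error: ${res.status}`);
    // Assumption: the response body is the depth map encoded as a PNG.
    await writeFile("depth.png", Buffer.from(await res.arrayBuffer()));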
packages/tasks/src/tasks/mask-generation/data.ts
CHANGED
@@ -3,14 +3,51 @@ import type { TaskDataCustom } from "..";
 const taskData: TaskDataCustom = {
   datasets: [],
   demo: {
-    inputs: [ …
-    …
+    inputs: [
+      {
+        filename: "mask-generation-input.png",
+        type: "img",
+      },
+    ],
+    outputs: [
+      {
+        filename: "mask-generation-output.png",
+        type: "img",
+      },
+    ],
   },
   metrics: [],
-  models: [ …
-  …
+  models: [
+    {
+      description: "Small yet powerful mask generation model.",
+      id: "Zigeng/SlimSAM-uniform-50",
+    },
+    {
+      description: "Very strong mask generation model.",
+      id: "facebook/sam-vit-huge",
+    },
+  ],
+  spaces: [
+    {
+      description:
+        "An application that combines a mask generation model with an image embedding model for open-vocabulary image segmentation.",
+      id: "SkalskiP/SAM_and_MetaCLIP",
+    },
+    {
+      description: "An application that compares the performance of a large and a small mask generation model.",
+      id: "merve/slimsam",
+    },
+    {
+      description: "An application based on an improved mask generation model.",
+      id: "linfanluntan/Grounded-SAM",
+    },
+    {
+      description: "An application to remove objects from videos using mask generation models.",
+      id: "SkalskiP/SAM_and_ProPainter",
+    },
+  ],
   summary:
-    "Mask generation is …
+    "Mask generation is the task of generating masks that identify a specific object or region of interest in a given image. Masks are often used in segmentation tasks, where they provide a precise way to isolate the object of interest for further processing or analysis.",
   widgetModels: [],
   youtubeId: "",
 };
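To make the new summary concrete: a mask is just a per-pixel keep/discard flag, and isolating the object of interest amounts to zeroing out everything else. A small illustrative TypeScript helper (the function name and data shapes are ours, not part of this package):

    // Keep only the RGBA pixels covered by a binary mask; everything else
    // becomes transparent black. One mask entry per pixel, 4 bytes per pixel.
    function applyMask(pixels: Uint8ClampedArray, mask: Uint8Array): Uint8ClampedArray {
      const out = new Uint8ClampedArray(pixels.length); // zero-initialized
      for (let i = 0; i < mask.length; i++) {
        if (mask[i]) out.set(pixels.subarray(i * 4, i * 4 + 4), i * 4);
      }
      return out;
    }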
packages/tasks/src/tasks/text-generation/data.ts
CHANGED
@@ -12,12 +12,12 @@ const taskData: TaskDataCustom = {
       id: "the_pile",
     },
     {
-      description: "…
-      id: "…
+      description: "Truly open-source, curated and cleaned dialogue dataset.",
+      id: "HuggingFaceH4/ultrachat_200k",
     },
     {
-      description: "…
-      id: "…
+      description: "An instruction dataset with preference ratings on responses.",
+      id: "openbmb/UltraFeedback",
     },
   ],
   demo: {
@@ -59,66 +59,50 @@ const taskData: TaskDataCustom = {
       id: "bigcode/starcoder",
     },
     {
-      description: "A…
-      id: "…
+      description: "A very powerful text generation model.",
+      id: "mistralai/Mixtral-8x7B-Instruct-v0.1",
     },
     {
-      description: "…
-      id: "…
+      description: "Small yet powerful text generation model.",
+      id: "microsoft/phi-2",
     },
     {
-      description: "A…
-      id: "…
+      description: "A very powerful model that can chat, do mathematical reasoning and write code.",
+      id: "openchat/openchat-3.5-0106",
     },
     {
-      description: "…
-      id: "…
+      description: "Very strong yet small assistant model.",
+      id: "HuggingFaceH4/zephyr-7b-beta",
     },
     {
-      description: "…
-      id: "EleutherAI/pythia-12b",
-    },
-    {
-      description: "A large text-to-text model trained to follow instructions.",
-      id: "google/flan-ul2",
-    },
-    {
-      description: "A large and powerful text generation model.",
-      id: "tiiuae/falcon-40b",
-    },
-    {
-      description: "State-of-the-art open-source large language model.",
+      description: "Very strong open-source large language model.",
       id: "meta-llama/Llama-2-70b-hf",
     },
   ],
   spaces: [
     {
-      description: "A…
-      id: "…
+      description: "A leaderboard to compare different open-source text generation models based on various benchmarks.",
+      id: "HuggingFaceH4/open_llm_leaderboard",
     },
     {
-      description: "An text generation based application…
-      id: "…
+      description: "An text generation based application based on a very powerful LLaMA2 model.",
+      id: "ysharma/Explore_llamav2_with_TGI",
    },
    {
-      description: "An text generation based application…
-      id: "…
+      description: "An text generation based application to converse with Zephyr model.",
+      id: "HuggingFaceH4/zephyr-chat",
     },
     {
       description: "An text generation application that combines OpenAI and Hugging Face models.",
       id: "microsoft/HuggingGPT",
     },
     {
-      description: "An…
-      id: "…
-    },
-    {
-      description: "An UI that uses StableLM-tuned-alpha-7b.",
-      id: "togethercomputer/OpenChatKit",
+      description: "An chatbot to converse with a very powerful text generation model.",
+      id: "mlabonne/phixtral-chat",
     },
   ],
   summary:
-    "Generating text is the task of …
+    "Generating text is the task of generating new text given another text. These models can, for example, fill in incomplete text or paraphrase.",
   widgetModels: ["HuggingFaceH4/zephyr-7b-beta"],
   youtubeId: "Vpjb1lu0MDk",
 };
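The chat models listed here can be exercised with this repo's own @huggingface/inference client; textGeneration is a documented method on HfInference. A minimal sketch using the file's widget model (the prompt and generation parameters are illustrative):

    import { HfInference } from "@huggingface/inference";

    const hf = new HfInference(process.env.HF_TOKEN);
    const out = await hf.textGeneration({
      model: "HuggingFaceH4/zephyr-7b-beta",
      inputs: "Write one sentence about open-source language models.",
      parameters: { max_new_tokens: 50 }, // cap the completion length
    });
    console.log(out.generated_text);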
packages/tasks/src/tasks/text-to-image/data.ts
CHANGED
@@ -79,13 +79,17 @@ const taskData: TaskDataCustom = {
       id: "latent-consistency/lcm-lora-for-sdxl",
     },
     {
-      description: "A…
-      id: "…
+      description: "A gallery to explore various text-to-image models.",
+      id: "multimodalart/LoraTheExplorer",
     },
     {
       description: "An application for `text-to-image`, `image-to-image` and image inpainting.",
       id: "ArtGAN/Stable-Diffusion-ControlNet-WebUI",
     },
+    {
+      description: "An application to generate realistic images given photos of a person and a prompt.",
+      id: "InstantX/InstantID",
+    },
   ],
   summary:
     "Generates images from input text. These models can be used to generate and modify images based on text prompts.",
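Similarly, text-to-image checkpoints can be called through the client's textToImage method, which resolves to a Blob of image bytes. A short sketch (the SDXL model id is an illustrative choice, not taken from this diff):

    import { writeFile } from "node:fs/promises";
    import { HfInference } from "@huggingface/inference";

    const hf = new HfInference(process.env.HF_TOKEN);
    const blob = await hf.textToImage({
      model: "stabilityai/stable-diffusion-xl-base-1.0", // illustrative checkpoint
      inputs: "a watercolor painting of a lighthouse at dawn",
    });
    await writeFile("out.png", Buffer.from(await blob.arrayBuffer()));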
packages/tasks/src/tasks/zero-shot-image-classification/data.ts
CHANGED
@@ -52,9 +52,8 @@ const taskData: TaskDataCustom = {
       id: "openai/clip-vit-base-patch16",
     },
     {
-      description:
-        "…
-      id: "openai/clip-vit-large-patch14-336",
+      description: "Strong zero-shot image classification model.",
+      id: "google/siglip-base-patch16-224",
     },
     {
       description: "Strong image classification model for biomedical domain.",
@@ -64,12 +63,16 @@ const taskData: TaskDataCustom = {
   spaces: [
     {
       description:
-        "An application that leverages zero…
+        "An application that leverages zero-shot image classification to find best captions to generate an image. ",
       id: "pharma/CLIP-Interrogator",
     },
+    {
+      description: "An application to compare different zero-shot image classification models. ",
+      id: "merve/compare_clip_siglip",
+    },
   ],
   summary:
-    "Zero…
+    "Zero-shot image classification is the task of classifying previously unseen classes during training of a model.",
   widgetModels: ["openai/clip-vit-large-patch14-336"],
   youtubeId: "",
 };
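Zero-shot here means the candidate labels are supplied at inference time rather than fixed at training. A sketch with the client's zeroShotImageClassification helper, using the file's widget model (the image path and labels are illustrative):

    import { readFile } from "node:fs/promises";
    import { HfInference } from "@huggingface/inference";

    const hf = new HfInference(process.env.HF_TOKEN);
    const image = await readFile("pets.jpg");
    const result = await hf.zeroShotImageClassification({
      model: "openai/clip-vit-large-patch14-336",
      inputs: { image: new Blob([image]) },
      parameters: { candidate_labels: ["cat", "dog", "rabbit"] }, // chosen at inference time
    });
    console.log(result); // e.g. [{ label: "dog", score: 0.98 }, ...]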
packages/tasks/src/tasks/zero-shot-object-detection/data.ts
CHANGED
@@ -47,7 +47,12 @@ const taskData: TaskDataCustom = {
       id: "google/owlv2-base-patch16-ensemble",
     },
   ],
-  spaces: [ …
+  spaces: [
+    {
+      description: "A demo to try the state-of-the-art zero-shot object detection model, OWLv2.",
+      id: "merve/owlv2",
+    },
+  ],
   summary:
     "Zero-shot object detection is a computer vision task to detect objects and their classes in images, without any prior training or knowledge of the classes. Zero-shot object detection models receive an image as input, as well as a list of candidate classes, and output the bounding boxes and labels where the objects have been detected.",
   widgetModels: [],
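OWLv2 likewise takes an image plus free-form candidate classes. A hedged raw-fetch sketch: the JSON payload and response shapes below are our assumptions about the zero-shot-object-detection endpoint, so verify them against the task's API documentation before relying on this:

    import { readFile } from "node:fs/promises";

    const MODEL = "google/owlv2-base-patch16-ensemble";
    const image = await readFile("street.jpg");

    const res = await fetch(`https://api-inference.huggingface.co/models/${MODEL}`, {
      method: "POST",
      headers: {
        Authorization: `Bearer ${process.env.HF_TOKEN}`,
        "Content-Type": "application/json",
      },
      body: JSON.stringify({
        // Assumed payload shape: base64 image plus the candidate classes.
        inputs: { image: image.toString("base64"), candidate_labels: ["car", "bicycle", "person"] },
      }),
    });
    // Assumed response shape: [{ label, score, box: { xmin, ymin, xmax, ymax } }, ...]
    console.log(await res.json());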
|