Upload 31 files
- .gitattributes +36 -35
- .gitignore +15 -0
- .replit +39 -0
- .streamlit/config.toml +12 -0
- .streamlit/styles.css +363 -0
- README.md +13 -13
- app.py +63 -0
- attached_assets/Pasted-Project-Overview-Web-Based-Model-Management-Interface-with-Streamlit-A-web-based-model-management-i-1741430507133.txt +46 -0
- components/__init__.py +1 -0
- components/create_repository.py +143 -0
- components/documentation_generator.py +348 -0
- components/edit_model.py +155 -0
- components/model_card.py +147 -0
- components/model_inference.py +378 -0
- components/sidebar.py +148 -0
- components/upload_model.py +122 -0
- components/version_control.py +194 -0
- generated-icon.png +3 -0
- pages/__init__.py +1 -0
- pages/analytics.py +277 -0
- pages/batch_operations.py +319 -0
- pages/home.py +209 -0
- pages/model_details.py +177 -0
- pages/repository_management.py +55 -0
- pyproject.toml +9 -0
- replit.nix +7 -0
- requirements.txt +15 -0
- utils/__init__.py +1 -0
- utils/api_client.py +154 -0
- utils/auth.py +91 -0
- uv.lock +0 -0
.gitattributes
CHANGED
@@ -1,35 +1,36 @@
-*.7z filter=lfs diff=lfs merge=lfs -text
-*.arrow filter=lfs diff=lfs merge=lfs -text
-*.bin filter=lfs diff=lfs merge=lfs -text
-*.bz2 filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
-*.ftz filter=lfs diff=lfs merge=lfs -text
-*.gz filter=lfs diff=lfs merge=lfs -text
-*.h5 filter=lfs diff=lfs merge=lfs -text
-*.joblib filter=lfs diff=lfs merge=lfs -text
-*.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.mlmodel filter=lfs diff=lfs merge=lfs -text
-*.model filter=lfs diff=lfs merge=lfs -text
-*.msgpack filter=lfs diff=lfs merge=lfs -text
-*.npy filter=lfs diff=lfs merge=lfs -text
-*.npz filter=lfs diff=lfs merge=lfs -text
-*.onnx filter=lfs diff=lfs merge=lfs -text
-*.ot filter=lfs diff=lfs merge=lfs -text
-*.parquet filter=lfs diff=lfs merge=lfs -text
-*.pb filter=lfs diff=lfs merge=lfs -text
-*.pickle filter=lfs diff=lfs merge=lfs -text
-*.pkl filter=lfs diff=lfs merge=lfs -text
-*.pt filter=lfs diff=lfs merge=lfs -text
-*.pth filter=lfs diff=lfs merge=lfs -text
-*.rar filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
-saved_model/**/* filter=lfs diff=lfs merge=lfs -text
-*.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tar filter=lfs diff=lfs merge=lfs -text
-*.tflite filter=lfs diff=lfs merge=lfs -text
-*.tgz filter=lfs diff=lfs merge=lfs -text
-*.wasm filter=lfs diff=lfs merge=lfs -text
-*.xz filter=lfs diff=lfs merge=lfs -text
-*.zip filter=lfs diff=lfs merge=lfs -text
-*.zst filter=lfs diff=lfs merge=lfs -text
-*tfevents* filter=lfs diff=lfs merge=lfs -text
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+generated-icon.png filter=lfs diff=lfs merge=lfs -text
.gitignore
ADDED
@@ -0,0 +1,15 @@
+
+# Python
+__pycache__/
+*.py[cod]
+*$py.class
+.env
+.venv
+env/
+venv/
+
+# Streamlit
+.streamlit/secrets.toml
+
+# Miscellaneous
+.DS_Store
.replit
ADDED
@@ -0,0 +1,39 @@
+modules = ["python-3.11"]
+
+[nix]
+channel = "stable-24_05"
+
+[deployment]
+deploymentTarget = "autoscale"
+run = ["sh", "-c", "streamlit run app.py"]
+
+[workflows]
+runButton = "Project"
+
+[[workflows.workflow]]
+name = "Project"
+mode = "parallel"
+author = "agent"
+
+[[workflows.workflow.tasks]]
+task = "workflow.run"
+args = "Streamlit App"
+
+[[workflows.workflow]]
+name = "Streamlit App"
+author = "agent"
+
+[workflows.workflow.metadata]
+agentRequireRestartOnSave = false
+
+[[workflows.workflow.tasks]]
+task = "packager.installForAll"
+
+[[workflows.workflow.tasks]]
+task = "shell.exec"
+args = "streamlit run app.py"
+waitForPort = 8501
+
+[[ports]]
+localPort = 8501
+externalPort = 80
.streamlit/config.toml
ADDED
@@ -0,0 +1,12 @@
+
+[server]
+headless = true
+address = "0.0.0.0"
+port = 8501
+
+[theme]
+primaryColor = "#FFD21E" # HF Yellow
+secondaryBackgroundColor = "#F0F2F6"
+backgroundColor = "#FFFFFF" # White
+textColor = "#1A1A1A" # Near black
+font = "sans serif"
.streamlit/styles.css
ADDED
@@ -0,0 +1,363 @@
+/* Custom styles to match Hugging Face branding */
+
+@import url('https://fonts.googleapis.com/css2?family=Inter:wght@400;500;600;700&family=Source+Sans+Pro:wght@400;600;700&display=swap');
+
+html, body, [class*="css"] {
+    font-family: 'Source Sans Pro', 'Inter', sans-serif;
+    font-size: 14px;
+}
+
+/* Primary buttons - HF Yellow */
+.stButton > button {
+    background-color: #FFD21E;
+    color: #1A1A1A;
+    border: none;
+    font-weight: 600;
+    padding: 0.5rem 1rem;
+    border-radius: 0.375rem;
+    transition: all 0.2s;
+    box-shadow: 0 2px 4px rgba(0, 0, 0, 0.05);
+}
+
+.stButton > button:hover {
+    background-color: #E6BD1B;
+    color: #1A1A1A;
+    box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
+    transform: translateY(-1px);
+}
+
+.stButton > button:active {
+    transform: translateY(1px);
+    box-shadow: 0 1px 2px rgba(0, 0, 0, 0.1);
+}
+
+/* Secondary buttons - HF Blue */
+.secondary-button > button {
+    background-color: #84ADFF;
+    color: #1A1A1A;
+}
+
+.secondary-button > button:hover {
+    background-color: #6B8FE3;
+}
+
+/* Danger buttons - HF Coral */
+.danger-button > button {
+    background-color: #FF9D96;
+    color: #1A1A1A;
+}
+
+.danger-button > button:hover {
+    background-color: #E58A84;
+}
+
+/* Card styling */
+.hf-card {
+    background-color: #FFFFFF;
+    border-radius: 0.5rem;
+    padding: 1rem;
+    box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1);
+    margin-bottom: 1rem;
+    border: 1px solid #E5E7EB;
+    transition: all 0.3s ease;
+}
+
+.hf-card:hover {
+    box-shadow: 0 4px 12px rgba(0, 0, 0, 0.15);
+    border-color: #84ADFF;
+    transform: translateY(-2px);
+}
+
+/* Links - HF Blue */
+a {
+    color: #84ADFF;
+    text-decoration: none;
+    transition: color 0.2s;
+}
+
+a:hover {
+    color: #6B8FE3;
+    text-decoration: underline;
+}
+
+/* Headers */
+h1, h2, h3, h4, h5, h6 {
+    font-family: 'Inter', sans-serif;
+    font-weight: 600;
+    color: #1A1A1A;
+    margin-top: 0.5em;
+    margin-bottom: 0.5em;
+}
+
+h1 {
+    font-size: 2rem;
+    border-bottom: 2px solid #FFD21E;
+    padding-bottom: 0.3em;
+    margin-bottom: 0.8em;
+    display: inline-block;
+}
+
+h2 {
+    font-size: 1.5rem;
+    border-bottom: 1px solid #E5E7EB;
+    padding-bottom: 0.2em;
+}
+
+/* Spacing */
+.spacing-16 {
+    margin: 16px 0;
+}
+
+/* Status indicators */
+.status-success {
+    color: #10B981;
+}
+
+.status-warning {
+    color: #FFD21E;
+}
+
+.status-error {
+    color: #FF9D96;
+}
+
+/* Sidebar styling */
+.css-1lcbmhc .css-1adrfps {
+    background-color: #F9FAFB;
+}
+
+section[data-testid="stSidebar"] {
+    background-color: #F9FAFB;
+    border-right: 1px solid #E5E7EB;
+}
+
+/* Custom progress bar */
+.stProgress > div > div > div > div {
+    background-color: #84ADFF;
+}
+
+/* Alert boxes */
+.st-emotion-cache-16idsys p {
+    font-size: 14px;
+}
+
+div[data-baseweb="tooltip"] {
+    background-color: #1A1A1A;
+    color: white;
+    padding: 8px 12px;
+    border-radius: 4px;
+    font-size: 12px;
+    max-width: 300px;
+    z-index: 1000;
+}
+
+/* Adjust sidebar width */
+section[data-testid="stSidebar"] {
+    width: 18rem !important;
+}
+
+/* Model cards layout */
+.model-card {
+    display: flex;
+    flex-direction: column;
+    gap: 8px;
+    padding: 16px;
+    border-radius: 8px;
+    border: 1px solid #E5E7EB;
+    background-color: white;
+    transition: all 0.3s ease;
+    position: relative;
+    overflow: hidden;
+}
+
+.model-card:hover {
+    border-color: #84ADFF;
+    box-shadow: 0 8px 16px rgba(0, 0, 0, 0.1);
+    transform: translateY(-3px);
+}
+
+.model-card::after {
+    content: '';
+    position: absolute;
+    top: 0;
+    left: 0;
+    width: 4px;
+    height: 100%;
+    background-color: #FFD21E;
+    opacity: 0;
+    transition: opacity 0.3s ease;
+}
+
+.model-card:hover::after {
+    opacity: 1;
+}
+
+.model-card-header {
+    display: flex;
+    justify-content: space-between;
+    align-items: center;
+}
+
+.model-card-title {
+    font-weight: 600;
+    font-size: 1.1rem;
+    color: #1A1A1A;
+}
+
+.model-card-description {
+    color: #4B5563;
+    font-size: 0.9rem;
+    line-height: 1.4;
+}
+
+.model-card-footer {
+    display: flex;
+    justify-content: space-between;
+    margin-top: 12px;
+    align-items: center;
+}
+
+.model-card-tags {
+    display: flex;
+    gap: 8px;
+    flex-wrap: wrap;
+}
+
+.model-tag {
+    background-color: #F3F4F6;
+    padding: 4px 8px;
+    border-radius: 16px;
+    font-size: 0.8rem;
+    display: inline-flex;
+    align-items: center;
+    transition: background-color 0.2s;
+}
+
+.model-tag:hover {
+    background-color: #E5E7EB;
+}
+
+/* Badge styles */
+.badge {
+    display: inline-flex;
+    align-items: center;
+    padding: 4px 8px;
+    border-radius: 4px;
+    font-size: 0.8rem;
+    font-weight: 500;
+    margin-right: 8px;
+    transition: all 0.2s;
+}
+
+.github-badge {
+    background-color: #24292e;
+    color: white;
+}
+
+.hf-badge {
+    background-color: #FFD21E;
+    color: #1A1A1A;
+}
+
+.badge img {
+    height: 14px;
+    margin-right: 4px;
+}
+
+.badge:hover {
+    opacity: 0.9;
+    transform: translateY(-1px);
+}
+
+/* Tooltip container */
+.tooltip {
+    position: relative;
+    display: inline-block;
+    cursor: help;
+}
+
+.tooltip .tooltip-text {
+    visibility: hidden;
+    width: 200px;
+    background-color: #1A1A1A;
+    color: #fff;
+    text-align: center;
+    border-radius: 6px;
+    padding: 5px;
+    position: absolute;
+    z-index: 1;
+    bottom: 125%;
+    left: 50%;
+    margin-left: -100px;
+    opacity: 0;
+    transition: opacity 0.3s;
+    font-size: 12px;
+}
+
+.tooltip:hover .tooltip-text {
+    visibility: visible;
+    opacity: 1;
+}
+
+/* Instructions box */
+.instructions-box {
+    background-color: #F9FAFB;
+    border: 1px solid #E5E7EB;
+    border-left: 4px solid #84ADFF;
+    border-radius: 4px;
+    padding: 12px 16px;
+    margin-bottom: 16px;
+    font-size: 0.9rem;
+}
+
+.instructions-box h4 {
+    margin-top: 0;
+    margin-bottom: 8px;
+    color: #1A1A1A;
+}
+
+.instructions-box p {
+    margin: 0 0 8px 0;
+    color: #4B5563;
+}
+
+.instructions-box ul {
+    margin: 0;
+    padding-left: 20px;
+}
+
+/* Form inputs styling */
+.stTextInput > div > div > input {
+    border-radius: 4px;
+    border: 1px solid #E5E7EB;
+    padding: 8px 12px;
+    transition: all 0.2s;
+}
+
+.stTextInput > div > div > input:focus {
+    border-color: #84ADFF;
+    box-shadow: 0 0 0 3px rgba(132, 173, 255, 0.2);
+}
+
+.stTextArea > div > div > textarea {
+    border-radius: 4px;
+    border: 1px solid #E5E7EB;
+    padding: 8px 12px;
+    transition: all 0.2s;
+}
+
+.stTextArea > div > div > textarea:focus {
+    border-color: #84ADFF;
+    box-shadow: 0 0 0 3px rgba(132, 173, 255, 0.2);
+}
+
+/* Animation for loading states */
+@keyframes pulse {
+    0% { opacity: 1; }
+    50% { opacity: 0.6; }
+    100% { opacity: 1; }
+}
+
+.loading {
+    animation: pulse 1.5s infinite ease-in-out;
+}
README.md
CHANGED
@@ -1,13 +1,13 @@
----
-title: ModelHubManager
-emoji: 🐨
-colorFrom: purple
-colorTo: gray
-sdk: streamlit
-sdk_version: 1.43.1
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+---
+title: ModelHubManager
+emoji: 🐨
+colorFrom: purple
+colorTo: gray
+sdk: streamlit
+sdk_version: 1.43.1
+app_file: app.py
+pinned: false
+license: apache-2.0
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,63 @@
+import streamlit as st
+import os
+from utils.auth import check_authentication, logout
+from components.sidebar import render_sidebar
+from pages.home import render_home
+from pages.model_details import render_model_details
+from pages.repository_management import render_repository_management
+from pages.analytics import render_analytics_page  # Added import for analytics page
+from pages.batch_operations import render_batch_operations  # Added import for batch operations page
+
+# Set page configuration
+st.set_page_config(
+    page_title="HF Model Manager",
+    page_icon="🤗",
+    layout="wide",
+    initial_sidebar_state="expanded",
+)
+
+# Apply custom CSS
+with open(".streamlit/styles.css") as f:
+    st.markdown(f"<style>{f.read()}</style>", unsafe_allow_html=True)
+
+# Initialize session state
+if "authenticated" not in st.session_state:
+    st.session_state.authenticated = False
+if "page" not in st.session_state:
+    st.session_state.page = "home"
+if "selected_model" not in st.session_state:
+    st.session_state.selected_model = None
+if "models" not in st.session_state:
+    st.session_state.models = []
+if "api_token" not in st.session_state:
+    st.session_state.api_token = None
+
+
+def main():
+    # Check authentication
+    if not st.session_state.authenticated:
+        check_authentication()
+    else:
+        # Render sidebar
+        render_sidebar()
+
+        # Render selected page
+        if st.session_state.page == "home":
+            render_home()
+        elif st.session_state.page == "model_details":
+            render_model_details()
+        elif st.session_state.page == "repository_management":
+            render_repository_management()
+        elif st.session_state.page == "analytics":
+            render_analytics_page()
+        elif st.session_state.page == "batch_operations":  # Added routing for batch operations page
+            render_batch_operations()
+
+
+if __name__ == "__main__":
+    # Check if we have the Hugging Face token in secrets
+    if not st.session_state.get("api_token") and os.environ.get("HF_TOKEN"):
+        st.session_state.api_token = os.environ.get("HF_TOKEN")
+        st.rerun()
+
+    main()
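A note on the HF_TOKEN bootstrap above: on Hugging Face Spaces, repository secrets are exposed to the running app as environment variables, which is why os.environ.get("HF_TOKEN") picks up the Space's token. utils/auth.py is part of this commit but not shown in this section, so the snippet below is only an illustrative sketch of how such a token could be validated against the Hub, not the project's actual helper.

    # Illustrative sketch only: not the project's utils/auth.py.
    import os

    from huggingface_hub import HfApi


    def validate_token(token: str) -> bool:
        """Return True if the token authenticates against the Hugging Face Hub."""
        try:
            # whoami() raises if the token is invalid or expired
            HfApi(token=token).whoami()
            return True
        except Exception:
            return False


    if __name__ == "__main__":
        print(validate_token(os.environ.get("HF_TOKEN", "")))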
attached_assets/Pasted-Project-Overview-Web-Based-Model-Management-Interface-with-Streamlit-A-web-based-model-management-i-1741430507133.txt
ADDED
@@ -0,0 +1,46 @@
+Project Overview: Web-Based Model Management Interface with Streamlit
+A web-based model management interface built with Streamlit to interact with Hugging Face's ecosystem, enabling developers to efficiently manage their machine learning models through an intuitive, streamlined interface.
+
+Core Features:
+Create and manage model repositories: Easily create and maintain repositories for machine learning models with an intuitive interface.
+Interface with Hugging Face APIs: Seamlessly integrate with Hugging Face APIs to upload, update, and query model metadata.
+Push models to Hugging Face Model Hub: Directly push models to the Hugging Face Model Hub, making them accessible to the global ML community.
+Interactive model management interface: An interactive UI that allows users to add, update, and delete models, with the ability to view model details and manage associated metadata.
+Tech Stack:
+Frontend:
+Streamlit: Used for building the interactive UI, enabling rapid development of web applications.
+Tailwind CSS: For streamlined styling, ensuring that the app remains visually appealing and consistent with the Hugging Face aesthetic.
+Backend:
+Hugging Face API: To interact with the Hugging Face ecosystem, enabling model pushes and metadata management.
+Python: The backend logic for managing requests to Hugging Face APIs is implemented in Python.
+Database:
+Optionally, integrate a database like PostgreSQL or SQLite for persistent storage of model metadata.
+Visual References:
+Inspired by Hugging Face's Model Hub interface and Streamlit's clean app layouts, known for their developer-friendly and intuitive design.
+Hugging Face's interface for managing models is simple yet feature-packed.
+Streamlit's clean, minimalistic design will allow for quick interactions without distractions.
+Style Guide:
+Colors:
+Primary: #FFD21E (HF Yellow) – for key action buttons and highlights.
+Secondary: #84ADFF (HF Blue) – used for navigation, links, and headers.
+Background: #FFFFFF (white) – ensuring the app is light and easy on the eyes.
+Text: #1A1A1A (near black) – for readability and contrast.
+Accent: #FF9D96 (HF Coral) – for alerts, error messages, and secondary buttons.
+Design:
+Fonts: Source Sans Pro/Inter fonts – for clear and modern typography.
+Layout: Clean, single-column layout with consistent 16px spacing between elements.
+Components: Card-based components to display model information, making it easy to view and manage data.
+Responsive Design: Optimized for desktop workflows with mobile compatibility.
+Tech Specifications:
+Framework: Streamlit (Streamlit version 1.13.0 or above)
+API Integration: Hugging Face Model Hub API for repository management
+Hosting: Hugging Face Spaces (for deployment) or Heroku/Render for backend deployment
+Database: (Optional) Use SQLite for local storage or PostgreSQL for more robust database management
+Version Control: Git for version control and collaboration
+Frontend: Tailwind CSS for styling and layout management
+Backend Logic: Python for backend logic and integration with Hugging Face APIs
+Streamlit Features:
+Dynamic UI: Allows users to interact with form elements like text inputs, dropdowns, buttons, and file uploads for managing models.
+Data Display: Use of tables, cards, and lists to display model information dynamically.
+API Calls: Directly interact with Hugging Face APIs to push models, fetch metadata, and manage repositories without leaving the interface.
+Interactivity: Real-time updates based on user input, creating a seamless experience for users to create and manage models.
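The "Push models to Hugging Face Model Hub" feature described above maps directly onto the public huggingface_hub client. A minimal sketch, assuming placeholder names ("username/my-awesome-model", "./model.safetensors", and the token string are illustrative, not taken from this project):

    # Hedged sketch of the core Hub interaction described above.
    from huggingface_hub import HfApi

    api = HfApi(token="hf_...")  # placeholder token

    # Create (or reuse) a model repository
    api.create_repo(repo_id="username/my-awesome-model", repo_type="model", exist_ok=True)

    # Push a local weights file to the repository
    api.upload_file(
        path_or_fileobj="./model.safetensors",
        path_in_repo="model.safetensors",
        repo_id="username/my-awesome-model",
    )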
components/__init__.py
ADDED
@@ -0,0 +1 @@
+# This file is intentionally left empty to make the directory a Python package
components/create_repository.py
ADDED
@@ -0,0 +1,143 @@
+import streamlit as st
+import time
+
+
+def create_repository_form():
+    """Form for creating a new model repository"""
+    st.subheader("Create a New Model Repository")
+
+    with st.form("create_repo_form"):
+        # Repository name input
+        st.markdown("#### Repository Name")
+        repo_name = st.text_input(
+            "Enter a name for your repository",
+            help="This will be part of the URL: huggingface.co/username/repository-name",
+            placeholder="my-awesome-model",
+            key="repo_name_input",
+        )
+
+        # Repository visibility
+        st.markdown("#### Repository Visibility")
+        is_private = st.checkbox(
+            "Make repository private",
+            value=False,
+            help="Private repositories are only visible to you and collaborators",
+        )
+
+        # Repository type
+        st.markdown("#### Repository Type")
+        repo_type = st.selectbox(
+            "Select repository type",
+            options=["model", "dataset", "space"],
+            index=0,
+            help="The type of content you'll be storing in this repository",
+        )
+
+        # Model tags
+        st.markdown("#### Model Tags")
+        if "client" in st.session_state:
+            available_tags = st.session_state.client.get_model_tags()
+            selected_tags = st.multiselect(
+                "Select tags for your model",
+                options=available_tags,
+                help="Tags help others discover your model",
+            )
+
+        # Model description
+        st.markdown("#### Description")
+        description = st.text_area(
+            "Provide a brief description of your model",
+            placeholder="This model is designed for...",
+            help="This will appear on your model card and help others understand your model's purpose",
+        )
+
+        # Submit button
+        submitted = st.form_submit_button("Create Repository", use_container_width=True)
+
+        if submitted:
+            if not repo_name:
+                st.error("Repository name is required")
+                return False, None
+
+            # Validate repository name (alphanumeric with hyphens only)
+            if not all(c.isalnum() or c == "-" for c in repo_name):
+                st.error(
+                    "Repository name can only contain letters, numbers, and hyphens"
+                )
+                return False, None
+
+            # Create the repository
+            with st.spinner("Creating repository..."):
+                try:
+                    # Format the repo_id with username
+                    username = st.session_state.username
+                    repo_id = f"{username}/{repo_name}"
+
+                    # Create the repository
+                    success, response = st.session_state.client.create_model_repository(
+                        repo_name=repo_id,
+                        is_private=is_private,
+                        exist_ok=False,
+                        repo_type=repo_type,
+                    )
+
+                    if success:
+                        # Create a basic model card with description and tags
+                        model_card_content = f"""---
+tags:
+{chr(10).join(['- ' + tag for tag in selected_tags])}
+---
+
+# {repo_name}
+
+{description}
+
+## Model description
+
+Add more details about your model here.
+
+## Intended uses & limitations
+
+Describe the intended uses of your model and any limitations.
+
+## Training and evaluation data
+
+Describe the data you used to train and evaluate your model.
+
+## Training procedure
+
+Describe the training procedure.
+
+## Evaluation results
+
+Provide evaluation results.
+"""
+                        # Update the model card
+                        card_success, _ = st.session_state.client.update_model_card(
+                            repo_id, model_card_content
+                        )
+
+                        if card_success:
+                            st.success(f"Repository '{repo_id}' created successfully!")
+
+                            # Update the models list
+                            time.sleep(1)  # Wait briefly for the API to update
+                            st.session_state.models = (
+                                st.session_state.client.get_user_models()
+                            )
+
+                            return True, repo_id
+                        else:
+                            st.warning(
+                                "Repository created but failed to update model card."
+                            )
+                            return True, repo_id
+                    else:
+                        st.error(f"Failed to create repository: {response}")
+                        return False, None
+
+                except Exception as e:
+                    st.error(f"Error creating repository: {str(e)}")
+                    return False, None
+
+    return False, None
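The component above relies on st.session_state.client.create_model_repository and update_model_card, which live in utils/api_client.py (listed in this commit but not shown in this section). The wrapper below is only a guess at their shape, built on huggingface_hub calls that do exist; the class name and return conventions are assumptions inferred from how the component consumes them.

    # Hypothetical sketch of the client wrapper; not the project's utils/api_client.py.
    from io import BytesIO

    from huggingface_hub import HfApi


    class HFClient:
        def __init__(self, token: str):
            self.api = HfApi(token=token)

        def create_model_repository(self, repo_name, is_private=False, exist_ok=False, repo_type="model"):
            """Create a repo; return (success, url_or_error) as the form expects."""
            try:
                url = self.api.create_repo(
                    repo_id=repo_name, private=is_private, exist_ok=exist_ok, repo_type=repo_type
                )
                return True, str(url)
            except Exception as e:
                return False, str(e)

        def update_model_card(self, repo_id, content):
            """Write README.md to the repo; return (success, error_or_None)."""
            try:
                self.api.upload_file(
                    path_or_fileobj=BytesIO(content.encode("utf-8")),
                    path_in_repo="README.md",
                    repo_id=repo_id,
                )
                return True, None
            except Exception as e:
                return False, str(e)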
components/documentation_generator.py
ADDED
@@ -0,0 +1,348 @@
+
+import streamlit as st
+import re
+import json
+import time
+
+def model_documentation_generator(model_info):
+    """Generate comprehensive model documentation based on metadata"""
+    if not model_info:
+        st.error("Model information not found")
+        return
+
+    st.subheader("🔄 Automated Model Documentation Generator")
+    st.markdown("This tool generates a comprehensive model card based on model metadata and your input.")
+
+    # Extract existing model card content if available
+    model_card_content = ""
+    yaml_content = ""
+    markdown_content = ""
+    try:
+        repo_id = model_info.modelId
+        model_card_url = f"https://huggingface.co/{repo_id}/raw/main/README.md"
+        response = st.session_state.client.api._get_paginated(model_card_url)
+        if response.status_code == 200:
+            model_card_content = response.text
+
+            # Extract YAML frontmatter
+            yaml_match = re.search(r"---\s+(.*?)\s+---", model_card_content, re.DOTALL)
+            if yaml_match:
+                yaml_content = yaml_match.group(1)
+
+            # Extract markdown content (everything after frontmatter)
+            markdown_match = re.search(r"---\s+.*?\s+---\s*(.*)", model_card_content, re.DOTALL)
+            if markdown_match:
+                markdown_content = markdown_match.group(1).strip()
+    except Exception as e:
+        st.warning(f"Couldn't load model card: {str(e)}")
+
+    # Form for model metadata input
+    with st.form("model_doc_form"):
+        st.markdown("### Model Metadata")
+
+        # Basic Information
+        st.markdown("#### Basic Information")
+        col1, col2 = st.columns(2)
+
+        with col1:
+            # Extract model name from repo ID
+            model_name = model_info.modelId.split("/")[-1]
+            model_title = st.text_input("Model Title", value=model_name.replace("-", " ").title())
+
+        with col2:
+            # Model type selection
+            model_type_options = [
+                "Text Classification",
+                "Token Classification",
+                "Question Answering",
+                "Summarization",
+                "Translation",
+                "Text Generation",
+                "Image Classification",
+                "Object Detection",
+                "Other"
+            ]
+
+            # Try to determine model type from tags
+            default_type_index = 0
+            tags = getattr(model_info, "tags", [])
+            for i, option in enumerate(model_type_options):
+                option_key = option.lower().replace(" ", "-")
+                if option_key in tags or option_key.replace("-", "_") in tags:
+                    default_type_index = i
+                    break
+
+            model_type = st.selectbox(
+                "Model Type",
+                model_type_options,
+                index=default_type_index
+            )
+
+        # Model description
+        description = st.text_area(
+            "Model Description",
+            value=getattr(model_info, "description", "") or "",
+            height=100,
+            help="A brief overview of what the model does"
+        )
+
+        # Technical Information
+        st.markdown("#### Technical Information")
+        col1, col2 = st.columns(2)
+
+        with col1:
+            # Model Architecture
+            architecture_options = [
+                "BERT", "GPT-2", "T5", "RoBERTa", "DeBERTa", "DistilBERT",
+                "BART", "ResNet", "YOLO", "Other"
+            ]
+            architecture = st.selectbox("Model Architecture", architecture_options)
+
+            # Framework
+            framework_options = ["PyTorch", "TensorFlow", "JAX", "Other"]
+            framework = st.selectbox("Framework", framework_options)
+
+        with col2:
+            # Model size
+            model_size = st.text_input("Model Size (e.g., 110M parameters)")
+
+            # Language
+            language_options = ["English", "French", "German", "Spanish", "Chinese", "Japanese", "Multilingual", "Other"]
+            language = st.selectbox("Language", language_options)
+
+        # Training Information
+        st.markdown("#### Training Information")
+        col1, col2 = st.columns(2)
+
+        with col1:
+            # Training Dataset
+            training_data = st.text_input("Training Dataset(s)")
+
+            # Training compute
+            training_compute = st.text_input("Training Infrastructure (e.g., TPU v3-8, 4x A100)")
+
+        with col2:
+            # Evaluation Dataset
+            eval_data = st.text_input("Evaluation Dataset(s)")
+
+            # Training time
+            training_time = st.text_input("Training Time (e.g., 3 days, 12 hours)")
+
+        # Performance Metrics
+        st.markdown("#### Performance Metrics")
+
+        metrics_data = st.text_area(
+            "Performance Metrics (one per line, e.g., 'Accuracy: 0.92')",
+            height=100,
+            help="Key metrics and their values"
+        )
+
+        # Limitations
+        st.markdown("#### Limitations and Biases")
+
+        limitations = st.text_area(
+            "Known Limitations and Biases",
+            height=100,
+            help="Document any known limitations, biases, or ethical considerations"
+        )
+
+        # Usage Information
+        st.markdown("#### Usage Information")
+
+        use_cases = st.text_area(
+            "Intended Use Cases",
+            height=100,
+            help="Describe how the model should be used"
+        )
+
+        code_example = st.text_area(
+            "Code Example",
+            height=150,
+            value=f"""
+```python
+from transformers import AutoTokenizer, AutoModel
+
+tokenizer = AutoTokenizer.from_pretrained("{model_info.modelId}")
+model = AutoModel.from_pretrained("{model_info.modelId}")
+
+inputs = tokenizer("Hello, world!", return_tensors="pt")
+outputs = model(**inputs)
+```
+""",
+            help="Provide a simple code example showing how to use the model"
+        )
+
+        # License and Citation
+        st.markdown("#### License and Citation")
+
+        license_options = ["MIT", "Apache-2.0", "GPL-3.0", "CC-BY-SA-4.0", "CC-BY-4.0", "Proprietary", "Other"]
+        license_type = st.selectbox("License", license_options)
+
+        citation = st.text_area(
+            "Citation Information",
+            height=100,
+            help="Provide citation information if applicable"
+        )
+
+        # Tags
+        st.markdown("#### Tags")
+
+        # Get available tags
+        available_tags = st.session_state.client.get_model_tags()
+
+        # Extract existing tags
+        existing_tags = []
+        if yaml_content:
+            tags_match = re.search(r"tags:\s*((?:- .*?\n)+)", yaml_content, re.DOTALL)
+            if tags_match:
+                existing_tags = [
+                    line.strip("- \n")
+                    for line in tags_match.group(1).split("\n")
+                    if line.strip().startswith("-")
+                ]
+
+        selected_tags = st.multiselect(
+            "Select tags for your model",
+            options=available_tags,
+            default=existing_tags,
+            help="Tags help others discover your model"
+        )
+
+        # Advanced options
+        with st.expander("Advanced Options"):
+            keep_existing_content = st.checkbox(
+                "Keep existing custom content",
+                value=True,
+                help="If checked, we'll try to preserve custom sections from your existing model card"
+            )
+
+            additional_sections = st.text_area(
+                "Additional Custom Sections (in Markdown)",
+                height=200,
+                help="Add any additional custom sections in Markdown format"
+            )
+
+        # Submit button
+        submitted = st.form_submit_button("Generate Model Card", use_container_width=True)
+
+    if submitted:
+        with st.spinner("Generating comprehensive model card..."):
+            try:
+                # Parse performance metrics
+                metrics_list = []
+                for line in metrics_data.split("\n"):
+                    line = line.strip()
+                    if line:
+                        metrics_list.append(line)
+
+                # Generate YAML frontmatter
+                yaml_frontmatter = f"""tags:
+{chr(10).join(['- ' + tag for tag in selected_tags])}
+license: {license_type}"""
+
+                if language and language != "Other":
+                    yaml_frontmatter += f"\nlanguage: {language.lower()}"
+
+                if model_type and model_type != "Other":
+                    yaml_frontmatter += f"\npipeline_tag: {model_type.lower().replace(' ', '-')}"
+
+                # Generate markdown content
+                md_content = f"""# {model_title}
+
+{description}
+
+## Model Description
+
+This model is a {architecture}-based model for {model_type} tasks. It was developed using {framework} and consists of {model_size if model_size else "multiple"} parameters.
+
+"""
+
+                # Training section
+                if training_data or eval_data or training_compute or training_time:
+                    md_content += "## Training and Evaluation Data\n\n"
+
+                    if training_data:
+                        md_content += f"The model was trained on {training_data}. "
+
+                    if training_compute:
+                        md_content += f"Training was performed using {training_compute}. "
+
+                    if training_time:
+                        md_content += f"The total training time was approximately {training_time}."
+
+                    md_content += "\n\n"
+
+                    if eval_data:
+                        md_content += f"Evaluation was performed on {eval_data}.\n\n"
+
+                # Performance metrics
+                if metrics_list:
+                    md_content += "## Model Performance\n\n"
+                    md_content += "The model achieves the following performance metrics:\n\n"
+                    for metric in metrics_list:
+                        md_content += f"- {metric}\n"
+                    md_content += "\n"
+
+                # Limitations
+                if limitations:
+                    md_content += "## Limitations and Biases\n\n"
+                    md_content += f"{limitations}\n\n"
+
+                # Usage
+                if use_cases:
+                    md_content += "## Intended Uses & Limitations\n\n"
+                    md_content += f"{use_cases}\n\n"
+
+                # Code example
+                if code_example:
+                    md_content += "## How to Use\n\n"
+                    md_content += "Here's an example of how to use this model:\n\n"
+                    md_content += f"{code_example}\n\n"
+
+                # Citation
+                if citation:
+                    md_content += "## Citation\n\n"
+                    md_content += f"{citation}\n\n"
+
+                # Keep existing custom content if requested
+                if keep_existing_content and markdown_content:
+                    # Try to extract sections we haven't covered
+                    existing_sections = re.findall(r"^## (.+?)\n\n(.*?)(?=^## |\Z)", markdown_content, re.MULTILINE | re.DOTALL)
+                    standard_sections = ["Model Description", "Training and Evaluation Data", "Model Performance",
+                                         "Limitations and Biases", "Intended Uses & Limitations", "How to Use", "Citation"]
+
+                    for section_title, section_content in existing_sections:
+                        if section_title.strip() not in standard_sections:
+                            md_content += f"## {section_title}\n\n{section_content}\n\n"
+
+                # Add additional custom sections
+                if additional_sections:
+                    md_content += f"\n{additional_sections}\n"
+
+                # Combine everything into the final model card
+                final_model_card = f"---\n{yaml_frontmatter}\n---\n\n{md_content.strip()}"
+
+                # Display the generated model card
+                st.markdown("### Generated Model Card")
+                st.code(final_model_card, language="markdown")
+
+                # Option to update the model card
+                if st.button("Update Model Card", use_container_width=True, type="primary"):
+                    with st.spinner("Updating model card..."):
+                        try:
+                            # Update the model card
+                            success, _ = st.session_state.client.update_model_card(
+                                model_info.modelId, final_model_card
+                            )
+
+                            if success:
+                                st.success("Model card updated successfully!")
+                                time.sleep(1)  # Give API time to update
+                                st.rerun()
+                            else:
+                                st.error("Failed to update model card")
+                        except Exception as e:
+                            st.error(f"Error updating model card: {str(e)}")
+
+            except Exception as e:
+                st.error(f"Error generating model card: {str(e)}")
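The generator above extracts YAML frontmatter with regular expressions. A sketch of an alternative using PyYAML for the same split, under the assumption that PyYAML is available (this section does not confirm it is in requirements.txt):

    # Alternative frontmatter parsing sketch; PyYAML dependency is assumed.
    import yaml


    def split_model_card(text: str):
        """Return (metadata_dict, markdown_body) for a model card with --- frontmatter."""
        if text.startswith("---"):
            # Split on the opening and closing '---' delimiters of the frontmatter
            _, frontmatter, body = text.split("---", 2)
            return yaml.safe_load(frontmatter) or {}, body.strip()
        return {}, text.strip()


    meta, body = split_model_card("---\ntags:\n- text-classification\n---\n\n# Demo")
    print(meta["tags"], body)  # ['text-classification'] # Demo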
components/edit_model.py
ADDED
@@ -0,0 +1,155 @@
+import streamlit as st
+import re
+
+
+def edit_model_form(model_info):
+    """Form for editing model metadata"""
+    st.subheader("Edit Model Information")
+
+    if not model_info:
+        st.error("Model information not found")
+        return False, None
+
+    # Extract model card content (README.md) if available
+    model_card_content = ""
+    try:
+        repo_id = model_info.modelId
+        model_card_url = f"https://huggingface.co/{repo_id}/raw/main/README.md"
+        response = st.session_state.client.api._get_paginated(model_card_url)
+        if response.status_code == 200:
+            model_card_content = response.text
+    except Exception as e:
+        st.warning(f"Couldn't load model card: {str(e)}")
+
+    # Extract tags from model card
+    tags = []
+    if model_card_content:
+        # Look for tags section in YAML frontmatter
+        yaml_match = re.search(r"---\s+(.*?)\s+---", model_card_content, re.DOTALL)
+        if yaml_match:
+            yaml_content = yaml_match.group(1)
+            tags_match = re.search(r"tags:\s*((?:- .*?\n)+)", yaml_content, re.DOTALL)
+            if tags_match:
+                tags_content = tags_match.group(1)
+                tags = [
+                    line.strip("- \n")
+                    for line in tags_content.split("\n")
+                    if line.strip().startswith("-")
+                ]
+
+    # Extract description (first paragraph after the title)
+    description = ""
+    if model_card_content:
+        # Find content after title and before next heading
+        title_match = re.search(
+            r"# .*?\n\n(.*?)(?=\n## |\Z)", model_card_content, re.DOTALL
+        )
+        if title_match:
+            description = title_match.group(1).strip()
+
+    with st.form("edit_model_form"):
+        # Model tags
+        st.markdown("#### Model Tags")
+        available_tags = st.session_state.client.get_model_tags()
+        selected_tags = st.multiselect(
+            "Select tags for your model",
+            options=available_tags,
+            default=tags,
+            help="Tags help others discover your model",
+        )
+
+        # Model description
+        st.markdown("#### Description")
+        updated_description = st.text_area(
+            "Provide a brief description of your model",
+            value=description,
+            help="This will appear on your model card and help others understand your model's purpose",
+        )
+
+        # Full model card content (for advanced users)
+        st.markdown("#### Full Model Card (Markdown)")
+        st.markdown(
+            "Edit the full model card content if needed. This is in Markdown format."
+        )
+        updated_model_card = st.text_area(
+            "Model Card Content", value=model_card_content, height=300
+        )
+
+        # Submit button
+        submitted = st.form_submit_button(
+            "Update Model Information", use_container_width=True
+        )
+
+        if submitted:
+            # Update the model card
+            with st.spinner("Updating model information..."):
+                try:
+                    repo_id = model_info.modelId
+
+                    # If the user has edited the full model card, use that
+                    if updated_model_card != model_card_content:
+                        new_content = updated_model_card
+                    else:
+                        # Otherwise, update only tags and description in the existing card
+                        # Update tags in YAML frontmatter
+                        if yaml_match:
+                            yaml_content = yaml_match.group(1)
+                            if tags_match:
+                                # Replace tags section
+                                new_yaml = yaml_content.replace(
+                                    tags_match.group(0),
+                                    f"tags:\n"
+                                    + "\n".join([f"- {tag}" for tag in selected_tags])
+                                    + "\n",
+                                )
+                            else:
+                                # Add tags section
+                                new_yaml = (
+                                    yaml_content
+                                    + f"\ntags:\n"
+                                    + "\n".join([f"- {tag}" for tag in selected_tags])
+                                    + "\n"
+                                )
+
+                            new_content = model_card_content.replace(
+                                yaml_match.group(0), f"---\n{new_yaml}---"
+                            )
+                        else:
+                            # Add YAML frontmatter with tags
+                            tags_yaml = (
+                                "---\ntags:\n"
+                                + "\n".join([f"- {tag}" for tag in selected_tags])
+                                + "\n---\n\n"
+                            )
+                            new_content = tags_yaml + model_card_content
+
+                        # Update description
+                        if title_match and updated_description != description:
+                            new_content = new_content.replace(
+                                title_match.group(0),
+                                title_match.group(0).replace(
+                                    description, updated_description
+                                ),
+                            )
+
+                    # Update the model card
+                    success, _ = st.session_state.client.update_model_card(
+                        repo_id, new_content
+                    )
+
+                    if success:
+                        st.success("Model information updated successfully!")
+                        # Refresh the models list
+                        st.session_state.models = (
+                            st.session_state.client.get_user_models()
+                        )
+                        return True, repo_id
+                    else:
+                        st.error("Failed to update model information")
+                        return False, None
+
+                except Exception as e:
+                    st.error(f"Error updating model information: {str(e)}")
+                    return False, None
+
+    return False, None
components/model_card.py
ADDED
@@ -0,0 +1,147 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as st
|
2 |
+
import datetime
|
3 |
+
|
4 |
+
|
5 |
+
def model_list(models, on_select_callback):
|
6 |
+
"""Render a list of model cards with enhanced UI and metadata badges"""
|
7 |
+
if not models:
|
8 |
+
st.info("No models found. Create your first repository to get started!")
|
9 |
+
return
|
10 |
+
|
11 |
+
# Display the models in a grid
|
12 |
+
cols = st.columns(2)
|
13 |
+
|
14 |
+
for i, model in enumerate(models):
|
15 |
+
with cols[i % 2]:
|
16 |
+
with st.container():
|
17 |
+
st.markdown(
|
18 |
+
f"""
|
19 |
+
<div class="model-card">
|
20 |
+
<div class="model-card-header">
|
21 |
+
<div class="model-card-title">{model.modelId}</div>
|
22 |
+
<div>
|
23 |
+
<span class="badge hf-badge">
|
24 |
+
<img src="https://huggingface.co/front/assets/huggingface_logo-noborder.svg" alt="HF">
|
25 |
+
HF
|
26 |
+
</span>
|
27 |
+
<span class="tooltip">
|
28 |
+
ℹ️
|
29 |
+
<span class="tooltip-text">Last updated: {datetime.datetime.now().strftime("%Y-%m-%d")}</span>
|
30 |
+
</span>
|
31 |
+
</div>
|
32 |
+
</div>
|
33 |
+
<div class="model-card-description">
|
34 |
+
{getattr(model, 'description', 'No description available.') or 'No description available.'}
|
35 |
+
</div>
|
36 |
+
<div class="model-card-footer">
|
37 |
+
<div class="model-card-tags">
|
38 |
+
{generate_tags(model)}
|
39 |
+
</div>
|
40 |
+
<div>
|
41 |
+
                <span style="color: #6B7280; font-size: 0.8rem; margin-right: 8px;">
                    <span title="Downloads">📥 {getattr(model, 'downloads', 0)}</span>
                </span>
                <span style="color: #6B7280; font-size: 0.8rem;">
                    <span title="Likes">❤️ {getattr(model, 'likes', 0)}</span>
                </span>
            </div>
        </div>
    </div>
    """,
        unsafe_allow_html=True,
    )
    # Button to select model below the card
    if st.button(
        "View Details",
        key=f"view_{model.modelId}",
        use_container_width=True,
    ):
        on_select_callback(model.modelId)


def generate_tags(model):
    """Generate HTML for model tags"""
    tags = getattr(model, "tags", []) or []
    if not tags:
        tags = ["untagged"]

    tags_html = ""
    for tag in tags[:3]:  # Limit to 3 tags to avoid clutter
        tags_html += f'<span class="model-tag">{tag}</span>'

    if len(tags) > 3:
        tags_html += f'<span class="model-tag">+{len(tags) - 3} more</span>'

    return tags_html


def model_detail_card(model):
    """Render a detailed model card with badges, stats, and actions"""
    st.markdown(
        f"""
        <div class="hf-card" style="padding: 20px; margin-bottom: 24px;">
            <div style="display: flex; justify-content: space-between; align-items: flex-start; margin-bottom: 16px;">
                <div>
                    <h2 style="margin-top: 0; margin-bottom: 8px;">{model.modelId}</h2>
                    <div style="display: flex; gap: 8px; margin-bottom: 16px;">
                        <a href="https://huggingface.co/{st.session_state.username}/{model.modelId}" target="_blank" class="badge hf-badge">
                            <img src="https://huggingface.co/front/assets/huggingface_logo-noborder.svg" alt="HF">
                            View on Hugging Face
                        </a>
                        <a href="#" class="badge github-badge">
                            <img src="https://github.githubassets.com/images/modules/logos_page/GitHub-Mark.png" alt="GitHub">
                            View on GitHub
                        </a>
                    </div>
                </div>
                <div style="display: flex; gap: 8px; align-items: center;">
                    <span title="Downloads" class="tooltip">
                        📥 {getattr(model, 'downloads', 0)}
                        <span class="tooltip-text">Total downloads</span>
                    </span>
                    <span title="Likes" class="tooltip">
                        ❤️ {getattr(model, 'likes', 0)}
                        <span class="tooltip-text">Total likes</span>
                    </span>
                </div>
            </div>

            <div style="margin-bottom: 16px;">
                <h4 style="margin-bottom: 4px;">Description</h4>
                <p>{getattr(model, 'description', 'No description available.') or 'No description available.'}</p>
            </div>

            <div style="margin-bottom: 16px;">
                <h4 style="margin-bottom: 4px;">Tags</h4>
                <div style="display: flex; flex-wrap: wrap; gap: 8px;">
                    {generate_detailed_tags(model)}
                </div>
            </div>

            <div style="display: grid; grid-template-columns: 1fr 1fr; gap: 16px; margin-bottom: 16px;">
                <div>
                    <h4 style="margin-bottom: 4px;">Last Updated</h4>
                    <p>{datetime.datetime.now().strftime("%B %d, %Y")}</p>
                </div>
                <div>
                    <h4 style="margin-bottom: 4px;">Model Size</h4>
                    <p>{getattr(model, 'size', 'Unknown')} MB</p>
                </div>
            </div>
        </div>
        """,
        unsafe_allow_html=True,
    )


def generate_detailed_tags(model):
    """Generate HTML for detailed model tags view"""
    tags = getattr(model, "tags", []) or []
    if not tags:
        return '<span class="model-tag">untagged</span>'

    tags_html = ""
    for tag in tags:
        tags_html += f'<span class="model-tag">{tag}</span>'

    return tags_html
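One caveat worth noting: `generate_tags` and `generate_detailed_tags` interpolate tag strings straight into HTML that is later rendered with `unsafe_allow_html=True`. A minimal hardening sketch, assuming only the Python standard library (the `_escaped` helper name is illustrative, not part of the app):

import html

def generate_tags_escaped(model):
    """Variant of generate_tags that HTML-escapes tag text before interpolation."""
    tags = getattr(model, "tags", []) or ["untagged"]
    parts = [f'<span class="model-tag">{html.escape(tag)}</span>' for tag in tags[:3]]
    if len(tags) > 3:
        parts.append(f'<span class="model-tag">+{len(tags) - 3} more</span>')
    return "".join(parts)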
components/model_inference.py
ADDED
@@ -0,0 +1,378 @@
import streamlit as st
import json
import pandas as pd
import numpy as np
import plotly.express as px
from io import StringIO
import time

def model_inference_dashboard(model_info):
    """Create a dashboard for testing model inference directly in the app"""
    if not model_info:
        st.error("Model information not found")
        return

    st.subheader("🧠 Model Inference Dashboard")

    # Get the pipeline type based on model tags or information
    pipeline_tag = getattr(model_info, "pipeline_tag", None)
    if not pipeline_tag:
        # Try to determine from tags
        tags = getattr(model_info, "tags", [])
        for tag in tags:
            if tag in [
                "text-classification", "token-classification", "question-answering",
                "summarization", "translation", "text-generation", "fill-mask",
                "sentence-similarity", "image-classification", "object-detection",
                "image-segmentation", "text-to-image", "image-to-text"
            ]:
                pipeline_tag = tag
                break

    if not pipeline_tag:
        pipeline_tag = "text-classification"  # Default fallback

    # Display information about the model
    st.info(f"This dashboard allows you to test your model's inference capabilities. Model pipeline: **{pipeline_tag}**")

    # Different input options based on pipeline type
    input_data = None

    if pipeline_tag in ["text-classification", "token-classification", "fill-mask", "text-generation", "summarization"]:
        # Text-based input
        st.markdown("### Text Input")
        input_text = st.text_area(
            "Enter text for inference",
            value="This model is amazing!",
            height=150
        )

        # Additional parameters for specific pipelines
        if pipeline_tag == "text-generation":
            col1, col2 = st.columns(2)
            with col1:
                max_length = st.slider("Max Length", min_value=10, max_value=500, value=100)
            with col2:
                temperature = st.slider("Temperature", min_value=0.1, max_value=2.0, value=1.0, step=0.1)

            input_data = {
                "text": input_text,
                "max_length": max_length,
                "temperature": temperature
            }
        elif pipeline_tag == "summarization":
            max_length = st.slider("Max Summary Length", min_value=10, max_value=200, value=50)
            input_data = {
                "text": input_text,
                "max_length": max_length
            }
        else:
            input_data = {"text": input_text}

    elif pipeline_tag in ["question-answering"]:
        st.markdown("### Question & Context")
        question = st.text_input("Question", value="What is this model about?")
        context = st.text_area(
            "Context",
            value="This model is a transformer-based language model designed for natural language understanding tasks.",
            height=150
        )
        input_data = {
            "question": question,
            "context": context
        }

    elif pipeline_tag in ["translation"]:
        st.markdown("### Translation")
        source_lang = st.selectbox("Source Language", ["English", "French", "German", "Spanish", "Chinese"])
        target_lang = st.selectbox("Target Language", ["French", "English", "German", "Spanish", "Chinese"])
        translation_text = st.text_area("Text to translate", value="Hello, how are you?", height=150)
        input_data = {
            "text": translation_text,
            "source_language": source_lang,
            "target_language": target_lang
        }

    elif pipeline_tag in ["image-classification", "object-detection", "image-segmentation"]:
        st.markdown("### Image Input")
        upload_method = st.radio("Select input method", ["Upload Image", "Image URL"])

        if upload_method == "Upload Image":
            uploaded_file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])
            if uploaded_file is not None:
                st.image(uploaded_file, caption="Uploaded Image", use_column_width=True)
                input_data = {"image": uploaded_file}
        else:
            image_url = st.text_input("Image URL", value="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/distilbert-base-uncased-finetuned-sst-2-english-architecture.png")
            if image_url:
                st.image(image_url, caption="Image from URL", use_column_width=True)
                input_data = {"image_url": image_url}

    elif pipeline_tag in ["audio-classification", "automatic-speech-recognition"]:
        st.markdown("### Audio Input")
        upload_method = st.radio("Select input method", ["Upload Audio", "Audio URL"])

        if upload_method == "Upload Audio":
            uploaded_file = st.file_uploader("Upload an audio file", type=["mp3", "wav", "ogg"])
            if uploaded_file is not None:
                st.audio(uploaded_file)
                input_data = {"audio": uploaded_file}
        else:
            audio_url = st.text_input("Audio URL")
            if audio_url:
                st.audio(audio_url)
                input_data = {"audio_url": audio_url}

    # Execute inference
    if st.button("Run Inference", use_container_width=True):
        if input_data:
            with st.spinner("Running inference..."):
                # In a real implementation, this would call the HF Inference API
                # For demo purposes, simulate a response
                time.sleep(2)

                # Generate a sample response based on the pipeline type
                if pipeline_tag == "text-classification":
                    result = [
                        {"label": "POSITIVE", "score": 0.9231},
                        {"label": "NEGATIVE", "score": 0.0769}
                    ]
                elif pipeline_tag == "token-classification":
                    result = [
                        {"entity": "B-PER", "word": "This", "score": 0.2, "index": 0, "start": 0, "end": 4},
                        {"entity": "O", "word": "model", "score": 0.95, "index": 1, "start": 5, "end": 10},
                        {"entity": "O", "word": "is", "score": 0.99, "index": 2, "start": 11, "end": 13},
                        {"entity": "B-MISC", "word": "amazing", "score": 0.85, "index": 3, "start": 14, "end": 21}
                    ]
                elif pipeline_tag == "text-generation":
                    result = {
                        "generated_text": input_data["text"] + " It provides state-of-the-art performance on a wide range of natural language processing tasks, including sentiment analysis, named entity recognition, and question answering. The model was trained on a diverse corpus of text data, allowing it to generate coherent and contextually relevant responses."
                    }
                elif pipeline_tag == "summarization":
                    result = {
                        "summary_text": "This model provides excellent performance."
                    }
                elif pipeline_tag == "question-answering":
                    result = {
                        "answer": "a transformer-based language model",
                        "start": 9,
                        "end": 45,
                        "score": 0.953
                    }
                elif pipeline_tag == "translation":
                    if input_data["target_language"] == "French":
                        result = {"translation_text": "Bonjour, comment allez-vous?"}
                    elif input_data["target_language"] == "German":
                        result = {"translation_text": "Hallo, wie geht es dir?"}
                    elif input_data["target_language"] == "Spanish":
                        result = {"translation_text": "Hola, ¿cómo estás?"}
                    elif input_data["target_language"] == "Chinese":
                        result = {"translation_text": "你好,你好吗?"}
                    else:
                        result = {"translation_text": "Hello, how are you?"}
                elif pipeline_tag in ["image-classification"]:
                    result = [
                        {"label": "diagram", "score": 0.9712},
                        {"label": "architecture", "score": 0.0231},
                        {"label": "document", "score": 0.0057}
                    ]
                elif pipeline_tag in ["object-detection"]:
                    result = [
                        {"label": "box", "score": 0.9712, "box": {"xmin": 10, "ymin": 20, "xmax": 100, "ymax": 80}},
                        {"label": "text", "score": 0.8923, "box": {"xmin": 120, "ymin": 30, "xmax": 250, "ymax": 60}}
                    ]
                else:
                    result = {"result": "Sample response for " + pipeline_tag}

                # Display the results
                st.markdown("### Inference Results")

                # Different visualizations based on the response type
                if pipeline_tag == "text-classification":
                    # Create a bar chart for classification results
                    result_df = pd.DataFrame(result)
                    fig = px.bar(
                        result_df,
                        x="label",
                        y="score",
                        color="score",
                        color_continuous_scale=px.colors.sequential.Viridis,
                        title="Classification Results"
                    )
                    st.plotly_chart(fig, use_container_width=True)

                    # Show the raw results
                    st.json(result)

                elif pipeline_tag == "token-classification":
                    # Display entity highlighting
                    st.markdown("#### Named Entities")

                    # Create HTML with colored spans for entities
                    html = ""
                    input_text = input_data["text"]
                    entities = {}

                    for item in result:
                        if item["entity"].startswith("B-") or item["entity"].startswith("I-"):
                            entity_type = item["entity"][2:]  # Remove B- or I- prefix
                            entities[entity_type] = entities.get(entity_type, 0) + 1

                    # Create a color map for entity types
                    colors = px.colors.qualitative.Plotly[:len(entities)]
                    entity_colors = dict(zip(entities.keys(), colors))

                    # Create the HTML
                    for item in result:
                        word = item["word"]
                        entity = item["entity"]

                        if entity == "O":
                            html += f"{word} "
                        else:
                            entity_type = entity[2:] if entity.startswith("B-") or entity.startswith("I-") else entity
                            color = entity_colors.get(entity_type, "#CCCCCC")
                            html += f'<span style="background-color: {color}; padding: 2px; border-radius: 3px;" title="{entity} ({item["score"]:.2f})">{word}</span> '

                    st.markdown(f'<div style="line-height: 2.5;">{html}</div>', unsafe_allow_html=True)

                    # Display legend
                    st.markdown("#### Entity Legend")
                    legend_html = "".join([
                        f'<span style="background-color: {color}; padding: 2px 8px; margin-right: 10px; border-radius: 3px;">{entity}</span>'
                        for entity, color in entity_colors.items()
                    ])
                    st.markdown(f'<div>{legend_html}</div>', unsafe_allow_html=True)

                    # Show the raw results
                    st.json(result)

                elif pipeline_tag in ["text-generation", "summarization", "translation"]:
                    # Display the generated text
                    response_key = "generated_text" if "generated_text" in result else "summary_text" if "summary_text" in result else "translation_text"
                    st.markdown("#### Output Text")
                    st.markdown(f'<div style="background-color: #f0f2f6; padding: 20px; border-radius: 10px;">{result[response_key]}</div>', unsafe_allow_html=True)

                    # Text stats
                    st.markdown("#### Text Statistics")
                    input_length = len(input_data["text"]) if "text" in input_data else 0
                    output_length = len(result[response_key])

                    col1, col2, col3 = st.columns(3)
                    with col1:
                        st.metric("Input Length", input_length, "characters")
                    with col2:
                        st.metric("Output Length", output_length, "characters")
                    with col3:
                        compression = ((output_length - input_length) / input_length * 100) if input_length > 0 else 0
                        st.metric("Length Change", f"{compression:.1f}%", f"{output_length - input_length} chars")

                elif pipeline_tag == "question-answering":
                    # Highlight the answer in the context
                    st.markdown("#### Answer")
                    st.markdown(f'<div style="background-color: #e6f3ff; padding: 10px; border-radius: 5px; font-weight: bold;">{result["answer"]}</div>', unsafe_allow_html=True)

                    # Show the answer in context
                    if "context" in input_data:
                        st.markdown("#### Answer in Context")
                        context = input_data["context"]
                        start = result["start"]
                        end = result["end"]

                        highlighted_context = (
                            context[:start] +
                            f'<span style="background-color: #ffeb3b; font-weight: bold;">{context[start:end]}</span>' +
                            context[end:]
                        )

                        st.markdown(f'<div style="background-color: #f0f2f6; padding: 15px; border-radius: 10px; line-height: 1.5;">{highlighted_context}</div>', unsafe_allow_html=True)

                    # Confidence score
                    st.markdown("#### Confidence")
                    st.progress(result["score"])
                    st.text(f"Confidence Score: {result['score']:.4f}")

                elif pipeline_tag == "image-classification":
                    # Create a bar chart for classification results
                    result_df = pd.DataFrame(result)
                    fig = px.bar(
                        result_df,
                        x="score",
                        y="label",
                        orientation='h',
                        color="score",
                        color_continuous_scale=px.colors.sequential.Viridis,
                        title="Image Classification Results"
                    )
                    fig.update_layout(yaxis={'categoryorder':'total ascending'})
                    st.plotly_chart(fig, use_container_width=True)

                    # Show the raw results
                    st.json(result)

                else:
                    # Generic display for other types
                    st.json(result)

                # Option to save the results
                st.download_button(
                    label="Download Results",
                    data=json.dumps(result, indent=2),
                    file_name="inference_results.json",
                    mime="application/json"
                )

        else:
            st.warning("Please provide input data for inference")

    # API integration options
    with st.expander("API Integration"):
        st.markdown("### Use this model in your application")

        # Python code example
        st.markdown("#### Python")
        python_code = f"""
```python
import requests

API_URL = "https://api-inference.huggingface.co/models/{model_info.modelId}"
headers = {{"Authorization": "Bearer YOUR_API_KEY"}}

def query(payload):
    response = requests.post(API_URL, headers=headers, json=payload)
    return response.json()

# Example usage
output = query({{
    "inputs": "This model is amazing!"
}})
print(output)
```
"""
        st.markdown(python_code)

        # JavaScript code example
        st.markdown("#### JavaScript")
        js_code = f"""
```javascript
async function query(data) {{
    const response = await fetch(
        "https://api-inference.huggingface.co/models/{model_info.modelId}",
        {{
            headers: {{ Authorization: "Bearer YOUR_API_KEY" }},
            method: "POST",
            body: JSON.stringify(data),
        }}
    );
    const result = await response.json();
    return result;
}}

// Example usage
query({{"inputs": "This model is amazing!"}}).then((response) => {{
    console.log(JSON.stringify(response));
}});
```
"""
        st.markdown(js_code)
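The dashboard above simulates responses with `time.sleep(2)`. Wiring it to the hosted Inference API would follow the same `requests` pattern the API Integration expander already documents; a minimal sketch (the helper name, token handling, and timeout are assumptions, not part of the app):

import requests

def run_remote_inference(model_id, payload, token, timeout=30):
    """POST `payload` to the hosted Inference API for `model_id` and return parsed JSON."""
    url = f"https://api-inference.huggingface.co/models/{model_id}"
    headers = {"Authorization": f"Bearer {token}"}
    response = requests.post(url, headers=headers, json=payload, timeout=timeout)
    response.raise_for_status()  # surfaces HTTP errors, e.g. 503 while the model loads
    return response.json()

# Hypothetical usage inside the dashboard:
# result = run_remote_inference(model_info.modelId, {"inputs": input_data["text"]}, token)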
components/sidebar.py
ADDED
@@ -0,0 +1,148 @@
import streamlit as st
import datetime
from utils.auth import logout

def render_sidebar():
    """Render the sidebar with navigation links and user info"""
    with st.sidebar:
        # App logo/title
        st.markdown(
            """
            <div style="text-align: center; margin-bottom: 16px;">
                <h2 style="margin-bottom: 0;">🤗 HF Model Manager</h2>
            </div>
            """,
            unsafe_allow_html=True,
        )

        # User information with badge
        st.markdown(
            f"""
            <div style="margin-bottom: 24px; padding: 16px; background-color: #F3F4F6;
                border-radius: 8px; border: 1px solid #E5E7EB; box-shadow: 0 2px 4px rgba(0,0,0,0.05);">
                <div style="font-size: 13px; color: #6B7280; margin-bottom: 4px;">Logged in as</div>
                <div style="display: flex; align-items: center; justify-content: space-between;">
                    <div style="font-weight: bold; font-size: 16px;">{st.session_state.username}</div>
                    <div class="badge hf-badge" style="padding: 2px 6px; font-size: 10px;">
                        <img src="https://huggingface.co/front/assets/huggingface_logo-noborder.svg"
                            alt="HF" style="height: 10px; margin-right: 2px;">
                        Pro
                    </div>
                </div>
                <div style="font-size: 12px; color: #6B7280; margin-top: 8px;">
                    Last login: {datetime.datetime.now().strftime("%B %d, %Y")}
                </div>
            </div>
            """,
            unsafe_allow_html=True,
        )

        # Navigation with tooltips
        st.markdown(
            """
            <h3 style="font-size: 16px; margin-bottom: 12px; color: #1A1A1A;">
                Navigation
            </h3>
            """,
            unsafe_allow_html=True,
        )

        # Home button
        if st.button("🏠 Home", use_container_width=True, key="home_btn"):
            st.session_state.page = "home"
            st.session_state.selected_model = None
            st.rerun()

        # Repositories button
        if st.button(
            "📚 Manage Repositories", use_container_width=True, key="repo_btn"
        ):
            st.session_state.page = "repository_management"
            st.rerun()

        # Create new repository button with prominent styling
        st.markdown(
            """
            <div style="margin: 16px 0;">
            """,
            unsafe_allow_html=True,
        )
        if st.button(
            "➕ Create New Repository",
            use_container_width=True,
            key="create_new_repo"
        ):
            st.session_state.page = "repository_management"
            st.rerun()

        # Analytics button
        if st.button("📊 Analytics", use_container_width=True, key="analytics_btn"):
            st.session_state.page = "analytics"
            st.session_state.selected_model = None
            st.rerun()

        # Batch Operations button
        if st.button("🔄 Batch Operations", use_container_width=True, key="batch_btn"):
            st.session_state.page = "batch_operations"
            st.session_state.selected_model = None
            st.rerun()


        # Refresh models button
        if st.button(
            "🔄 Refresh Models", use_container_width=True, key="refresh_models"
        ):
            with st.spinner("Refreshing models..."):
                try:
                    st.session_state.models = st.session_state.client.get_user_models()
                    st.success("Models refreshed!")
                except Exception as e:
                    st.error(f"Error refreshing models: {str(e)}")

        # Spacer
        st.markdown(
            """
            <div style="flex-grow: 1; min-height: 20px;"></div>
            """,
            unsafe_allow_html=True,
        )

        # Bottom links with improved styling
        st.markdown(
            """
            <hr style="margin: 16px 0; border: none; height: 1px; background-color: #E5E7EB;">
            """,
            unsafe_allow_html=True,
        )

        # Helpful links section
        st.markdown(
            """
            <div style="margin-bottom: 16px;">
                <a href="https://huggingface.co/docs" target="_blank" style="display: flex; align-items: center; text-decoration: none; color: #6B7280; font-size: 13px; padding: 8px 0;">
                    <span style="margin-right: 8px;">📚</span> Hugging Face Documentation
                </a>
                <a href="https://huggingface.co/models" target="_blank" style="display: flex; align-items: center; text-decoration: none; color: #6B7280; font-size: 13px; padding: 8px 0;">
                    <span style="margin-right: 8px;">🔍</span> Browse Models
                </a>
                <a href="https://huggingface.co/spaces" target="_blank" style="display: flex; align-items: center; text-decoration: none; color: #6B7280; font-size: 13px; padding: 8px 0;">
                    <span style="margin-right: 8px;">🚀</span> Explore Spaces
                </a>
            </div>
            """,
            unsafe_allow_html=True,
        )

        # Version info
        st.markdown(
            """
            <div style="font-size: 12px; color: #9CA3AF; text-align: center; margin-bottom: 12px;">
                HF Model Manager v1.0.0
            </div>
            """,
            unsafe_allow_html=True,
        )

        # Logout button
        if st.button("🚪 Logout", use_container_width=True, key="logout_btn"):
            logout()
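The refresh button relies on `client.get_user_models()`. A minimal sketch of what such a helper might look like on top of `huggingface_hub` (the wrapper shape is an assumption; `HfApi.list_models` is the only real API used):

from huggingface_hub import HfApi

def get_user_models(username, token=None):
    """Return the models owned by `username` as a list of ModelInfo objects."""
    api = HfApi(token=token)
    return list(api.list_models(author=username))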
components/upload_model.py
ADDED
@@ -0,0 +1,122 @@
import streamlit as st
import os
import tempfile
import io
import zipfile
import shutil


def upload_model_form(model_info):
    """Form for uploading model files"""
    st.subheader("Upload Model Files")

    if not model_info:
        st.error("Model information not found")
        return False

    repo_id = model_info.modelId

    st.info(
        """
        Upload model files to your repository. You can upload:

        - Individual model files (e.g., model weights, configuration files)
        - A ZIP archive containing multiple files (will be extracted automatically)

        **Important**: Large files may take time to upload.
        """
    )

    with st.form("upload_model_form"):
        # File upload
        uploaded_files = st.file_uploader(
            "Upload model files",
            accept_multiple_files=True,
            help="Select one or more files to upload",
        )

        # ZIP handling option
        extract_zip = st.checkbox(
            "Extract ZIP files automatically",
            value=True,
            help="If enabled, ZIP files will be extracted in the repository",
        )

        # Commit message
        commit_message = st.text_input(
            "Commit message",
            value="Upload model files",
            help="Describe the changes you're making",
        )

        # Submit button
        submitted = st.form_submit_button("Upload Files", use_container_width=True)

        if submitted and uploaded_files:
            with st.spinner("Uploading files..."):
                try:
                    # Create a temporary directory to work with files
                    with tempfile.TemporaryDirectory() as temp_dir:
                        files_to_upload = {}

                        for uploaded_file in uploaded_files:
                            file_content = uploaded_file.read()
                            file_name = uploaded_file.name

                            # Handle ZIP files if extraction is enabled
                            if extract_zip and file_name.lower().endswith(".zip"):
                                with zipfile.ZipFile(
                                    io.BytesIO(file_content)
                                ) as zip_ref:
                                    zip_ref.extractall(
                                        os.path.join(temp_dir, "extracted")
                                    )

                                # Walk through the extracted files
                                for root, _, files in os.walk(
                                    os.path.join(temp_dir, "extracted")
                                ):
                                    for file in files:
                                        file_path = os.path.join(root, file)
                                        rel_path = os.path.relpath(
                                            file_path,
                                            os.path.join(temp_dir, "extracted"),
                                        )

                                        with open(file_path, "rb") as f:
                                            files_to_upload[rel_path] = f.read()
                            else:
                                # Regular file
                                files_to_upload[file_name] = file_content

                        # Upload the files
                        if files_to_upload:
                            success, response = (
                                st.session_state.client.upload_model_files(
                                    repo_id=repo_id,
                                    files=files_to_upload,
                                    commit_message=commit_message,
                                )
                            )

                            if success:
                                st.success(
                                    f"Successfully uploaded {len(files_to_upload)} files!"
                                )
                                return True
                            else:
                                st.error(f"Failed to upload files: {response}")
                                return False
                        else:
                            st.warning("No files to upload")
                            return False

                except Exception as e:
                    st.error(f"Error uploading files: {str(e)}")
                    return False

        elif submitted:
            st.warning("Please select files to upload")
            return False

    return False
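`upload_model_files` is a method on the app's client wrapper. A plausible implementation pushes the whole `{path: bytes}` mapping as a single commit via `huggingface_hub`; a sketch under that assumption (the `(success, response)` return shape mirrors the call above, but this is not the app's actual implementation):

from huggingface_hub import HfApi, CommitOperationAdd

def upload_model_files(repo_id, files, commit_message, token=None):
    """Commit a {path_in_repo: raw_bytes} mapping to `repo_id` in one operation."""
    try:
        operations = [
            CommitOperationAdd(path_in_repo=path, path_or_fileobj=content)
            for path, content in files.items()
        ]
        info = HfApi(token=token).create_commit(
            repo_id=repo_id,
            operations=operations,
            commit_message=commit_message,
        )
        return True, info
    except Exception as e:
        return False, str(e)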
components/version_control.py
ADDED
@@ -0,0 +1,194 @@
import streamlit as st
import pandas as pd
from datetime import datetime
import difflib
import json

def fetch_model_versions(repo_id):
    """
    Fetch model versions from the Hugging Face API.
    In a real implementation, this would call the HF API to get version history.
    For now, we'll use demo data.
    """
    # In a production app, you would fetch this from the Hugging Face API
    # For demo purposes, create some sample version data
    versions = [
        {
            "version": "v1.0",
            "commit_id": "abc123",
            "timestamp": "2023-01-15T10:30:00Z",
            "author": st.session_state.username,
            "message": "Initial model release",
            "files_changed": 5,
            "performance": {"accuracy": 0.85, "f1": 0.82}
        },
        {
            "version": "v1.1",
            "commit_id": "def456",
            "timestamp": "2023-02-20T14:45:00Z",
            "author": st.session_state.username,
            "message": "Improved tokenization",
            "files_changed": 2,
            "performance": {"accuracy": 0.87, "f1": 0.84}
        },
        {
            "version": "v2.0",
            "commit_id": "ghi789",
            "timestamp": "2023-03-10T09:15:00Z",
            "author": st.session_state.username,
            "message": "Major model architecture upgrade",
            "files_changed": 12,
            "performance": {"accuracy": 0.92, "f1": 0.90}
        }
    ]
    return versions

def render_version_history(model_info):
    """Render the version history of a model"""
    if not model_info:
        st.error("Model information not found")
        return

    repo_id = model_info.modelId
    st.subheader("🔄 Version History")

    with st.spinner("Loading version history..."):
        versions = fetch_model_versions(repo_id)

    if not versions:
        st.info("No version history found for this model.")
        return

    # Convert to DataFrame for easier display
    df = pd.DataFrame(versions)

    # Format timestamp
    df["timestamp"] = pd.to_datetime(df["timestamp"]).dt.strftime("%Y-%m-%d %H:%M")

    # Create a cleaner display version
    display_df = df[["version", "timestamp", "author", "message", "files_changed"]]
    display_df.columns = ["Version", "Date", "Author", "Commit Message", "Files Changed"]

    # Show the version history
    st.dataframe(display_df, use_container_width=True)

    # Version comparison
    st.subheader("Compare Versions")
    col1, col2 = st.columns(2)

    with col1:
        base_version = st.selectbox(
            "Base Version",
            options=df["version"].tolist(),
            index=0
        )

    with col2:
        compare_version = st.selectbox(
            "Compare Version",
            options=[v for v in df["version"].tolist() if v != base_version],
            index=0
        )

    if st.button("Compare", use_container_width=True):
        with st.spinner("Generating comparison..."):
            # In a real implementation, fetch the actual data from each version
            # For demo, use the sample performance metrics
            base_data = df[df["version"] == base_version].iloc[0]
            compare_data = df[df["version"] == compare_version].iloc[0]

            # Display comparison
            col1, col2 = st.columns(2)

            with col1:
                st.markdown(f"### {base_version}")
                st.markdown(f"**Commit:** {base_data['commit_id']}")
                st.markdown(f"**Date:** {base_data['timestamp']}")
                st.markdown(f"**Author:** {base_data['author']}")
                st.markdown(f"**Message:** {base_data['message']}")

                # Performance metrics
                st.markdown("#### Performance Metrics")
                for metric, value in base_data["performance"].items():
                    st.markdown(f"**{metric.capitalize()}:** {value:.4f}")

            with col2:
                st.markdown(f"### {compare_version}")
                st.markdown(f"**Commit:** {compare_data['commit_id']}")
                st.markdown(f"**Date:** {compare_data['timestamp']}")
                st.markdown(f"**Author:** {compare_data['author']}")
                st.markdown(f"**Message:** {compare_data['message']}")

                # Performance metrics
                st.markdown("#### Performance Metrics")
                for metric, value in compare_data["performance"].items():
                    # Calculate change
                    base_value = base_data["performance"].get(metric, 0)
                    change = value - base_value
                    change_pct = (change / base_value * 100) if base_value != 0 else float('inf')

                    # Display with change indicator
                    if change > 0:
                        st.markdown(f"**{metric.capitalize()}:** {value:.4f} 📈 **(+{change:.4f}, {change_pct:.2f}%)**")
                    elif change < 0:
                        st.markdown(f"**{metric.capitalize()}:** {value:.4f} 📉 **({change:.4f}, {change_pct:.2f}%)**")
                    else:
                        st.markdown(f"**{metric.capitalize()}:** {value:.4f} (no change)")

            # Show visual diff of model config
            st.subheader("Configuration Changes")

            # Sample configs (in a real app, you'd fetch these from the API)
            base_config = {
                "hidden_size": 768,
                "num_attention_heads": 12,
                "num_hidden_layers": 6,
                "vocab_size": 30000
            }

            compare_config = {
                "hidden_size": 1024,
                "num_attention_heads": 16,
                "num_hidden_layers": 8,
                "vocab_size": 30000
            }

            # Generate a formatted diff
            base_str = json.dumps(base_config, indent=2).splitlines()
            compare_str = json.dumps(compare_config, indent=2).splitlines()

            diff = difflib.unified_diff(
                base_str,
                compare_str,
                fromfile=f'config_{base_version}',
                tofile=f'config_{compare_version}',
                lineterm=''
            )

            diff_html = []
            for line in diff:
                if line.startswith('+'):
                    diff_html.append(f'<span style="color: green">{line}</span>')
                elif line.startswith('-'):
                    diff_html.append(f'<span style="color: red">{line}</span>')
                elif line.startswith('@@'):
                    diff_html.append(f'<span style="color: purple">{line}</span>')
                else:
                    diff_html.append(line)

            st.markdown('<div style="background-color: #f5f5f5; padding: 10px; border-radius: 5px; font-family: monospace; white-space: pre-wrap;">' + '<br>'.join(diff_html) + '</div>', unsafe_allow_html=True)

    # Rollback functionality
    st.subheader("Rollback to Previous Version")
    rollback_version = st.selectbox(
        "Select version to rollback to",
        options=df["version"].tolist(),
        index=len(df)-2  # Default to second-to-last version
    )

    if st.button("Rollback", use_container_width=True, type="primary"):
        with st.spinner("Rolling back to version " + rollback_version):
            # In a real implementation, this would call the HF API to perform the rollback
            st.success(f"Successfully rolled back to {rollback_version}")
            # Here you would update the model information and refresh the view
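To replace the demo records, real commit history is available through `HfApi.list_repo_commits` in recent `huggingface_hub` releases. A sketch of an alternative `fetch_model_versions` under that assumption (`files_changed` and `performance` are not returned by the commit listing, so they are omitted; the `GitCommitInfo` field names used here follow the library docs and should be verified against the installed version):

from huggingface_hub import HfApi

def fetch_model_versions_from_hub(repo_id, token=None):
    """Return real commit history for `repo_id`, shaped like the demo records above."""
    commits = HfApi(token=token).list_repo_commits(repo_id)
    return [
        {
            "version": commit.commit_id[:7],
            "commit_id": commit.commit_id,
            "timestamp": commit.created_at.isoformat(),
            "author": ", ".join(commit.authors),
            "message": commit.title,
        }
        for commit in commits
    ]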
generated-icon.png
ADDED
(binary image file, stored with Git LFS)
pages/__init__.py
ADDED
@@ -0,0 +1 @@
# This file is intentionally left empty to make the directory a Python package
pages/analytics.py
ADDED
@@ -0,0 +1,277 @@
import streamlit as st
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
from datetime import datetime, timedelta
import random  # Only used for demo data, remove in production

def generate_demo_data(model_ids, days=30):
    """Generate demo data for visualization purposes"""
    today = datetime.now()
    data = []

    # For each model, generate 30 days of data
    for model_id in model_ids:
        base_downloads = random.randint(10, 1000)
        base_likes = random.randint(5, 200)

        # Generate daily data with some randomness and trend
        for i in range(days):
            date = (today - timedelta(days=days-i-1)).strftime("%Y-%m-%d")
            daily_downloads = max(1, int(base_downloads * (1 + 0.1 * i/days) * random.uniform(0.8, 1.2)))
            daily_likes = max(0, int(base_likes * (1 + 0.05 * i/days) * random.uniform(0.7, 1.3)))

            data.append({
                "model_id": model_id,
                "date": date,
                "downloads": base_downloads + daily_downloads,
                "likes": base_likes + daily_likes,
                "daily_downloads": daily_downloads,
                "daily_likes": daily_likes
            })

            # Update base values for next day (cumulative)
            base_downloads += daily_downloads
            base_likes += daily_likes

    return pd.DataFrame(data)

def render_analytics_page():
    st.title("📊 Model Performance Analytics")

    if "models" not in st.session_state or not st.session_state.models:
        st.info("No models found. Please create or import models first.")
        return

    # Get model IDs from the session state
    model_ids = [model.modelId for model in st.session_state.models]

    # Time period selection
    time_period = st.selectbox(
        "Select Time Period",
        ["Last 7 days", "Last 30 days", "Last 90 days", "All time"],
        index=1
    )

    # Convert time period to days
    days_mapping = {
        "Last 7 days": 7,
        "Last 30 days": 30,
        "Last 90 days": 90,
        "All time": 180  # Default to 6 months for demo
    }
    days = days_mapping[time_period]

    # In a real implementation, we would fetch this data from the Hugging Face API
    # For now, generate demo data
    df = generate_demo_data(model_ids, days)

    # Model selection for detailed view
    selected_models = st.multiselect(
        "Select Models to Compare",
        model_ids,
        default=model_ids[:min(3, len(model_ids))]
    )

    if not selected_models:
        st.warning("Please select at least one model to display analytics.")
        return

    # Filter data for selected models
    filtered_df = df[df["model_id"].isin(selected_models)]

    # Create tabs for different analytics views
    tab1, tab2, tab3, tab4 = st.tabs(["Downloads", "Likes", "Growth Rate", "Comparison"])

    with tab1:
        st.subheader("Downloads Over Time")

        # Cumulative downloads
        fig_cumulative = px.line(
            filtered_df,
            x="date",
            y="downloads",
            color="model_id",
            title="Cumulative Downloads",
            labels={"downloads": "Total Downloads", "date": "Date", "model_id": "Model"}
        )
        st.plotly_chart(fig_cumulative, use_container_width=True)

        # Daily downloads
        fig_daily = px.bar(
            filtered_df,
            x="date",
            y="daily_downloads",
            color="model_id",
            title="Daily Downloads",
            labels={"daily_downloads": "Daily Downloads", "date": "Date", "model_id": "Model"},
            barmode="group"
        )
        st.plotly_chart(fig_daily, use_container_width=True)

    with tab2:
        st.subheader("Likes Over Time")

        # Cumulative likes
        fig_cumulative = px.line(
            filtered_df,
            x="date",
            y="likes",
            color="model_id",
            title="Cumulative Likes",
            labels={"likes": "Total Likes", "date": "Date", "model_id": "Model"}
        )
        st.plotly_chart(fig_cumulative, use_container_width=True)

        # Daily likes
        fig_daily = px.bar(
            filtered_df,
            x="date",
            y="daily_likes",
            color="model_id",
            title="Daily Likes",
            labels={"daily_likes": "Daily Likes", "date": "Date", "model_id": "Model"},
            barmode="group"
        )
        st.plotly_chart(fig_daily, use_container_width=True)

    with tab3:
        st.subheader("Growth Metrics")

        # Calculate growth rates
        growth_data = []
        for model in selected_models:
            model_data = filtered_df[filtered_df["model_id"] == model]
            if len(model_data) >= 2:
                first_day = model_data.iloc[0]
                last_day = model_data.iloc[-1]

                # Calculate download growth
                if first_day["downloads"] > 0:
                    download_growth = (last_day["downloads"] - first_day["downloads"]) / first_day["downloads"] * 100
                else:
                    download_growth = 100 if last_day["downloads"] > 0 else 0

                # Calculate like growth
                if first_day["likes"] > 0:
                    like_growth = (last_day["likes"] - first_day["likes"]) / first_day["likes"] * 100
                else:
                    like_growth = 100 if last_day["likes"] > 0 else 0

                growth_data.append({
                    "model_id": model,
                    "download_growth": download_growth,
                    "like_growth": like_growth,
                    "downloads": last_day["downloads"],
                    "likes": last_day["likes"]
                })

        growth_df = pd.DataFrame(growth_data)

        # Show growth rates
        if not growth_df.empty:
            col1, col2 = st.columns(2)

            with col1:
                fig = px.bar(
                    growth_df,
                    x="model_id",
                    y="download_growth",
                    title="Download Growth Rate (%)",
                    labels={"download_growth": "Growth (%)", "model_id": "Model"},
                    color="download_growth",
                    color_continuous_scale=px.colors.sequential.Blues,
                )
                st.plotly_chart(fig, use_container_width=True)

            with col2:
                fig = px.bar(
                    growth_df,
                    x="model_id",
                    y="like_growth",
                    title="Like Growth Rate (%)",
                    labels={"like_growth": "Growth (%)", "model_id": "Model"},
                    color="like_growth",
                    color_continuous_scale=px.colors.sequential.Reds,
                )
                st.plotly_chart(fig, use_container_width=True)
        else:
            st.info("Not enough data to calculate growth rates.")

    with tab4:
        st.subheader("Model Comparison")

        # Get the most recent data point for each model
        latest_data = filtered_df.groupby("model_id").last().reset_index()

        # Create a radar chart for model comparison
        categories = ["downloads", "likes", "daily_downloads", "daily_likes"]
        fig = go.Figure()

        for model in latest_data["model_id"]:
            model_row = latest_data[latest_data["model_id"] == model].iloc[0]

            # Normalize values for radar chart (0-1 scale)
            max_vals = latest_data[categories].max()
            normalized_vals = [model_row[cat]/max_vals[cat] if max_vals[cat] > 0 else 0 for cat in categories]

            fig.add_trace(go.Scatterpolar(
                r=normalized_vals,
                theta=["Total Downloads", "Total Likes", "Daily Downloads", "Daily Likes"],
                fill='toself',
                name=model
            ))

        fig.update_layout(
            polar=dict(
                radialaxis=dict(
                    visible=True,
                    range=[0, 1]
                )),
            showlegend=True
        )

        st.plotly_chart(fig, use_container_width=True)

        # Comparison table
        st.subheader("Numeric Comparison")
        comparison_df = latest_data[["model_id", "downloads", "likes", "daily_downloads", "daily_likes"]]
        comparison_df.columns = ["Model", "Total Downloads", "Total Likes", "Daily Downloads", "Daily Likes"]
        st.dataframe(comparison_df, use_container_width=True)

    # Analytics insights
    st.subheader("📈 Key Insights")

    # Calculate some basic insights
    if not filtered_df.empty:
        # Most downloaded model (single row with the highest cumulative downloads)
        most_downloaded = filtered_df.loc[filtered_df["downloads"].idxmax()]
        # Fastest growing model in terms of downloads
        growth_rates = []
        for model in selected_models:
            model_data = filtered_df[filtered_df["model_id"] == model]
            if len(model_data) >= 2:
                first_downloads = model_data.iloc[0]["downloads"]
                last_downloads = model_data.iloc[-1]["downloads"]
                growth_rate = (last_downloads - first_downloads) / max(1, first_downloads)
                growth_rates.append((model, growth_rate))

        col1, col2 = st.columns(2)

        with col1:
            st.info(f"💡 Most downloaded model: **{most_downloaded['model_id']}** with **{most_downloaded['downloads']}** total downloads")

            if growth_rates:
                fastest_growing = max(growth_rates, key=lambda x: x[1])
                st.info(f"💡 Fastest growing model: **{fastest_growing[0]}** with a growth rate of **{fastest_growing[1]*100:.2f}%**")

        with col2:
            # Most liked model (single row with the highest cumulative likes)
            most_liked = filtered_df.loc[filtered_df["likes"].idxmax()]
            st.info(f"💡 Most liked model: **{most_liked['model_id']}** with **{most_liked['likes']}** total likes")

            # Average daily downloads
            avg_daily = filtered_df.groupby("model_id")["daily_downloads"].mean().reset_index()
            highest_avg = avg_daily.loc[avg_daily["daily_downloads"].idxmax()]
            st.info(f"💡 Highest avg daily downloads: **{highest_avg['model_id']}** with **{highest_avg['daily_downloads']:.1f}** downloads/day")
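The Hub API reports aggregate download and like counters per model rather than a ready-made daily series, so real analytics would need periodic snapshots in place of `generate_demo_data`. A minimal sketch of such a snapshot job (the CSV path and schedule are assumptions; `HfApi.model_info` is the real call):

import csv
import datetime

from huggingface_hub import HfApi

def snapshot_model_stats(model_ids, path="model_stats.csv", token=None):
    """Append today's download/like counters for each model to a CSV for later charting."""
    api = HfApi(token=token)
    today = datetime.date.today().isoformat()
    with open(path, "a", newline="") as f:
        writer = csv.writer(f)
        for model_id in model_ids:
            info = api.model_info(model_id)
            writer.writerow([model_id, today, info.downloads or 0, info.likes or 0])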
pages/batch_operations.py
ADDED
@@ -0,0 +1,319 @@
import streamlit as st
import pandas as pd
import time

def render_batch_operations():
    """Render the batch operations page"""
    st.title("🔄 Batch Operations")

    if "models" not in st.session_state or not st.session_state.models:
        st.info("No models found. Please create repositories first.")
        if st.button("Go to Dashboard", use_container_width=True):
            st.session_state.page = "home"
            st.rerun()
        return

    # Create a dataframe for model selection
    models_data = []
    for model in st.session_state.models:
        try:
            models_data.append({
                "Select": False,  # Checkbox column
                "Model Name": model.modelId.split("/")[-1],
                "Full ID": model.modelId,
                "Downloads": getattr(model, "downloads", 0),
                "Likes": getattr(model, "likes", 0),
                "Private": getattr(model, "private", False),
                "Tags": ", ".join(getattr(model, "tags", []) or []),
            })
        except Exception as e:
            st.warning(f"Error processing model {getattr(model, 'modelId', 'unknown')}: {str(e)}")

    if not models_data:
        st.error("Failed to process model data.")
        return

    # Convert to DataFrame for display
    df = pd.DataFrame(models_data)

    st.markdown("### Select Models for Batch Operations")
    st.markdown("Use the checkboxes to select models you want to operate on.")

    # Editable dataframe
    edited_df = st.data_editor(
        df,
        column_config={
            "Select": st.column_config.CheckboxColumn(
                "Select",
                help="Select for batch operations",
                default=False,
            ),
            "Full ID": st.column_config.TextColumn(
                "Repository ID",
                help="Full repository ID",
                disabled=True,
            ),
            "Downloads": st.column_config.NumberColumn(
                "Downloads",
                help="Number of downloads",
                disabled=True,
            ),
            "Likes": st.column_config.NumberColumn(
                "Likes",
                help="Number of likes",
                disabled=True,
            ),
            "Private": st.column_config.CheckboxColumn(
                "Private",
                help="Repository visibility",
                disabled=True,
            ),
            "Tags": st.column_config.TextColumn(
                "Tags",
                help="Current tags",
                disabled=True,
            ),
        },
        hide_index=True,
        use_container_width=True,
    )

    # Get selected models
    selected_models = edited_df[edited_df["Select"] == True]
    selected_count = len(selected_models)

    if selected_count > 0:
        st.success(f"Selected {selected_count} models for batch operations.")
    else:
        st.info("Please select at least one model to perform batch operations.")

    # Batch operations tabs
    if selected_count > 0:
        tab1, tab2, tab3, tab4 = st.tabs(["Update Tags", "Update Visibility", "Add Collaborators", "Delete"])

        with tab1:
            st.subheader("Update Tags")

            # Get available tags
            available_tags = st.session_state.client.get_model_tags()

            # Tags selection
            selected_tags = st.multiselect(
                "Select tags to add to all selected models",
                options=available_tags,
                help="These tags will be added to all selected models"
            )

            tags_action = st.radio(
                "Tag Operation",
                ["Add tags (keep existing)", "Replace tags (remove existing)"],
                index=0
            )

            if st.button("Apply Tags", use_container_width=True, type="primary"):
                if not selected_tags:
                    st.warning("Please select at least one tag to add.")
                else:
                    with st.spinner(f"Updating tags for {selected_count} models..."):
                        # Track success and failures
                        successes = 0
                        failures = []

                        # Process each selected model
                        for idx, row in selected_models.iterrows():
                            try:
                                repo_id = row["Full ID"]
                                model_info = st.session_state.client.get_model_info(repo_id)

                                if model_info:
                                    # Get current model card content
                                    try:
                                        model_card_url = f"https://huggingface.co/{repo_id}/raw/main/README.md"
                                        response = st.session_state.client.api._get_paginated(model_card_url)
                                        if response.status_code != 200:
                                            failures.append((repo_id, "Failed to fetch model card"))
                                            continue

                                        model_card_content = response.text

                                        # Update tags in the model card
                                        import re
                                        yaml_match = re.search(r"---\s+(.*?)\s+---", model_card_content, re.DOTALL)

                                        if yaml_match:
                                            yaml_content = yaml_match.group(1)
                                            tags_match = re.search(r"tags:\s*((?:- .*?\n)+)", yaml_content, re.DOTALL)

                                            if tags_match and tags_action == "Add tags (keep existing)":
                                                # Extract existing tags
                                                existing_tags = [
                                                    line.strip("- \n")
                                                    for line in tags_match.group(1).split("\n")
                                                    if line.strip().startswith("-")
                                                ]

                                                # Combine existing and new tags
                                                all_tags = list(set(existing_tags + selected_tags))

                                                # Replace tags section
                                                new_yaml = yaml_content.replace(
                                                    tags_match.group(0),
                                                    "tags:\n" + "\n".join([f"- {tag}" for tag in all_tags]) + "\n",
                                                )

                                                # Update the model card
                                                new_content = model_card_content.replace(
                                                    yaml_match.group(0), f"---\n{new_yaml}---"
                                                )
                                            elif tags_match and tags_action == "Replace tags (remove existing)":
                                                # Replace tags section
                                                new_yaml = yaml_content.replace(
                                                    tags_match.group(0),
                                                    "tags:\n" + "\n".join([f"- {tag}" for tag in selected_tags]) + "\n",
                                                )

                                                # Update the model card
                                                new_content = model_card_content.replace(
                                                    yaml_match.group(0), f"---\n{new_yaml}---"
                                                )
                                            elif tags_action == "Add tags (keep existing)" or tags_action == "Replace tags (remove existing)":
                                                # Add tags section if it doesn't exist
                                                new_yaml = yaml_content + "\ntags:\n" + "\n".join([f"- {tag}" for tag in selected_tags]) + "\n"

                                                # Update the model card
                                                new_content = model_card_content.replace(
                                                    yaml_match.group(0), f"---\n{new_yaml}---"
                                                )
                                            else:
                                                failures.append((repo_id, "Failed to update tags in model card"))
                                                continue
                                        else:
                                            # Add YAML frontmatter with tags
                                            tags_yaml = "---\ntags:\n" + "\n".join([f"- {tag}" for tag in selected_tags]) + "\n---\n\n"
                                            new_content = tags_yaml + model_card_content

                                        # Update the model card
                                        success, _ = st.session_state.client.update_model_card(repo_id, new_content)

                                        if success:
                                            successes += 1
                                        else:
                                            failures.append((repo_id, "Failed to update model card"))

                                    except Exception as e:
                                        failures.append((repo_id, str(e)))
                                else:
                                    failures.append((repo_id, "Failed to fetch model info"))

                            except Exception as e:
                                failures.append((row["Full ID"], str(e)))

                        # Show results
                        if successes > 0:
                            st.success(f"Successfully updated tags for {successes} models")

                        if failures:
                            st.error(f"Failed to update {len(failures)} models")
                            for repo_id, error in failures:
                                st.warning(f"Failed to update {repo_id}: {error}")

                        # Refresh models after batch operation
                        st.session_state.models = st.session_state.client.get_user_models()
                        st.info("Model list refreshed. You may need to wait a few minutes for all changes to propagate.")

        with tab2:
            st.subheader("Update Visibility")

            visibility = st.radio(
                "Set visibility for selected models",
                ["Public", "Private"],
                index=0,
                help="Change the visibility of all selected models"
            )

            if st.button("Update Visibility", use_container_width=True, type="primary"):
                with st.spinner(f"Updating visibility for {selected_count} models..."):
                    st.warning("This feature requires Hugging Face Pro or Enterprise subscription.")
                    st.info("In the actual implementation, this would update the models' visibility settings.")

                    # This is a placeholder for the actual implementation
                    time.sleep(2)
                    st.success(f"Successfully updated visibility for {selected_count} models")

        with tab3:
            st.subheader("Add Collaborators")

            collaborators = st.text_area(
                "Enter usernames of collaborators (one per line)",
                help="These users will be added as collaborators to all selected models"
            )

            role = st.selectbox(
                "Collaborator role",
                ["read", "write", "admin"],
                index=0
            )

            if st.button("Add Collaborators", use_container_width=True, type="primary"):
                if not collaborators.strip():
                    st.warning("Please enter at least one collaborator username.")
                else:
                    with st.spinner(f"Adding collaborators to {selected_count} models..."):
                        # This is a placeholder for the actual implementation
                        collaborator_list = [c.strip() for c in collaborators.split("\n") if c.strip()]
                        st.info(f"Adding {len(collaborator_list)} collaborators with '{role}' role to {selected_count} models.")
                        st.warning("This feature requires Hugging Face Pro or Enterprise subscription.")

                        time.sleep(2)
                        st.success(f"Successfully added collaborators to {selected_count} models")

        with tab4:
            st.subheader("⚠️ Delete Models")

            st.warning(
                "This operation is irreversible. All selected models will be permanently deleted."
            )

            # Confirmation
            confirmation = st.text_input(
                "Type 'DELETE' to confirm deletion of all selected models",
                key="batch_delete_confirm"
            )

            if st.button("Delete Selected Models", use_container_width=True, type="primary"):
+
if confirmation != "DELETE":
|
286 |
+
st.error("Please type 'DELETE' to confirm.")
|
287 |
+
else:
|
288 |
+
with st.spinner(f"Deleting {selected_count} models..."):
|
289 |
+
# Track success and failures
|
290 |
+
successes = 0
|
291 |
+
failures = []
|
292 |
+
|
293 |
+
# Process each selected model
|
294 |
+
for idx, row in selected_models.iterrows():
|
295 |
+
try:
|
296 |
+
repo_id = row["Full ID"]
|
297 |
+
|
298 |
+
# Delete the repository
|
299 |
+
success, message = st.session_state.client.delete_model_repository(repo_id)
|
300 |
+
|
301 |
+
if success:
|
302 |
+
successes += 1
|
303 |
+
else:
|
304 |
+
failures.append((repo_id, message))
|
305 |
+
|
306 |
+
except Exception as e:
|
307 |
+
failures.append((row["Full ID"], str(e)))
|
308 |
+
|
309 |
+
# Show results
|
310 |
+
if successes > 0:
|
311 |
+
st.success(f"Successfully deleted {successes} models")
|
312 |
+
|
313 |
+
if failures:
|
314 |
+
st.error(f"Failed to delete {len(failures)} models")
|
315 |
+
for repo_id, error in failures:
|
316 |
+
st.warning(f"Failed to delete {repo_id}: {error}")
|
317 |
+
|
318 |
+
# Refresh models after batch operation
|
319 |
+
st.session_state.models = st.session_state.client.get_user_models()
|
pages/home.py
ADDED
@@ -0,0 +1,209 @@
+
+import streamlit as st
+import pandas as pd
+from datetime import datetime
+
+def render_home():
+    """Render the home page with model overview and statistics"""
+    st.title("🤗 Hugging Face Model Manager")
+
+    st.markdown(
+        """
+        Welcome to your personal Hugging Face model management dashboard.
+        From here, you can view, create, and manage your machine learning models.
+        """
+    )
+
+    # Check if we have models loaded
+    if not st.session_state.get("models"):
+        with st.spinner("Loading your models..."):
+            try:
+                st.session_state.models = st.session_state.client.get_user_models()
+            except Exception as e:
+                st.error(f"Error loading models: {str(e)}")
+
+    # Model Statistics Dashboard
+    st.markdown("### 📊 Model Statistics")
+
+    # Key statistics in cards
+    col1, col2, col3 = st.columns([1, 1, 1])
+
+    with col1:
+        # Total models count
+        total_models = len(st.session_state.models)
+        st.markdown(
+            f"""
+            <div class="tooltip" style="width: 100%;">
+                <div style="padding: 20px; background-color: #F9FAFB; border-radius: 10px; border: 1px solid #E5E7EB; text-align: center; box-shadow: 0 2px 4px rgba(0,0,0,0.05); transition: all 0.3s;">
+                    <div style="font-size: 36px; color: #6366F1; font-weight: bold;">
+                        {total_models}
+                    </div>
+                    <div style="color: #6B7280; font-weight: 500;">
+                        Total Models
+                    </div>
+                </div>
+                <span class="tooltip-text">Total number of models you've created</span>
+            </div>
+            """,
+            unsafe_allow_html=True,
+        )
+
+    with col2:
+        # Total downloads (sum from all models)
+        total_downloads = sum(
+            getattr(model, "downloads", 0) for model in st.session_state.models
+        )
+        st.markdown(
+            f"""
+            <div class="tooltip" style="width: 100%;">
+                <div style="padding: 20px; background-color: #F9FAFB; border-radius: 10px; border: 1px solid #E5E7EB; text-align: center; box-shadow: 0 2px 4px rgba(0,0,0,0.05); transition: all 0.3s;">
+                    <div style="font-size: 36px; color: #10B981; font-weight: bold;">
+                        {total_downloads:,}
+                    </div>
+                    <div style="color: #6B7280; font-weight: 500;">
+                        Total Downloads
+                    </div>
+                </div>
+                <span class="tooltip-text">Cumulative downloads across all your models</span>
+            </div>
+            """,
+            unsafe_allow_html=True,
+        )
+
+    with col3:
+        # Calculate total likes
+        total_likes = sum(
+            getattr(model, "likes", 0) for model in st.session_state.models
+        )
+        st.markdown(
+            f"""
+            <div class="tooltip" style="width: 100%;">
+                <div style="padding: 20px; background-color: #F9FAFB; border-radius: 10px; border: 1px solid #E5E7EB; text-align: center; box-shadow: 0 2px 4px rgba(0,0,0,0.05); transition: all 0.3s;">
+                    <div style="font-size: 36px; color: #FF9D96; font-weight: bold;">
+                        {total_likes}
+                    </div>
+                    <div style="color: #6B7280; font-weight: 500;">
+                        Total Likes
+                    </div>
+                </div>
+                <span class="tooltip-text">Cumulative likes across all your models</span>
+            </div>
+            """,
+            unsafe_allow_html=True,
+        )
+
+    # Quick Actions with improved styling
+    st.markdown("### 🚀 Quick Actions")
+
+    quick_actions_col1, quick_actions_col2 = st.columns([1, 1])
+
+    with quick_actions_col1:
+        if st.button(
+            "➕ Create New Repository", key="create_repo_home", use_container_width=True
+        ):
+            st.session_state.page = "repository_management"
+            st.rerun()
+
+    with quick_actions_col2:
+        if st.button(
+            "🔄 Refresh Models", key="refresh_models_home", use_container_width=True
+        ):
+            with st.spinner("Refreshing models..."):
+                try:
+                    st.session_state.models = st.session_state.client.get_user_models()
+                    st.success("Models refreshed!")
+                except Exception as e:
+                    st.error(f"Error refreshing models: {str(e)}")
+
+    # Your Models section
+    st.markdown("### 📚 Your Models")
+
+    if not st.session_state.models:
+        st.info(
+            "You don't have any models yet. Click 'Create New Repository' to get started!"
+        )
+    else:
+        # Create a dataframe from the models list for display
+        models_data = []
+        for model in st.session_state.models:
+            try:
+                # Extract key data; lastModified may be an ISO string or already
+                # a datetime, depending on the huggingface_hub version
+                raw_modified = getattr(model, "lastModified", None)
+                last_modified = (
+                    datetime.fromisoformat(raw_modified.replace("Z", "+00:00"))
+                    if isinstance(raw_modified, str)
+                    else raw_modified
+                )
+
+                model_data = {
+                    "Model Name": model.modelId.split("/")[-1],
+                    "Full ID": model.modelId,
+                    "Downloads": getattr(model, "downloads", 0),
+                    "Likes": getattr(model, "likes", 0),
+                    "Last Modified": last_modified,
+                    "Private": getattr(model, "private", False),
+                }
+                models_data.append(model_data)
+            except Exception as e:
+                st.warning(f"Error processing model {getattr(model, 'modelId', 'unknown')}: {str(e)}")
+
+        # Sorting
+        sort_options = ["Last Modified", "Downloads", "Likes", "Model Name"]
+        sort_by = st.selectbox("Sort by", sort_options, index=0)
+
+        # Create DataFrame and sort
+        if models_data:
+            df = pd.DataFrame(models_data)
+            if sort_by in ["Last Modified", "Downloads", "Likes"]:
+                df = df.sort_values(by=sort_by, ascending=False)
+            else:
+                df = df.sort_values(by=sort_by)
+
+            # Format the Last Modified date
+            df["Last Modified"] = df["Last Modified"].apply(
+                lambda x: x.strftime("%b %d, %Y") if pd.notnull(x) else "N/A"
+            )
+
+            # Display models as cards
+            for i, row in df.iterrows():
+                with st.container():
+                    col1, col2 = st.columns([3, 1])
+                    with col1:
+                        st.markdown(
+                            f"""
+                            <div style="padding: 16px; background-color: #F9FAFB; border-radius: 8px; border: 1px solid #E5E7EB; margin-bottom: 16px; cursor: pointer; transition: all 0.3s;"
+                                 onclick="window.open('https://huggingface.co/{row['Full ID']}', '_blank')">
+                                <div style="display: flex; justify-content: space-between; align-items: center;">
+                                    <div>
+                                        <h3 style="margin: 0; color: #111827;">{row['Model Name']}</h3>
+                                        <p style="margin: 4px 0 0 0; color: #6B7280; font-size: 14px;">{row['Full ID']}</p>
+                                    </div>
+                                    <div style="display: flex; align-items: center;">
+                                        <div style="margin-right: 16px; text-align: center;">
+                                            <div style="font-weight: bold; color: #10B981;">{row['Downloads']:,}</div>
+                                            <div style="font-size: 12px; color: #6B7280;">downloads</div>
+                                        </div>
+                                        <div style="margin-right: 16px; text-align: center;">
+                                            <div style="font-weight: bold; color: #FF9D96;">{row['Likes']}</div>
+                                            <div style="font-size: 12px; color: #6B7280;">likes</div>
+                                        </div>
+                                        <div style="text-align: center;">
+                                            <div style="font-weight: bold; color: #6366F1;">{row['Last Modified']}</div>
+                                            <div style="font-size: 12px; color: #6B7280;">updated</div>
+                                        </div>
+                                    </div>
+                                </div>
+                            </div>
+                            """,
+                            unsafe_allow_html=True,
+                        )
+                    with col2:
+                        if st.button(
+                            "📝 Manage",
+                            key=f"manage_{row['Full ID']}",
+                            use_container_width=True
+                        ):
+                            st.session_state.selected_model = row["Full ID"]
+                            st.session_state.page = "model_details"
+                            st.rerun()
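
The two st.rerun() calls above rely on a simple session-state router: a page handler sets st.session_state.page and forces a rerun, and the app entry point dispatches on that key. The real dispatch lives in app.py; the mapping below is only a sketch of the shape that pattern takes:

import streamlit as st

from pages.home import render_home
from pages.model_details import render_model_details
from pages.repository_management import render_repository_management

# Hypothetical dispatch table; app.py's actual routing may differ
PAGES = {
    "home": render_home,
    "model_details": render_model_details,
    "repository_management": render_repository_management,
}

if "page" not in st.session_state:
    st.session_state.page = "home"

# Render whichever page the last interaction selected
PAGES.get(st.session_state.page, render_home)()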
pages/model_details.py
ADDED
@@ -0,0 +1,177 @@
+import streamlit as st
+import time
+from components.edit_model import edit_model_form
+from components.upload_model import upload_model_form
+
+
+def render_model_details():
+    """Render the model details page"""
+
+    if not st.session_state.selected_model:
+        st.error("No model selected. Please select a model from the sidebar.")
+        if st.button("Go back to Dashboard", use_container_width=True):
+            st.session_state.page = "home"
+            st.rerun()
+        return
+
+    # Get model info from the Hugging Face API
+    with st.spinner("Loading model details..."):
+        try:
+            repo_id = st.session_state.selected_model
+            model_info = st.session_state.client.get_model_info(repo_id)
+
+            if not model_info:
+                st.error(
+                    "Model not found. It may have been deleted, or you don't have access."
+                )
+                if st.button("Go back to Dashboard", use_container_width=True):
+                    st.session_state.page = "home"
+                    st.rerun()
+                return
+        except Exception as e:
+            st.error(f"Error loading model details: {str(e)}")
+            if st.button("Go back to Dashboard", use_container_width=True):
+                st.session_state.page = "home"
+                st.rerun()
+            return
+
+    # Display model header
+    st.title(f"Model: {repo_id}")
+
+    # Display model information
+    col1, col2 = st.columns([2, 1])
+
+    with col1:
+        st.markdown(
+            f"""
+            <div style="background-color: #F9FAFB; padding: 20px; border-radius: 10px; border: 1px solid #E5E7EB;">
+                <div style="display: flex; align-items: center; justify-content: space-between; margin-bottom: 10px;">
+                    <h3 style="margin: 0;">{repo_id.split('/')[-1]}</h3>
+                    <div>
+                        <span style="background-color: #FFD21E; padding: 5px 10px; border-radius: 20px; font-size: 12px; margin-right: 5px;">
+                            {model_info.modelId.split('/')[0]}
+                        </span>
+                        <span style="background-color: #84ADFF; padding: 5px 10px; border-radius: 20px; font-size: 12px;">
+                            {model_info.pipeline_tag if hasattr(model_info, 'pipeline_tag') else 'model'}
+                        </span>
+                    </div>
+                </div>
+                <div style="margin-bottom: 10px;">
+                    <a href="https://huggingface.co/{repo_id}" target="_blank" style="color: #84ADFF; text-decoration: none;">
+                        View on Hugging Face Hub 🔗
+                    </a>
+                </div>
+                <div style="display: flex; gap: 20px; margin-top: 20px;">
+                    <div>
+                        <div style="color: #6B7280; font-size: 12px;">DOWNLOADS</div>
+                        <div style="font-weight: bold;">{getattr(model_info, 'downloads', 0)}</div>
+                    </div>
+                    <div>
+                        <div style="color: #6B7280; font-size: 12px;">LIKES</div>
+                        <div style="font-weight: bold;">{getattr(model_info, 'likes', 0)}</div>
+                    </div>
+                    <div>
+                        <div style="color: #6B7280; font-size: 12px;">LAST MODIFIED</div>
+                        <div style="font-weight: bold;">
+                            {str(getattr(model_info, 'lastModified', 'Unknown'))[:10]}
+                        </div>
+                    </div>
+                </div>
+            </div>
+            """,
+            unsafe_allow_html=True,
+        )
+
+    with col2:
+        st.markdown(
+            f"""
+            <div style="background-color: #F9FAFB; padding: 20px; border-radius: 10px; border: 1px solid #E5E7EB; height: 100%;">
+                <div style="color: #6B7280; font-size: 12px; margin-bottom: 10px;">TAGS</div>
+                <div style="display: flex; flex-wrap: wrap; gap: 5px;">
+                    {' '.join([f'<span style="background-color: #E5E7EB; padding: 5px 10px; border-radius: 20px; font-size: 12px;">{tag}</span>' for tag in (model_info.tags if hasattr(model_info, 'tags') else [])])}
+                </div>
+            </div>
+            """,
+            unsafe_allow_html=True,
+        )
+
+    # Tabs for different actions
+    tab1, tab2, tab3, tab4, tab5, tab6 = st.tabs(["Model Card", "Upload Files", "Edit Model", "Version Control", "Test Inference", "Auto Documentation"])
+
+    with tab1:
+        st.markdown("### Model Card")
+
+        # Display the model card in an iframe
+        st.markdown(
+            f"""
+            <iframe src="https://huggingface.co/{repo_id}" width="100%" height="600" style="border: 1px solid #E5E7EB; border-radius: 10px;"></iframe>
+            """,
+            unsafe_allow_html=True,
+        )
+
+    with tab2:
+        success = upload_model_form(model_info)
+        if success:
+            # Refresh model info after a successful upload
+            time.sleep(2)  # Wait for the API to update
+            st.session_state.selected_model = repo_id  # Keep the same model selected
+            st.rerun()
+
+    with tab3:
+        success, _ = edit_model_form(model_info)
+        if success:
+            # Refresh model info after a successful edit
+            time.sleep(2)  # Wait for the API to update
+            st.session_state.selected_model = repo_id  # Keep the same model selected
+            st.rerun()
+
+    with tab4:
+        from components.version_control import render_version_history
+        render_version_history(model_info)
+
+    with tab5:
+        from components.model_inference import model_inference_dashboard
+        model_inference_dashboard(model_info)
+
+    with tab6:
+        from components.documentation_generator import model_documentation_generator
+        model_documentation_generator(model_info)
+
+    # Delete model option
+    st.markdown("---")
+    with st.expander("⚠️ Danger Zone"):
+        st.warning(
+            "Deleting a repository is irreversible. All files and data will be permanently lost."
+        )
+
+        # Confirmation input
+        confirm_text = st.text_input(
+            f"Type the repository name '{repo_id.split('/')[-1]}' to confirm deletion:",
+            key="confirm_delete_input",
+        )
+
+        if st.button(
+            "🗑️ Delete Repository", key="delete_repo_btn", use_container_width=True
+        ):
+            if confirm_text == repo_id.split("/")[-1]:
+                with st.spinner("Deleting repository..."):
+                    try:
+                        success, message = (
+                            st.session_state.client.delete_model_repository(repo_id)
+                        )
+
+                        if success:
+                            st.success("Repository deleted successfully!")
+                            # Refresh models and go back to home
+                            st.session_state.models = (
+                                st.session_state.client.get_user_models()
+                            )
+                            st.session_state.page = "home"
+                            st.session_state.selected_model = None
+                            st.rerun()
+                        else:
+                            st.error(f"Failed to delete repository: {message}")
+                    except Exception as e:
+                        st.error(f"Error deleting repository: {str(e)}")
+            else:
+                st.error("Repository name doesn't match. Deletion aborted.")
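
The Danger Zone above and the batch delete tab both guard destructive calls with the same type-to-confirm pattern. Factored out, it fits in a few lines; the helper name is hypothetical (the app inlines this logic rather than sharing it):

import streamlit as st

def confirm_destructive_action(expected: str, key: str) -> bool:
    """Return True only when the user has typed the exact expected phrase."""
    typed = st.text_input(f"Type '{expected}' to confirm:", key=key)
    return typed == expected

# Usage sketch: draw the input first, then require both the match and the click
# confirmed = confirm_destructive_action(repo_name, "confirm_delete")
# if st.button("Delete") and confirmed:
#     ...perform the deletion...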
pages/repository_management.py
ADDED
@@ -0,0 +1,55 @@
+
+import streamlit as st
+from components.create_repository import create_repository_form
+
+def render_repository_management():
+    """Render the repository management page"""
+    st.title("🗄️ Repository Management")
+
+    st.markdown(
+        """
+        Create and manage your Hugging Face model repositories.
+        A repository is where you store model files, configuration, and documentation.
+        """
+    )
+
+    # Create new repository section
+    created, repo_id = create_repository_form()
+
+    if created and repo_id:
+        # If a repository was created, navigate to the model details page
+        st.session_state.selected_model = repo_id
+        st.session_state.page = "model_details"
+        st.rerun()
+
+    # Tips for repository creation
+    with st.expander("Tips for creating a good repository"):
+        st.markdown(
+            """
+            ### Best Practices for Model Repositories
+
+            1. **Choose a descriptive name**
+               - Use clear, lowercase names with hyphens (e.g., `bert-finetuned-sentiment`)
+               - Avoid generic names like "test" or "model"
+
+            2. **Add appropriate tags**
+               - Tags help others discover your model
+               - Include task types (e.g., "text-classification", "object-detection")
+               - Add framework tags (e.g., "pytorch", "tensorflow")
+
+            3. **Write a comprehensive model card**
+               - Describe what the model does and how it was trained
+               - Document model limitations and biases
+               - Include performance metrics
+               - Specify intended use cases
+
+            4. **Organize your files**
+               - Include all necessary files for model loading
+               - Add configuration files
+               - Include example scripts if helpful
+
+            5. **License your model appropriately**
+               - Choose an open-source license if possible
+               - Document any usage restrictions
+            """
+        )
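
The naming and tagging advice above maps directly onto the Hub API, which can also set tags without hand-editing the README. A hedged sketch using huggingface_hub directly (the repository name and token are placeholders; metadata_update rewrites the model card's YAML metadata and, with overwrite=True, replaces existing values for the keys you pass):

from huggingface_hub import HfApi, metadata_update

api = HfApi(token="hf_xxx")  # placeholder token
repo = api.create_repo("bert-finetuned-sentiment", private=False, exist_ok=True)

# Attach discoverability tags straight to the model card's YAML metadata
metadata_update(
    repo.repo_id,
    {"tags": ["text-classification", "pytorch"]},
    overwrite=True,
    token="hf_xxx",
)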
pyproject.toml
ADDED
@@ -0,0 +1,9 @@
+[project]
+name = "repl-nix-workspace"
+version = "0.1.0"
+description = "Web-based model management interface for the Hugging Face Hub, built with Streamlit"
+requires-python = ">=3.11"
+dependencies = [
+    "huggingface-hub>=0.29.2",
+    "streamlit>=1.43.1",
+]
replit.nix
ADDED
@@ -0,0 +1,7 @@
+{pkgs}: {
+  deps = [
+    pkgs.glibcLocales
+    pkgs.python312Packages.autopep8
+    pkgs.pre-commit
+  ];
+}
requirements.txt
ADDED
@@ -0,0 +1,15 @@
+
+# Core dependencies
+huggingface-hub>=0.29.2
+streamlit>=1.43.1
+numpy
+pandas
+plotly
+
+# Development tools
+black
+isort
+mypy
+pylint
+pytest
+flake8
utils/__init__.py
ADDED
@@ -0,0 +1 @@
+# This file is intentionally left empty to make the directory a Python package
utils/api_client.py
ADDED
@@ -0,0 +1,154 @@
+import os
+import requests
+import tempfile
+import streamlit as st
+from huggingface_hub import (
+    HfApi,
+    login,
+    create_repo,
+    delete_repo,
+    upload_file,
+    HfFolder,
+)
+from huggingface_hub.utils import RepositoryNotFoundError, RevisionNotFoundError
+
+
+class HuggingFaceClient:
+    def __init__(self, token=None):
+        self.token = token
+        self.api = HfApi(token=token)
+
+    def authenticate(self, token):
+        """Authenticate with the Hugging Face API using a token"""
+        self.token = token
+        self.api = HfApi(token=token)
+        try:
+            login(token=token)
+            whoami = self.api.whoami()
+            return True, whoami
+        except Exception as e:
+            return False, str(e)
+
+    def get_user_models(self):
+        """Get all models created by the logged-in user"""
+        try:
+            # First try to get the username from the whoami API call
+            whoami = self.api.whoami()
+            username = whoami.get("name")
+
+            # Fall back to the HF_USERNAME secret if available
+            if not username and os.environ.get("HF_USERNAME"):
+                username = os.environ.get("HF_USERNAME")
+
+            # Get all models for this user using the list_models API
+            user_models = list(self.api.list_models(author=username))
+            return user_models
+        except Exception as e:
+            st.error(f"Error fetching models: {str(e)}")
+            return []
+
+    def get_model_info(self, repo_id):
+        """Get detailed information about a specific model"""
+        try:
+            model_info = self.api.model_info(repo_id)
+            return model_info
+        except RepositoryNotFoundError:
+            st.error(f"Repository {repo_id} not found")
+            return None
+        except Exception as e:
+            st.error(f"Error fetching model info: {str(e)}")
+            return None
+
+    def create_model_repository(
+        self, repo_name, is_private=False, exist_ok=False, repo_type="model"
+    ):
+        """Create a new model repository on Hugging Face"""
+        try:
+            response = create_repo(
+                repo_id=repo_name,
+                token=self.token,
+                private=is_private,
+                exist_ok=exist_ok,
+                repo_type=repo_type,
+            )
+            return True, response
+        except Exception as e:
+            return False, str(e)
+
+    def delete_model_repository(self, repo_id):
+        """Delete a model repository from Hugging Face"""
+        try:
+            delete_repo(repo_id=repo_id, token=self.token)
+            return True, "Repository deleted successfully"
+        except Exception as e:
+            return False, str(e)
+
+    def upload_model_files(self, repo_id, files, commit_message="Upload model files"):
+        """Upload model files to a repository"""
+        try:
+            uploaded_files = []
+            for file_path, file_content in files.items():
+                with tempfile.NamedTemporaryFile(delete=False) as temp_file:
+                    temp_file.write(file_content)
+                    temp_file_path = temp_file.name
+
+                upload_response = upload_file(
+                    path_or_fileobj=temp_file_path,
+                    path_in_repo=file_path,
+                    repo_id=repo_id,
+                    token=self.token,
+                    commit_message=commit_message,
+                )
+                uploaded_files.append(upload_response)
+
+                # Clean up the temporary file
+                os.unlink(temp_file_path)
+
+            return True, uploaded_files
+        except Exception as e:
+            return False, str(e)
+
+    def update_model_card(self, repo_id, model_card_content):
+        """Update the README.md (model card) of a repository"""
+        try:
+            with tempfile.NamedTemporaryFile(delete=False, mode="w") as temp_file:
+                temp_file.write(model_card_content)
+                temp_file_path = temp_file.name
+
+            upload_response = upload_file(
+                path_or_fileobj=temp_file_path,
+                path_in_repo="README.md",
+                repo_id=repo_id,
+                token=self.token,
+                commit_message="Update model card",
+            )
+
+            # Clean up the temporary file
+            os.unlink(temp_file_path)
+
+            return True, upload_response
+        except Exception as e:
+            return False, str(e)
+
+    def get_model_tags(self):
+        """Get available model tags from the Hugging Face Hub"""
+        try:
+            # This is a simplified version; in a real app, you'd fetch the actual tags from the HF API
+            tags = [
+                "text-classification",
+                "token-classification",
+                "question-answering",
+                "translation",
+                "summarization",
+                "text-generation",
+                "fill-mask",
+                "conversational",
+                "image-classification",
+                "object-detection",
+                "audio-classification",
+                "automatic-speech-recognition",
+            ]
+            return tags
+        except Exception as e:
+            st.error(f"Error fetching tags: {str(e)}")
+            return []
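
A quick usage sketch of the client defined above (the token and repository name are placeholders):

from utils.api_client import HuggingFaceClient

client = HuggingFaceClient()
ok, user = client.authenticate("hf_xxx")  # placeholder token
if ok:
    models = client.get_user_models()
    print(f"{user['name']} has {len(models)} models")

    created, response = client.create_model_repository("demo-classifier", is_private=True)
    if created:
        client.update_model_card(
            f"{user['name']}/demo-classifier",
            "# demo-classifier\n\nWork in progress.\n",
        )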
utils/auth.py
ADDED
@@ -0,0 +1,91 @@
+import streamlit as st
+import os
+from utils.api_client import HuggingFaceClient
+
+
+def check_authentication():
+    """Handle user authentication with the Hugging Face API"""
+
+    st.markdown(
+        """
+        <div style="text-align: center; margin-bottom: 30px;">
+            <h1>🤗 Hugging Face Model Manager</h1>
+            <p>Manage your machine learning models and publish to the Hugging Face Model Hub</p>
+        </div>
+        """,
+        unsafe_allow_html=True,
+    )
+
+    st.markdown(
+        """
+        <div style="background-color: #F9FAFB; padding: 20px; border-radius: 10px; border: 1px solid #E5E7EB;">
+            <h3 style="margin-top: 0;">Welcome to Hugging Face Model Manager</h3>
+            <p>This application allows you to:</p>
+            <ul>
+                <li>Create and manage model repositories</li>
+                <li>Upload and publish models to the Hugging Face Hub</li>
+                <li>Update model metadata and documentation</li>
+                <li>Organize your models with tags and descriptions</li>
+            </ul>
+        </div>
+        """,
+        unsafe_allow_html=True,
+    )
+
+    with st.form("auth_form"):
+        st.subheader("Login with Hugging Face API Token")
+
+        # Info alert about creating a token
+        st.info(
+            """
+            To use this application, you need a Hugging Face API token.
+
+            You can create one at: [https://huggingface.co/settings/tokens](https://huggingface.co/settings/tokens)
+
+            Make sure to grant **write** access if you want to upload models.
+            """
+        )
+
+        # Token input
+        token = st.text_input("Enter your Hugging Face API token", type="password")
+
+        # Get the token from the environment or Secrets if available and not provided
+        if not token:
+            if os.environ.get("HF_TOKEN"):
+                token = os.environ.get("HF_TOKEN")
+                st.success("Using API token from Secrets.")
+            elif os.environ.get("HF_API_TOKEN"):
+                token = os.environ.get("HF_API_TOKEN")
+                st.success("Using API token from environment variables.")
+
+        submitted = st.form_submit_button("Login", use_container_width=True)
+
+        if submitted and token:
+            # Authenticate with Hugging Face
+            with st.spinner("Authenticating..."):
+                client = HuggingFaceClient()
+                success, user_info = client.authenticate(token)
+
+                if success:
+                    st.session_state.authenticated = True
+                    st.session_state.api_token = token
+                    st.session_state.username = user_info.get("name", "User")
+                    st.session_state.client = client
+                    st.success(
+                        f"Successfully authenticated as {st.session_state.username}"
+                    )
+                    st.rerun()
+                else:
+                    st.error(f"Authentication failed: {user_info}")
+
+        elif submitted:
+            st.error("Please enter your Hugging Face API token")
+
+
+def logout():
+    """Log out the current user"""
+    for key in list(st.session_state.keys()):
+        del st.session_state[key]
+    st.session_state.authenticated = False
+    st.session_state.page = "home"
+    st.rerun()
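
check_authentication() draws the login form but does not gate the rest of the app by itself; the entry point is expected to check the session flag first. A minimal sketch of that gating, assuming app.py wires it roughly like this:

import streamlit as st

from utils.auth import check_authentication, logout

if not st.session_state.get("authenticated"):
    check_authentication()
    st.stop()  # render nothing else until login succeeds

# Once authenticated, expose a logout control
st.sidebar.button("Log out", on_click=logout)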
uv.lock
ADDED
The diff for this file is too large to render.