MekkCyber committed · commit eee3af5 · parent: b513799

fix
app.py CHANGED
@@ -106,8 +106,7 @@ def save_model(model, model_name, quantization_type, group_size=128, username=No
         repo_id=repo_name,
         repo_type="model",
     )
-
-    return f"https://huggingface.co/{repo_name}"
+    return f'<h1>✅ DONE</h1><br/>Find your repo here: <a href="https://huggingface.co/{repo_name}" target="_blank" style="text-decoration:underline">{repo_name}</a>'
 
 def quantize_and_save(profile: gr.OAuthProfile | None, oauth_token: gr.OAuthToken | None, model_name, quantization_type, group_size, quantized_model_name):
     if oauth_token is None :
@@ -119,12 +118,11 @@ def quantize_and_save(profile: gr.OAuthProfile | None, oauth_token: gr.OAuthToke
         return exists_message
     if quantization_type == "int4_weight_only" :
         return "int4_weight_only not supported on cpu"
-    # try :
     if not group_size.isdigit() :
         return "group_size must be a number"
 
     group_size = int(group_size)
-
+    # try:
     quantized_model = quantize_model(model_name, quantization_type, group_size, oauth_token, profile.username)
     return save_model(quantized_model, model_name, quantization_type, group_size, profile.username, oauth_token, quantized_model_name)
     # except Exception as e :
@@ -157,7 +155,7 @@ with gr.Blocks(theme=gr.themes.Ocean(), css=css) as app:
         placeholder="Search for model id on Huggingface",
         search_type="model",
     )
-    with gr.Row():
+    with gr.Row(style="margin-top: 30px;"):
         with gr.Column():
             quantization_type = gr.Dropdown(
                 label="Quantization Type",