updated interface and error handling
app.py CHANGED
@@ -8,6 +8,8 @@ import gradio as gr
 from huggingface_hub import HfApi
 from huggingface_hub import ModelCard

+from gradio_huggingfacehub_search import HuggingfaceHubSearch
+
 from textwrap import dedent

 HF_PATH = "https://huggingface.co/"
@@ -69,8 +71,14 @@ QUANTIZATIONS = ["q0f16",
                  "q4f16_awq"]

 def button_click(hf_model_id, conv_template, quantization, oauth_token: gr.OAuthToken | None):
-    if
-
+    if oauth_token.token == None:
+        return "Log in to Huggingface to use this"
+    elif not hf_model_id:
+        return "Enter a Huggingface model ID"
+    elif not conv_template:
+        return "Select a conversation template"
+    elif not quantization:
+        return "Select a quantization method"

     api = HfApi(token=oauth_token.token)
     model_dir_name = hf_model_id.split("/")[1]
@@ -81,13 +89,17 @@ def button_click(hf_model_id, conv_template, quantization, oauth_token: gr.OAuth

     api.snapshot_download(repo_id=hf_model_id, local_dir=f"./dist/models/{model_dir_name}")

-    os.system("mlc_llm convert_weight ./dist/models/" + model_dir_name + "/" + \
+    convert_weight_result = subprocess.run(["mlc_llm convert_weight ./dist/models/" + model_dir_name + "/" + \
         " --quantization " + quantization + \
-        " -o dist/" + mlc_model_name)
+        " -o dist/" + mlc_model_name], shell=True, capture_output=True, text=True)
+    if convert_weight_result.stderr:
+        return convert_weight_result.stderr

-    os.system("mlc_llm gen_config ./dist/models/" + model_dir_name + "/" + \
+    gen_config_result = subprocess.run(["mlc_llm gen_config ./dist/models/" + model_dir_name + "/" + \
         " --quantization " + quantization + " --conv-template " + conv_template + \
-        " -o dist/" + mlc_model_name + "/")
+        " -o dist/" + mlc_model_name + "/"], shell=True, capture_output=True, text=True)
+    if gen_config_result.stderr:
+        return gen_config_result.stderr

     # push to HF
     user_name = api.whoami()["name"]
@@ -131,11 +143,20 @@ def button_click(hf_model_id, conv_template, quantization, oauth_token: gr.OAuth

     os.system("rm -rf dist/")

-    return "Successful"
+    return "Successful, please find your compiled LLM model on your personal account"

 with gr.Blocks() as demo:
     gr.LoginButton()
-
+    gr.Markdown(
+        """
+        # Compile your LLM model with MLC-LLM and run it locally!
+        ### This space takes in Huggingface model ID, and converts it for you using your selected conversation template and quantization method!
+        """)
+    model_id = HuggingfaceHubSearch(
+        label="HF Model ID",
+        placeholder="Search for your model on Huggingface",
+        search_type="model",
+    )
     conv = gr.Dropdown(CONV_TEMPLATES, label="Conversation Template")
     quant = gr.Dropdown(QUANTIZATIONS, label="Quantization Method")
     btn = gr.Button("Convert to MLC")
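A note on the new input checks: the parameter is annotated gr.OAuthToken | None, so the OAuth object itself may arrive as None, but the committed guard reads oauth_token.token before testing whether the object is present. Below is a minimal sketch of a stricter guard under that assumption; the helper name validate_inputs is illustrative and not part of app.py.

# Sketch only: defensive variant of the committed checks.
# `validate_inputs` is a hypothetical helper, not a function in app.py.
def validate_inputs(hf_model_id, conv_template, quantization, oauth_token):
    # The whole OAuth object can be None, so test it before reading .token.
    if oauth_token is None or not oauth_token.token:
        return "Log in to Huggingface to use this"
    if not hf_model_id:
        return "Enter a Huggingface model ID"
    if not conv_template:
        return "Select a conversation template"
    if not quantization:
        return "Select a quantization method"
    return None  # all inputs present; conversion can proceed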
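The two conversion steps now go through subprocess.run([...], shell=True, capture_output=True, text=True) and treat any stderr output as a failure. One caveat: command-line tools often write progress messages and warnings to stderr even on success, so the exit code is usually the more reliable failure signal. Below is a minimal alternative sketch under that assumption, reusing model_dir_name, quantization, and mlc_model_name from app.py; the helper name run_convert_weight is illustrative.

import subprocess

# Sketch only: one conversion step with an argument list (no shell=True)
# and a return-code check. `run_convert_weight` is a hypothetical helper.
def run_convert_weight(model_dir_name, quantization, mlc_model_name):
    result = subprocess.run(
        [
            "mlc_llm", "convert_weight", f"./dist/models/{model_dir_name}/",
            "--quantization", quantization,
            "-o", f"dist/{mlc_model_name}",
        ],
        capture_output=True,
        text=True,
    )
    if result.returncode != 0:
        # Surface the tool's own error output to the Gradio UI.
        return result.stderr or f"convert_weight exited with code {result.returncode}"
    return None  # success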
|