echarlaix HF staff committed on
Commit
d114365
·
1 Parent(s): da32672

update description

Browse files
Files changed (1) hide show
  1. app.py +9 -3
app.py CHANGED
@@ -47,8 +47,9 @@ def process_model(
47
  model_name = model_id.split("/")[-1]
48
  username = whoami(oauth_token.token)["name"]
49
  new_repo_id = f"{username}/{model_name}-openvino-{dtype}"
50
-
51
  task = TasksManager.infer_task_from_model(model_id)
 
 
52
  if task not in _HEAD_TO_AUTOMODELS:
53
  raise ValueError(
54
  f"The task '{task}' is not supported, only {_HEAD_TO_AUTOMODELS.keys()} tasks are supported"
@@ -66,7 +67,6 @@ def process_model(
66
  export = len(ov_files) == 0
67
 
68
  is_int8 = dtype == "int8"
69
- library_name = TasksManager.infer_library_from_model(model_id)
70
  if library_name == "diffusers":
71
  quant_method = "hybrid"
72
  elif not is_int8:
@@ -160,6 +160,12 @@ def process_model(
160
  shutil.rmtree(folder, ignore_errors=True)
161
 
162
 
 
 
 
 
 
 
163
  model_id = HuggingfaceHubSearch(
164
  label="Hub Model ID",
165
  placeholder="Search for model id on the hub",
@@ -227,7 +233,7 @@ interface = gr.Interface(
227
  gr.Markdown(label="output"),
228
  ],
229
  title="Quantize your model with NNCF",
230
- description="This space takes a model, converts it to the OpenVINO format and applies NNCF weight only quantization. The resulting model will then be pushed on the Hub under your HF user namespace",
231
  api_name=False,
232
  )
233
 
 
47
  model_name = model_id.split("/")[-1]
48
  username = whoami(oauth_token.token)["name"]
49
  new_repo_id = f"{username}/{model_name}-openvino-{dtype}"
 
50
  task = TasksManager.infer_task_from_model(model_id)
51
+ library_name = TasksManager.infer_library_from_model(model_id)
52
+
53
  if task not in _HEAD_TO_AUTOMODELS:
54
  raise ValueError(
55
  f"The task '{task}' is not supported, only {_HEAD_TO_AUTOMODELS.keys()} tasks are supported"
 
67
  export = len(ov_files) == 0
68
 
69
  is_int8 = dtype == "int8"
 
70
  if library_name == "diffusers":
71
  quant_method = "hybrid"
72
  elif not is_int8:
 
160
  shutil.rmtree(folder, ignore_errors=True)
161
 
162
 
163
+ DESCRIPTION = """
164
+ This Space uses [Optimum Intel](https://huggingface.co/docs/optimum/main/en/intel/openvino/optimization) to automatically apply NNCF weight only quantization on a model hosted on the [Hub](https://huggingface.co/models) and convert it to the [OpenVINO format](https://docs.openvino.ai/2024/documentation/openvino-ir-format.html) if not already.
165
+
166
+ The resulting model will then be pushed under your HF user namespace. For now we only support conversion for models that are hosted on public repositories.
167
+ """
168
+
169
  model_id = HuggingfaceHubSearch(
170
  label="Hub Model ID",
171
  placeholder="Search for model id on the hub",
 
233
  gr.Markdown(label="output"),
234
  ],
235
  title="Quantize your model with NNCF",
236
+ description=DESCRIPTION,
237
  api_name=False,
238
  )
239