MegaTronX committed
Commit fcfd5e1 · verified · 1 Parent(s): 78a9e82

Update joycaption.py

Files changed (1):
  1. joycaption.py +7 -7
joycaption.py CHANGED

@@ -1,6 +1,6 @@
 import os
-if os.environ.get("SPACES_ZERO_GPU") is not None:
-    import spaces
+#if os.environ.get("SPACES_ZERO_GPU") is not None:
+#    import spaces
 else:
     class spaces:
         @staticmethod
@@ -23,10 +23,10 @@ from typing import Union
 
 LOAD_IN_NF4 = True
 
-if os.environ.get("SPACES_ZERO_GPU") is not None:
-    import subprocess
-    LOAD_IN_NF4 = False # If true, Custom VLM LoRA doesn't work initially. The rest are fine.
-    subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
+#if os.environ.get("SPACES_ZERO_GPU") is not None:
+#    import subprocess
+#    LOAD_IN_NF4 = False # If true, Custom VLM LoRA doesn't work initially. The rest are fine.
+#    subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
 
 BASE_DIR = Path(__file__).resolve().parent # Define the base directory
 device = "cuda" if torch.cuda.is_available() else "cpu"
@@ -474,7 +474,7 @@ def get_repo_gguf(repo_id: str):
     else: return gr.update(value=files[0], choices=files)
 
 
-@spaces.GPU
+#@spaces.GPU
 def change_text_model(model_name: str=MODEL_PATH, use_client: bool=False, gguf_file: Union[str, None]=None,
                       is_nf4: bool=True, is_lora: bool=True, progress=gr.Progress(track_tqdm=True)):
     global use_inference_client, llm_models
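
This commit disables the Hugging Face Spaces ZeroGPU-specific setup: the conditional `spaces` import, the runtime flash-attn install, and the `@spaces.GPU` decorator on `change_text_model` are all commented out. Note that commenting out the `if` in the first hunk leaves its `else:` without a matching `if`, which Python rejects as a `SyntaxError`, so the fallback stub would need to be defined unconditionally. A minimal sketch of such a no-op stub, assuming it only has to make `@spaces.GPU` resolve on a non-Spaces machine (the `GPU` signature below is illustrative, not copied from joycaption.py):

    # No-op stand-in for the `spaces` package when running outside ZeroGPU.
    class spaces:
        @staticmethod
        def GPU(*args, **kwargs):
            # Support both bare `@spaces.GPU` and `@spaces.GPU(duration=...)`.
            if len(args) == 1 and callable(args[0]) and not kwargs:
                return args[0]          # used as a bare decorator
            def passthrough(fn):
                return fn               # used as a decorator factory
            return passthrough

With a stub like this in place, decorated functions run unchanged on a local GPU or CPU box.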
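The second hunk also drops the runtime flash-attn install. If it is ever restored, one caveat worth noting: the original `subprocess.run` call passes a bare `env=` dict, which replaces the entire environment (including `PATH`) for the child process. A hedged sketch of the same gated install with the environment merged instead, assuming `FLASH_ATTENTION_SKIP_CUDA_BUILD=TRUE` is still the intended way to skip the local CUDA build:

    import os
    import subprocess

    # Same ZeroGPU-gated install the commit commented out, but with the
    # current environment preserved so `pip` stays on PATH.
    if os.environ.get("SPACES_ZERO_GPU") is not None:
        subprocess.run(
            "pip install flash-attn --no-build-isolation",
            env={**os.environ, "FLASH_ATTENTION_SKIP_CUDA_BUILD": "TRUE"},
            shell=True,
        )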