prithivMLmods committed on
Commit
06d2d7b
·
verified ·
1 Parent(s): 76967ad

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1 -2
app.py CHANGED
@@ -46,7 +46,6 @@ DEFAULT_MAX_NEW_TOKENS = 1024
46
  MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
47
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
48
 
49
-
50
  # Load text-only model and tokenizer
51
  model_id = "prithivMLmods/FastThink-0.5B-Tiny"
52
  tokenizer = AutoTokenizer.from_pretrained(model_id)
@@ -156,7 +155,7 @@ STYLE_NAMES = list(styles.keys())
156
  def apply_style(style_name: str, positive: str) -> str:
157
  return styles.get(style_name, styles[DEFAULT_STYLE_NAME]).replace("{prompt}", positive)
158
 
159
- @spaces.GPU(duration=60, enable_queue=True)
160
  def generate_image_flux(
161
  prompt: str,
162
  seed: int = 0,
 
46
  MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
47
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
48
 
 
49
  # Load text-only model and tokenizer
50
  model_id = "prithivMLmods/FastThink-0.5B-Tiny"
51
  tokenizer = AutoTokenizer.from_pretrained(model_id)
 
155
  def apply_style(style_name: str, positive: str) -> str:
156
  return styles.get(style_name, styles[DEFAULT_STYLE_NAME]).replace("{prompt}", positive)
157
 
158
+ @spaces.GPU
159
  def generate_image_flux(
160
  prompt: str,
161
  seed: int = 0,