ford442 committed
Commit 645ce84 · verified · 1 parent: f80d9ec

Update app.py

Files changed (1): app.py (+12, -10)
app.py CHANGED
@@ -36,9 +36,9 @@ torch.backends.cuda.preferred_blas_library="cublas"
 
 hftoken = os.getenv("HF_AUTH_TOKEN")
 
-image_encoder_path = "google/siglip-so400m-patch14-384"
-ipadapter_path = hf_hub_download(repo_id="InstantX/SD3.5-Large-IP-Adapter", filename="ip-adapter.bin")
-model_path = 'ford442/stable-diffusion-3.5-medium-bf16'
+#image_encoder_path = "google/siglip-so400m-patch14-384"
+#ipadapter_path = hf_hub_download(repo_id="InstantX/SD3.5-Large-IP-Adapter", filename="ip-adapter.bin")
+#model_path = 'ford442/stable-diffusion-3.5-medium-bf16'
 
 def upload_to_ftp(filename):
     try:
@@ -56,7 +56,7 @@ def upload_to_ftp(filename):
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 torch_dtype = torch.bfloat16
 
-checkpoint = "microsoft/Phi-3.5-mini-instruct"
+#checkpoint = "microsoft/Phi-3.5-mini-instruct"
 #vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
 #vae = AutoencoderKL.from_pretrained("ford442/sdxl-vae-bf16")
 #vae = AutoencoderKL.from_pretrained("ford442/sdxl-vae-bf16")
@@ -102,9 +102,9 @@ pipe.to(device=device, dtype=torch.bfloat16)
 #refiner.scheduler = EulerAncestralDiscreteScheduler.from_config(refiner.scheduler.config, beta_schedule="scaled_linear")
 #refiner.scheduler = EulerAncestralDiscreteScheduler.from_config(refiner.scheduler.config)
 
-tokenizer = AutoTokenizer.from_pretrained(checkpoint, add_prefix_space=True)
-tokenizer.tokenizer_legacy=False
-model = AutoModelForCausalLM.from_pretrained(checkpoint).to('cuda')
+#tokenizer = AutoTokenizer.from_pretrained(checkpoint, add_prefix_space=True)
+#tokenizer.tokenizer_legacy=False
+#model = AutoModelForCausalLM.from_pretrained(checkpoint).to('cuda')
 #model = torch.compile(model)
 
 upscaler_2 = UpscaleWithModel.from_pretrained("Kim2091/ClearRealityV1").to(torch.device("cuda:0"))
@@ -153,6 +153,7 @@ def infer(
     torch.set_float32_matmul_precision("highest")
     seed = random.randint(0, MAX_SEED)
     generator = torch.Generator(device='cuda').manual_seed(seed)
+    '''
     if expanded:
         system_prompt_rewrite = (
            "You are an AI assistant that rewrites image prompts to be more descriptive and detailed."
@@ -204,9 +205,10 @@ def infer(
         print('-- filtered prompt 2 --')
         print(enhanced_prompt_2)
     else:
-        enhanced_prompt = prompt
-        enhanced_prompt_2 = prompt
-        model.to('cpu')
+    '''
+    enhanced_prompt = prompt
+    enhanced_prompt_2 = prompt
+    #model.to('cpu')
     if latent_file: # Check if a latent file is provided
         # initial_latents = pipe.prepare_latents(
         # batch_size=1,
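
For context, the "if expanded:" branch that this commit comments out routed the user's prompt through the Phi-3.5 instruct model before diffusion. Below is a minimal sketch of that kind of prompt-expansion step, assuming only the checkpoint name and system prompt visible in the diff; the expand_prompt helper and its generation settings are illustrative, not taken from app.py.

    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer

    # Names mirror the removed lines; the real app.py may wire this differently.
    checkpoint = "microsoft/Phi-3.5-mini-instruct"
    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    model = AutoModelForCausalLM.from_pretrained(
        checkpoint, torch_dtype=torch.bfloat16
    ).to("cuda")

    def expand_prompt(prompt: str) -> str:
        # Hypothetical helper: ask the instruct model to rewrite the image prompt.
        messages = [
            {"role": "system",
             "content": "You are an AI assistant that rewrites image prompts to be more descriptive and detailed."},
            {"role": "user", "content": prompt},
        ]
        # apply_chat_template formats the conversation for the instruct model.
        input_ids = tokenizer.apply_chat_template(
            messages, add_generation_prompt=True, return_tensors="pt"
        ).to(model.device)
        output_ids = model.generate(input_ids, max_new_tokens=128, do_sample=True)
        # Decode only the tokens generated after the prompt.
        return tokenizer.decode(output_ids[0, input_ids.shape[-1]:], skip_special_tokens=True)

With the rewrite disabled, infer() falls straight through to enhanced_prompt = prompt, so the Phi-3.5 weights never have to be loaded or shuttled between GPU and CPU alongside the SD3.5 pipeline.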