prithivMLmods committed (verified)
Commit a658528 · 1 Parent(s): 45b302e

Update app.py

Files changed (1): app.py (+13 -1)
app.py CHANGED
@@ -4,20 +4,31 @@ import torch
 from PIL import Image
 from transformers import AutoProcessor, AutoModelForCausalLM
 
-# Attempt to install flash-attn
+# Upgrade transformers to the latest version
+try:
+    subprocess.run('pip install --upgrade transformers', check=True, shell=True)
+    print("Successfully upgraded transformers.")
+except subprocess.CalledProcessError as e:
+    print(f"Error upgrading transformers: {e}")
+    print("Continuing with the current version, but this may cause issues.")
+
+# Attempt to install flash-attn (optional, for performance)
 try:
     subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, check=True, shell=True)
+    print("Successfully installed flash-attn.")
 except subprocess.CalledProcessError as e:
     print(f"Error installing flash-attn: {e}")
     print("Continuing without flash-attn.")
 
 # Determine the device to use
 device = "cuda" if torch.cuda.is_available() else "cpu"
+print(f"Using device: {device}")
 
 # Load the base model and processor
 try:
     vision_language_model_base = AutoModelForCausalLM.from_pretrained('microsoft/Florence-2-base', trust_remote_code=True).to(device).eval()
     vision_language_processor_base = AutoProcessor.from_pretrained('microsoft/Florence-2-base', trust_remote_code=True)
+    print("Base model and processor loaded successfully.")
 except Exception as e:
     print(f"Error loading base model: {e}")
     vision_language_model_base = None
@@ -27,6 +38,7 @@ except Exception as e:
 try:
     vision_language_model_large = AutoModelForCausalLM.from_pretrained('microsoft/Florence-2-large', trust_remote_code=True).to(device).eval()
     vision_language_processor_large = AutoProcessor.from_pretrained('microsoft/Florence-2-large', trust_remote_code=True)
+    print("Large model and processor loaded successfully.")
 except Exception as e:
     print(f"Error loading large model: {e}")
     vision_language_model_large = None
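
For context on how the model/processor pairs loaded above are typically driven downstream, here is a minimal sketch following the documented Florence-2 usage pattern (task prompt in, post_process_generation out). The helper name generate_caption and the input image path are illustrative, not part of this commit; the variable names vision_language_model_base and vision_language_processor_base come from the diff above.

from PIL import Image

def generate_caption(image, model, processor, task_prompt="<CAPTION>"):
    # Tokenize the task prompt together with the image, on the model's device.
    inputs = processor(text=task_prompt, images=image, return_tensors="pt").to(model.device)
    # Florence-2 conditions generation on both input_ids and pixel_values.
    generated_ids = model.generate(
        input_ids=inputs["input_ids"],
        pixel_values=inputs["pixel_values"],
        max_new_tokens=1024,
        num_beams=3,
    )
    generated_text = processor.batch_decode(generated_ids, skip_special_tokens=False)[0]
    # Parse the raw generation into the task's structured output.
    parsed = processor.post_process_generation(
        generated_text, task=task_prompt, image_size=(image.width, image.height)
    )
    return parsed[task_prompt]

# Illustrative usage (the image path is hypothetical):
# image = Image.open("example.jpg").convert("RGB")
# print(generate_caption(image, vision_language_model_base, vision_language_processor_base))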