fantos committed on
Commit
8a9898f
·
verified ·
1 Parent(s): dc5358b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +33 -13
app.py CHANGED
@@ -8,8 +8,9 @@ import os
8
 
9
  import torch.nn.functional as F
10
 
11
- # Check for CUDA availability but fallback to CPU
12
- device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
 
13
 
14
  norm_layer = nn.InstanceNorm2d
15
 
@@ -86,28 +87,41 @@ class Generator(nn.Module):
86
  out = self.model4(out)
87
  return out
88
 
89
- # Initialize models
90
  def load_models():
91
  try:
92
- model1 = Generator(3, 1, 3).to(device)
93
- model2 = Generator(3, 1, 3).to(device)
94
 
95
- # Load local model files
96
- model1.load_state_dict(torch.load('model.pth', map_location=device))
97
- model2.load_state_dict(torch.load('model2.pth', map_location=device))
98
 
 
 
 
 
 
99
  model1.eval()
100
  model2.eval()
 
 
 
 
 
101
  return model1, model2
102
  except Exception as e:
103
- print(f"Error loading models: {str(e)}")
104
- raise gr.Error("Failed to load models. Please check if model files exist in the correct location.")
 
105
 
 
106
  try:
 
107
  model1, model2 = load_models()
 
108
  except Exception as e:
109
- print(f"Model initialization failed: {str(e)}")
110
- model1 = model2 = None
111
 
112
  def apply_style_transfer(img, strength=1.0):
113
  """Apply artistic style transfer effect"""
@@ -287,4 +301,10 @@ with gr.Blocks(css=custom_css) as iface:
287
  )
288
 
289
  # Launch the interface
290
- iface.launch()
 
 
 
 
 
 
 
8
 
9
  import torch.nn.functional as F
10
 
11
# Zero GPU environment: run everything on the CPU.
# Cap the worker-thread count so CPU inference stays responsive on shared hardware.
torch.set_num_threads(4)
device = torch.device('cpu')

norm_layer = nn.InstanceNorm2d
16
 
 
87
  out = self.model4(out)
88
  return out
89
 
90
# Initialize models with error handling and memory optimization
def load_models():
    """Build both CPU-mode generators and load their pretrained weights.

    Returns:
        tuple: ``(model1, model2)`` — both generators in eval mode, with
        autograd globally disabled for inference.

    Raises:
        gr.Error: if either checkpoint fails to load (original exception is
        printed to stdout first).
    """
    try:
        print("Initializing models in CPU mode...")

        # Two identical generator networks; Generator(3, 1, 3) — presumably
        # (in_channels, out_channels, n_blocks), confirm against the class.
        generators = [Generator(3, 1, 3), Generator(3, 1, 3)]

        # Load each checkpoint with all tensors explicitly mapped to the CPU,
        # then switch the network to eval mode (fixes InstanceNorm behavior).
        for net, checkpoint in zip(generators, ('model.pth', 'model2.pth')):
            net.load_state_dict(torch.load(checkpoint, map_location='cpu'))
            net.eval()

        # Inference-only process: disable autograd globally to save memory.
        torch.set_grad_enabled(False)

        print("Models loaded successfully in CPU mode")
        return generators[0], generators[1]
    except Exception as e:
        print(f"Error loading models: {str(e)}")
        raise gr.Error("Failed to initialize models. Please check the model files and system configuration.")
116
 
117
# Load models with proper error handling
try:
    print("Starting model initialization...")
    model1, model2 = load_models()
    print("Model initialization completed")
except Exception as exc:
    # Surface the failure both on stdout (Space logs) and to the Gradio UI.
    print(f"Critical error during model initialization: {str(exc)}")
    raise gr.Error("Failed to start the application due to model initialization error.")
125
 
126
  def apply_style_transfer(img, strength=1.0):
127
  """Apply artistic style transfer effect"""
 
301
  )
302
 
303
# Launch the interface
# Fix: the `enable_queue` keyword was deprecated in Gradio 3.x and removed in
# 4.x (launch() raises TypeError on it). Queueing is now enabled on the Blocks
# object itself before launching.
iface.queue()
iface.launch(
    server_name="0.0.0.0",  # bind all interfaces — required for Hugging Face Spaces
    server_port=7860,       # default port expected by Hugging Face Spaces
    share=False,            # Spaces provides the public URL; no gradio.live tunnel
    debug=False,            # keep the production server quiet
)