ajsbsd committed on
Commit 8dd0d9d · verified · 1 Parent(s): 3d488b6

Update app.py

Files changed (1)
  1. app.py +32 -17
app.py CHANGED
@@ -1,10 +1,15 @@
+ You can easily add that blurb by inserting a `gr.Markdown()` component within the same `gr.Column()` as your `sample_input_slider` and `run_button`. This effectively places it within Gradio's "flexbox" layout, ensuring it's always visible below the slider and button.
+ 
+ Here's your `app.py` code with the blurb added in the correct place. I've also updated the `run_inference` function to explicitly target `torch.device("cpu")` and removed the `@spaces.GPU()` decorator, which aligns with your successful run on ZeroCPU.
+ 
+ ```python
  import gradio as gr
  import torch
  from neuralop.models import FNO
  import matplotlib.pyplot as plt
  import numpy as np
  import os
- import spaces
+ # import spaces # No longer needed if running purely on CPU and not using @spaces.GPU()
  from huggingface_hub import hf_hub_download
  
  # --- Configuration ---
@@ -66,39 +71,37 @@ def load_dataset():
          raise gr.Error(f"Failed to load dataset from local file: {e}")
      return FULL_DATASET_X
  
- # --- 3. Inference Function for Gradio (MODIFIED: Explicit device handling) ---
- @spaces.GPU()
+ # --- 3. Inference Function for Gradio ---
+ # Removed @spaces.GPU() decorator as you're running on ZeroCPU
  def run_inference(sample_index: int):
      """
-     Performs inference for a selected sample index from the dataset.
-     Ensures model and input are on the correct device (GPU).
+     Performs inference for a selected sample index from the dataset on CPU.
      Returns two Matplotlib figures: one for input, one for output.
      """
-     # Determine the target device (GPU if available, else CPU)
-     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+     # Determine the target device (always CPU for ZeroCPU space)
+     device = torch.device("cpu") # Explicitly set to CPU as you're on ZeroCPU
  
      model = load_model() # Model is initially loaded to CPU
  
-     # Move model to the correct device ONLY when inside the @spaces.GPU() decorated function
-     # and only if it's not already on the target device.
+     # Model device check is still good practice, even if always CPU here
      if next(model.parameters()).device != device:
          model.to(device)
-         print(f"Model moved to {device} within run_inference.")
+         print(f"Model moved to {device} within run_inference.") # Will now print 'Model moved to cpu...'
  
      dataset = load_dataset()
  
      if not (0 <= sample_index < dataset.shape[0]):
          raise gr.Error(f"Sample index out of range. Please choose between 0 and {dataset.shape[0]-1}.")
  
-     # Move input tensor to the correct device directly
+     # Move input tensor to the correct device
      single_initial_condition = dataset[sample_index:sample_index+1, :, :].unsqueeze(1).to(device)
-     print(f"Input moved to {device}.")
+     print(f"Input moved to {device}.") # Will now print 'Input moved to cpu.'
  
      print(f"Running inference for sample index {sample_index}...")
      with torch.no_grad(): # Disable gradient calculations for inference
-         predicted_solution = model(single_initial_condition) # This is where the error occurred before
+         predicted_solution = model(single_initial_condition)
  
-     # Move results back to CPU for plotting with Matplotlib
+     # Move results back to CPU for plotting with Matplotlib (already on CPU now)
      input_numpy = single_initial_condition.squeeze().cpu().numpy()
      output_numpy = predicted_solution.squeeze().cpu().numpy()
  
@@ -117,7 +120,7 @@ def run_inference(sample_index: int):
  
      return fig_input, fig_output
  
- # --- Gradio Interface Setup (No change) ---
+ # --- Gradio Interface Setup (MODIFIED to add blurb) ---
  with gr.Blocks() as demo:
      gr.Markdown(
          """
@@ -137,6 +140,16 @@ with gr.Blocks() as demo:
                  label="Select Sample Index"
              )
              run_button = gr.Button("Generate Solution")
+ 
+             # --- ADDED BLURB HERE ---
+             gr.Markdown(
+                 """
+                 ### Project Inspiration
+                 This Hugging Face Space demonstrates the concepts and models from the research paper **'Principled approaches for extending neural architectures to function spaces for operator learning'** (available as a preprint on [arXiv](https://arxiv.org/abs/2506.10973)). The underlying code for the neural operators and the experiments can be explored further in the associated [GitHub repository](https://github.com/neuraloperator/NNs-to-NOs). The Navier-Stokes dataset used for training and inference, crucial for these fluid dynamics simulations, is openly accessible and citable via [Zenodo](https://zenodo.org/records/12825163).
+                 """
+             )
+             # --- END ADDED BLURB ---
+ 
          with gr.Column():
              input_image_plot = gr.Plot(label="Selected Initial Condition")
              output_image_plot = gr.Plot(label="Predicted Solution")
@@ -151,10 +164,12 @@ with gr.Blocks() as demo:
          # These functions are called during main process startup (CPU)
          load_model()
          load_dataset()
-         # The actual inference call here will ensure GPU utilization via @spaces.GPU()
+         # The actual inference call here will now run on CPU
          return run_inference(0)
  
      demo.load(load_initial_data_and_predict, inputs=None, outputs=[input_image_plot, output_image_plot])
  
  if __name__ == "__main__":
-     demo.launch()
+     demo.launch()
+ 
+ ```
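
The substance of this commit is the device-handling change in `run_inference`: the device is pinned to `torch.device("cpu")` rather than probed with `torch.cuda.is_available()`, and the `@spaces.GPU()` decorator is dropped. Below is a minimal, self-contained sketch of the same pattern; the `run_on` helper is hypothetical and not part of the app's code.

```python
import torch

def run_on(model: torch.nn.Module, x: torch.Tensor, device_str: str = "cpu") -> torch.Tensor:
    """Run inference on the requested device and return the result on CPU."""
    device = torch.device(device_str)
    # Only move the model if it is not already on the target device.
    if next(model.parameters()).device != device:
        model = model.to(device)
    x = x.to(device)
    with torch.no_grad():  # gradients are not needed for inference
        y = model(x)
    return y.cpu()  # Matplotlib/NumPy consumers expect CPU tensors

```

On a ZeroCPU Space the `"cpu"` default is always correct; the same helper would run unchanged on a GPU Space with `device_str="cuda"`.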
 
 
 
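
The blurb placement in the fourth hunk relies on Gradio's nesting rule: components instantiated inside a `with gr.Column():` block render top-to-bottom within that column. A stripped-down sketch of the resulting layout, assuming the two columns sit side by side in a `gr.Row()` as the diff context suggests (the slider range and blurb text here are placeholders, not the app's actual values):

```python
import gradio as gr

with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            sample_input_slider = gr.Slider(0, 9, step=1, label="Select Sample Index")
            run_button = gr.Button("Generate Solution")
            # Created after the button inside the same column, so it always
            # renders directly below the slider and button.
            gr.Markdown("### Project Inspiration\nBlurb text goes here.")
        with gr.Column():
            input_image_plot = gr.Plot(label="Selected Initial Condition")
            output_image_plot = gr.Plot(label="Predicted Solution")

demo.launch()
```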
+ print(f"Model moved to {device} within run_inference.") # Will now print 'Model moved to cpu...'
90
 
91
  dataset = load_dataset()
92
 
93
  if not (0 <= sample_index < dataset.shape[0]):
94
  raise gr.Error(f"Sample index out of range. Please choose between 0 and {dataset.shape[0]-1}.")
95
 
96
+ # Move input tensor to the correct device
97
  single_initial_condition = dataset[sample_index:sample_index+1, :, :].unsqueeze(1).to(device)
98
+ print(f"Input moved to {device}.") # Will now print 'Input moved to cpu.'
99
 
100
  print(f"Running inference for sample index {sample_index}...")
101
  with torch.no_grad(): # Disable gradient calculations for inference
102
+ predicted_solution = model(single_initial_condition)
103
 
104
+ # Move results back to CPU for plotting with Matplotlib (already on CPU now)
105
  input_numpy = single_initial_condition.squeeze().cpu().numpy()
106
  output_numpy = predicted_solution.squeeze().cpu().numpy()
107
 
 
120
 
121
  return fig_input, fig_output
122
 
123
+ # --- Gradio Interface Setup (MODIFIED to add blurb) ---
124
  with gr.Blocks() as demo:
125
  gr.Markdown(
126
  """
 
140
  label="Select Sample Index"
141
  )
142
  run_button = gr.Button("Generate Solution")
143
+
144
+ # --- ADDED BLURB HERE ---
145
+ gr.Markdown(
146
+ """
147
+ ### Project Inspiration
148
+ This Hugging Face Space demonstrates the concepts and models from the research paper **'Principled approaches for extending neural architectures to function spaces for operator learning'** (available as a preprint on [arXiv](https://arxiv.org/abs/2506.10973)). The underlying code for the neural operators and the experiments can be explored further in the associated [GitHub repository](https://github.com/neuraloperator/NNs-to-NOs). The Navier-Stokes dataset used for training and inference, crucial for these fluid dynamics simulations, is openly accessible and citable via [Zenodo](https://zenodo.org/records/12825163).
149
+ """
150
+ )
151
+ # --- END ADDED BLURB ---
152
+
153
  with gr.Column():
154
  input_image_plot = gr.Plot(label="Selected Initial Condition")
155
  output_image_plot = gr.Plot(label="Predicted Solution")
 
164
  # These functions are called during main process startup (CPU)
165
  load_model()
166
  load_dataset()
167
+ # The actual inference call here will now run on CPU
168
  return run_inference(0)
169
 
170
  demo.load(load_initial_data_and_predict, inputs=None, outputs=[input_image_plot, output_image_plot])
171
 
172
  if __name__ == "__main__":
173
+ demo.launch()
174
+
175
+ ```
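
The diff calls `load_model()` and `load_dataset()` without showing their bodies; given the imports (`FNO` from `neuralop.models`, `hf_hub_download`), the model is presumably rebuilt from a checkpoint fetched from the Hub and kept on CPU at startup. A hedged sketch of what such a loader could look like; the repo id, filename, and FNO hyperparameters below are illustrative placeholders, not the app's real configuration:

```python
import torch
from huggingface_hub import hf_hub_download
from neuralop.models import FNO

def load_model() -> FNO:
    # Placeholder repo and filename: the real values live in the app's
    # configuration section, which this diff does not show.
    weights_path = hf_hub_download(repo_id="some-user/fno-navier-stokes",
                                   filename="fno_weights.pt")
    # Illustrative FNO hyperparameters; they must match the trained checkpoint.
    model = FNO(n_modes=(16, 16), hidden_channels=32, in_channels=1, out_channels=1)
    state_dict = torch.load(weights_path, map_location="cpu")  # keep on CPU at startup
    model.load_state_dict(state_dict)
    model.eval()  # disable dropout/batch-norm updates for inference
    return model
```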