yrshi committed on
Commit
0980c20
·
1 Parent(s): 2770f17

Changed the position of the spaces.GPU decorator.

Browse files
Files changed (1) hide show
  1. app.py +4 -4
app.py CHANGED
@@ -1,6 +1,6 @@
1
- import subprocess
2
- subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
3
- subprocess.run('pip install -U timm', shell=True)
4
  import spaces
5
  import os
6
  import torch
@@ -197,6 +197,7 @@ class InferenceRunner:
197
  graph_list.append(graph_item)
198
  return graph_list, input_text
199
 
 
200
  @torch.no_grad()
201
  def predict(self, rxn_dict, temperature=1):
202
  graphs, prompt_tokens = self.tokenize(rxn_dict)
@@ -236,7 +237,6 @@ class InferenceRunner:
236
  input_prompt_tokens['is_mol_token'] = is_mol_token
237
  return graphs, input_prompt_tokens
238
 
239
- @spaces.GPU
240
  def main(args):
241
  device = torch.device('cuda')
242
  # model
 
1
+ # import subprocess
2
+ # subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
3
+ # subprocess.run('pip install -U timm', shell=True)
4
  import spaces
5
  import os
6
  import torch
 
197
  graph_list.append(graph_item)
198
  return graph_list, input_text
199
 
200
+ @spaces.GPU
201
  @torch.no_grad()
202
  def predict(self, rxn_dict, temperature=1):
203
  graphs, prompt_tokens = self.tokenize(rxn_dict)
 
237
  input_prompt_tokens['is_mol_token'] = is_mol_token
238
  return graphs, input_prompt_tokens
239
 
 
240
  def main(args):
241
  device = torch.device('cuda')
242
  # model