jattokatarratto commited on
Commit
e18819a
·
verified ·
1 Parent(s): ef18338

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -8
app.py CHANGED
@@ -1,11 +1,5 @@
1
  import os
2
 
3
- os.environ["CUDA_VISIBLE_DEVICES"] = "1,6" # to use GPUs 1 and 6 only
4
-
5
- os.environ["HF_HUB_CACHE"] = "/eos/jeodpp/home/users/consose/cache/huggingface/hub"
6
- os.environ["HUGGINGFACE_HUB_CACHE"] = "/eos/jeodpp/home/users/consose/cache/huggingface/hub"
7
- os.environ["HF_HOME"] = "/eos/jeodpp/home/users/consose/cache/huggingface/hub"
8
-
9
  from transformers import file_utils
10
  print(file_utils.default_cache_path)
11
 
@@ -27,10 +21,10 @@ from concurrent.futures import ThreadPoolExecutor, as_completed
27
  from collections import Counter
28
 
29
  #os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:512"
30
- os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'expandable_segments:True'
31
 
32
  import torch
33
- torch.cuda.empty_cache() # Clear cache of torch
34
 
35
  device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
36
  print(f"Device: {device}...")
 
1
  import os
2
 
 
 
 
 
 
 
3
  from transformers import file_utils
4
  print(file_utils.default_cache_path)
5
 
 
21
  from collections import Counter
22
 
23
  #os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:512"
24
+ #os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'expandable_segments:True'
25
 
26
  import torch
27
+ #torch.cuda.empty_cache() # Clear cache of torch
28
 
29
  device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
30
  print(f"Device: {device}...")