jattokatarratto committed
Commit 90dc03c · verified · 1 Parent(s): 57e9633

Update app-demo-myMultiNER.py

Files changed (1):
  1. app-demo-myMultiNER.py +8 -16
app-demo-myMultiNER.py CHANGED
@@ -1,13 +1,13 @@
 import os
 
-os.environ["CUDA_VISIBLE_DEVICES"] = "1,6" # to use the GPUs 3,4 only
-
-os.environ["HF_HUB_CACHE"] = "/eos/jeodpp/home/users/consose/cache/huggingface/hub"
-os.environ["HUGGINGFACE_HUB_CACHE"] = "/eos/jeodpp/home/users/consose/cache/huggingface/hub"
-os.environ["HF_HOME"] = "/eos/jeodpp/home/users/consose/cache/huggingface/hub"
+#os.environ["CUDA_VISIBLE_DEVICES"] = "1,6" # to use the GPUs 3,4 only
+#
+#os.environ["HF_HUB_CACHE"] = "/eos/jeodpp/home/users/consose/cache/huggingface/hub"
+#os.environ["HUGGINGFACE_HUB_CACHE"] = "/eos/jeodpp/home/users/consose/cache/huggingface/hub"
+#os.environ["HF_HOME"] = "/eos/jeodpp/home/users/consose/cache/huggingface/hub"
 
 from transformers import file_utils
-print(file_utils.default_cache_path)
+#print(file_utils.default_cache_path)
 
 import pandas as pd
 from tqdm import tqdm
@@ -19,12 +19,12 @@ from collections import Counter
 from transformers import pipeline, AutoTokenizer
 
 #os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:512"
-os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'expandable_segments:True'
+#os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'expandable_segments:True'
 
 #import html
 
 import torch
-torch.cuda.empty_cache() # Clear cache ot torch
+#torch.cuda.empty_cache() # Clear cache ot torch
 
 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
 print(f"Device: {device}...")
@@ -48,14 +48,6 @@ from nerBio import annotate, entitiesFusion, is_cross_inside, elinking
 from llmqueryNer import call_model, call_model_with_caching, process_list, setup_gptjrc, api_call_gptjrc, model_list_gptjrc
 
 
-from joblib import Memory
-
-cachedir = 'cached'
-mem = Memory(cachedir, verbose=False)
-
-# this is to completely delete the cache:
-# mem.clear(warn=False)
-
 
 
 
 
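The first hunk comments out environment configuration that only works when it runs before the libraries read it: CUDA_VISIBLE_DEVICES must be set before CUDA initializes (the inline comment naming "GPUs 3,4" looks stale against the "1,6" value), and the Hugging Face cache variables must be set before transformers is imported, since default_cache_path is resolved when the module first loads. A minimal sketch of that ordering, using a placeholder cache path rather than the repo's EOS path:

import os

# Expose only physical GPUs 1 and 6; inside this process they appear
# as cuda:0 and cuda:1.
os.environ["CUDA_VISIBLE_DEVICES"] = "1,6"

# Placeholder location (an assumption, not the original EOS path).
cache_dir = os.path.expanduser("~/.cache/huggingface/hub")
os.environ["HF_HOME"] = cache_dir
os.environ["HF_HUB_CACHE"] = cache_dir
os.environ["HUGGINGFACE_HUB_CACHE"] = cache_dir

# Import transformers only after the variables are set; otherwise the
# defaults have already been baked in.
from transformers import file_utils
print(file_utils.default_cache_path)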
 
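The second hunk disables the allocator tweak and the startup cache flush rather than deleting them. If expandable_segments is ever re-enabled, ordering matters here too: PYTORCH_CUDA_ALLOC_CONF is read when PyTorch's CUDA caching allocator initializes, so it should be exported before the first CUDA allocation; the original file set it only after transformers (which itself imports torch) had loaded, which still works but cuts it close. A hedged sketch of the safer ordering:

import os

# Must be in the environment before the first CUDA allocation; once the
# caching allocator has initialized, changing it has no effect.
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"

import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(f"Device: {device}...")

if torch.cuda.is_available():
    torch.cuda.empty_cache()  # release cached blocks back to the driver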
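The last hunk removes a joblib disk cache that this part of the file created but never applied. For reference, a Memory object like the removed one is normally used as a decorator; a minimal, self-contained sketch (slow_square is a hypothetical stand-in for an expensive call such as a model query):

from joblib import Memory

cachedir = 'cached'
mem = Memory(cachedir, verbose=False)

@mem.cache  # memoize results on disk under 'cached/', keyed by the arguments
def slow_square(x):
    return x * x  # stand-in for an expensive computation

print(slow_square(4))  # computed and written to disk on the first call
print(slow_square(4))  # served from the cache on the second call

# this is to completely delete the cache:
# mem.clear(warn=False)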