Update app.py
app.py CHANGED
@@ -7,6 +7,7 @@ import zipfile
 import os
 import torch
 
+
 # ZIP paths for the manuals and problems indexes
 zip_path_m = "faiss_manual_index.zip"
 faiss_manual_index = "faiss_manual_index"
@@ -31,6 +32,15 @@ problems_vectorstore = FAISS.load_local(faiss_problems_index, embedding_model, a
 
 # Load the GPT-J model from Hugging Face
 model_name = "EleutherAI/gpt-j-6B"
+
+# Force CPU usage
+model = AutoModelForCausalLM.from_pretrained(
+    model_name,
+    torch_dtype=torch.float32,  # float32 for the CPU
+    device_map={"": "cpu"}  # specify the CPU as the device
+)
+
+tokenizer = AutoTokenizer.from_pretrained(model_name)
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, device_map="auto")
 
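For reference, below is a minimal standalone sketch of the CPU-forced loading pattern this commit introduces. It assumes transformers, torch, and accelerate are installed (device_map requires accelerate); the prompt and the generation parameters are illustrative assumptions, not taken from the Space's code.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "EleutherAI/gpt-j-6B"

# Load every module onto the CPU in float32, mirroring the change above
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float32,  # CPU kernels generally expect float32
    device_map={"": "cpu"},     # the "" key maps the whole model to the CPU
)
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Illustrative smoke test (prompt and max_new_tokens are assumptions)
inputs = tokenizer("Hello, world", return_tensors="pt")
output_ids = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))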