Spaces: Runtime error
scontess committed · commit a16d3e9 · parent 48b6506
dopopull
src/streamlit_app.py
CHANGED
@@ -9,87 +9,42 @@ from tensorflow.keras.layers import Dense, Flatten
 from tensorflow.keras.models import Model, load_model
 from datasets import load_dataset
 import matplotlib.pyplot as plt
-from sklearn.metrics import confusion_matrix
+from sklearn.metrics import confusion_matrix, classification_report
 import seaborn as sns
 from huggingface_hub import HfApi
 import os
 
-
-# Folder where the dataset would be saved when using TFlow; not needed if hf_dataset_cache is set
-#DATA_DIR = "/app" #"/tmp"
-
-
-# Hugging Face authentication from the Secret in the Space
-HF_TOKEN = os.getenv("HF_TOKEN")
+# Cache path
 os.environ["HF_HOME"] = "/app/.cache"
 os.environ["HF_DATASETS_CACHE"] = "/app/.cache"
+HF_TOKEN = os.getenv("HF_TOKEN")
 
-
+# Hugging Face authentication
 if HF_TOKEN:
     api = HfApi()
     user_info = api.whoami(HF_TOKEN)
-
-    if "name" in user_info:
-        st.write(f"✅ Authenticated as {user_info['name']}")
-    else:
-        st.warning("⚠️ Invalid API token! Check the Secret in the Space.")
+    st.write(f"✅ Authenticated as {user_info.get('name', 'Unknown user')}")
 else:
     st.warning("⚠️ No API token found! Check the Secret in the Space.")
 
-# Load only 300 images from `imagenet_resized/64x64`
+# Dataset loading
 st.write("Loading 300 images from `tiny-imagenet`...")
-
-# Read the environment variable value
-hf_cache_path = os.getenv("HF_DATASETS_CACHE", "❌ Variable not set!")
-
-# Show the value in the Streamlit UI
-st.write(f"Hugging Face datasets cache: {hf_cache_path}")
-
-# Test whether the cache has the right permissions BEFORE loading the dataset
-test_file = "/app/.cache/test.txt"
-try:
-    with open(test_file, "w") as f:
-        f.write("Permissions test OK!")
-    st.write("✅ The folder has the right permissions!")
-except PermissionError:
-    st.error("❌ ERROR: the folder /app/.cache is not writable!")
-
-
-
-# Load the dataset directly from Hugging Face
-os.environ["HF_HOME"] = "/tmp/huggingface"
-os.environ["HF_DATASETS_CACHE"] = "/tmp/huggingface"
-os.makedirs(os.getenv("HF_DATASETS_CACHE"), exist_ok=True)
 dataset = load_dataset("zh-plus/tiny-imagenet", split="train")
 
-# Fetch the first example
-sample = dataset[0]
-image = sample["image"]
-label = sample["label"]
-
-# Show the image and its class in Streamlit
-st.image(image, caption=f"Class: {label}", use_container_width=True)
-st.write(f"Example from the dataset: {sample}")
-
-# Load the external imagenet dataset FOR TENSORFLOW
-#imagenet = tfds.load("imagenet_resized/64x64", split="train", as_supervised=True, download=True, data_dir=DATA_DIR)
-
 image_list = []
 label_list = []
 
-#for i, (image, label) in enumerate(imagenet.take(300)): # take only 300 images FOR TENSORFLOW
 for i, sample in enumerate(dataset):
     if i >= 300:  # take only 300 images
         break
-
-    image = tf.image.resize(image, (64, 64)) / 255.0  # Normalization
+    image = tf.image.resize(sample["image"], (64, 64)) / 255.0  # Normalization
     image_list.append(image.numpy())
     label_list.append(np.array(sample["label"]))
 
 X_train = np.array(image_list)
 y_train = np.array(label_list)
 
-st.write(f"✅ Downloaded and preprocessed {len(X_train)} images from `tiny-imagene/64x64`!")
+st.write(f"✅ Downloaded and preprocessed {len(X_train)} images from `tiny-imagenet/64x64`!")
 
 # Model loading
 if os.path.exists("Silva.h5"):
@@ -97,35 +52,25 @@ if os.path.exists("Silva.h5"):
     st.write("✅ Model `Silva.h5` loaded, no new training needed!")
 else:
     st.write("Training in progress because `Silva.h5` does not exist...")
-
-    # Load the pre-trained VGG16 model
+
     base_model = VGG16(weights="imagenet", include_top=False, input_shape=(64, 64, 3))
-
-    # Freeze the convolutional layers
     for layer in base_model.layers:
         layer.trainable = False
 
-    # Add new Dense layers
     x = Flatten()(base_model.output)
     x = Dense(256, activation="relu")(x)
     x = Dense(128, activation="relu")(x)
     output = Dense(len(set(y_train)), activation="softmax")(x)
 
-    # Create the final model
     model = Model(inputs=base_model.input, outputs=output)
    model.compile(optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"])
 
-    # Training progress bar
+    # Training with progress bar
     progress_bar = st.progress(0)
     status_text = st.empty()
     start_time = time.time()
 
-
-    for epoch in range(10):
-        history = model.fit(X_train, y_train, epochs=1)
-        progress_bar.progress((epoch + 1) / 10)
-        elapsed_time = time.time() - start_time
-        status_text.text(f"⏳ Estimated time remaining: {int(elapsed_time / (epoch + 1) * (10 - (epoch + 1)))} seconds")
+    history = model.fit(X_train, y_train, epochs=10)
 
     st.write("✅ Training complete!")
 
@@ -133,6 +78,40 @@ else:
     model.save("Silva.h5")
     st.write("✅ Model saved as `Silva.h5`!")
 
+# Metrics
+y_pred = np.argmax(model.predict(X_train), axis=1)
+accuracy = np.mean(y_pred == y_train)
+rmse = np.sqrt(np.mean((y_pred - y_train) ** 2))
+report = classification_report(y_train, y_pred, output_dict=True)
+
+recall = report["weighted avg"]["recall"]
+precision = report["weighted avg"]["precision"]
+f1_score = report["weighted avg"]["f1-score"]
+
+st.write(f"**Accuracy:** {accuracy:.4f}")
+st.write(f"**RMSE:** {rmse:.4f}")
+st.write(f"**Precision:** {precision:.4f}")
+st.write(f"**Recall:** {recall:.4f}")
+st.write(f"**F1-Score:** {f1_score:.4f}")
+
+# Button to generate the confusion matrix
+if st.button("Generate confusion matrix"):
+    conf_matrix = confusion_matrix(y_train, y_pred)
+    fig, ax = plt.subplots(figsize=(10, 7))
+    sns.heatmap(conf_matrix, annot=True, cmap="Blues", fmt="d", ax=ax)
+    st.pyplot(fig)
+    st.write("✅ Confusion matrix generated!")
+
+# Plots for loss and accuracy
+fig, ax = plt.subplots(1, 2, figsize=(12, 5))
+ax[0].plot(history.history["loss"], label="Loss")
+ax[1].plot(history.history["accuracy"], label="Accuracy")
+ax[0].set_title("Loss during training")
+ax[1].set_title("Accuracy during training")
+ax[0].legend()
+ax[1].legend()
+st.pyplot(fig)
+
 # Button to download the model
 if os.path.exists("Silva.h5"):
     with open("Silva.h5", "rb") as f:
@@ -148,11 +127,11 @@ def upload_model():
     api.upload_file(
         path_or_fileobj="Silva.h5",
         path_in_repo="Silva.h5",
-        repo_id="scontess/trainigVVG16", #"scontess/Silva",
+        repo_id="scontess/trainigVVG16",
         repo_type="space"
     )
-    st.success("✅ Model 'Silva' uploaded to Hugging Face!")
+    st.success("✅ Model 'Silva.h5' uploaded to Hugging Face!")
 
 st.write("Upload the Silva model to Hugging Face")
 if st.button("Upload Silva to the Model Store"):
-    upload_model()
+    upload_model()
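Note on the rewritten training step: the commit replaces the per-epoch loop with a single model.fit(X_train, y_train, epochs=10), so the progress_bar and status_text widgets created just above it are never advanced during training. If the per-epoch feedback is still wanted with a single fit() call, a standard Keras callback can drive the same widgets. A minimal sketch, assuming the model, X_train and y_train from the diff; the StreamlitProgress name and EPOCHS constant are illustrative, not part of the commit:

import time

import streamlit as st
import tensorflow as tf

EPOCHS = 10

class StreamlitProgress(tf.keras.callbacks.Callback):
    """Advance a Streamlit progress bar and an ETA label after each epoch."""

    def __init__(self, bar, status, epochs):
        super().__init__()
        self.bar = bar
        self.status = status
        self.epochs = epochs
        self.start = time.time()

    def on_epoch_end(self, epoch, logs=None):
        done = epoch + 1
        self.bar.progress(done / self.epochs)  # st.progress accepts a 0.0-1.0 float
        elapsed = time.time() - self.start
        remaining = int(elapsed / done * (self.epochs - done))
        self.status.text(f"⏳ Estimated time remaining: {remaining} seconds")

# Usage with the objects already defined in the app:
# progress_bar = st.progress(0)
# status_text = st.empty()
# history = model.fit(X_train, y_train, epochs=EPOCHS,
#                     callbacks=[StreamlitProgress(progress_bar, status_text, EPOCHS)])

This keeps the ETA estimate the old loop computed while letting Keras manage the epochs in one call.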
src/{streamlit_app_metriche.py → streamlit_app_old.py}
RENAMED
@@ -9,42 +9,87 @@ from tensorflow.keras.layers import Dense, Flatten
 from tensorflow.keras.models import Model, load_model
 from datasets import load_dataset
 import matplotlib.pyplot as plt
-from sklearn.metrics import confusion_matrix, classification_report
+from sklearn.metrics import confusion_matrix
 import seaborn as sns
 from huggingface_hub import HfApi
 import os
 
-# Cache path
+
+# Folder where the dataset would be saved when using TFlow; not needed if hf_dataset_cache is set
+#DATA_DIR = "/app" #"/tmp"
+
+
+# Hugging Face authentication from the Secret in the Space
+HF_TOKEN = os.getenv("HF_TOKEN")
 os.environ["HF_HOME"] = "/app/.cache"
 os.environ["HF_DATASETS_CACHE"] = "/app/.cache"
-HF_TOKEN = os.getenv("HF_TOKEN")
 
-# Hugging Face authentication
+
 if HF_TOKEN:
     api = HfApi()
     user_info = api.whoami(HF_TOKEN)
-    st.write(f"✅ Authenticated as {user_info.get('name', 'Unknown user')}")
+
+    if "name" in user_info:
+        st.write(f"✅ Authenticated as {user_info['name']}")
+    else:
+        st.warning("⚠️ Invalid API token! Check the Secret in the Space.")
 else:
     st.warning("⚠️ No API token found! Check the Secret in the Space.")
 
-# Dataset loading
+# Load only 300 images from `imagenet_resized/64x64`
 st.write("Loading 300 images from `tiny-imagenet`...")
+
+# Read the environment variable value
+hf_cache_path = os.getenv("HF_DATASETS_CACHE", "❌ Variable not set!")
+
+# Show the value in the Streamlit UI
+st.write(f"Hugging Face datasets cache: {hf_cache_path}")
+
+# Test whether the cache has the right permissions BEFORE loading the dataset
+test_file = "/app/.cache/test.txt"
+try:
+    with open(test_file, "w") as f:
+        f.write("Permissions test OK!")
+    st.write("✅ The folder has the right permissions!")
+except PermissionError:
+    st.error("❌ ERROR: the folder /app/.cache is not writable!")
+
+
+
+# Load the dataset directly from Hugging Face
+os.environ["HF_HOME"] = "/tmp/huggingface"
+os.environ["HF_DATASETS_CACHE"] = "/tmp/huggingface"
+os.makedirs(os.getenv("HF_DATASETS_CACHE"), exist_ok=True)
 dataset = load_dataset("zh-plus/tiny-imagenet", split="train")
 
+# Fetch the first example
+sample = dataset[0]
+image = sample["image"]
+label = sample["label"]
+
+# Show the image and its class in Streamlit
+st.image(image, caption=f"Class: {label}", use_container_width=True)
+st.write(f"Example from the dataset: {sample}")
+
+# Load the external imagenet dataset FOR TENSORFLOW
+#imagenet = tfds.load("imagenet_resized/64x64", split="train", as_supervised=True, download=True, data_dir=DATA_DIR)
+
 image_list = []
 label_list = []
 
+#for i, (image, label) in enumerate(imagenet.take(300)): # take only 300 images FOR TENSORFLOW
 for i, sample in enumerate(dataset):
     if i >= 300:  # take only 300 images
         break
-    image = tf.image.resize(sample["image"], (64, 64)) / 255.0  # Normalization
+
+    image = tf.image.resize(image, (64, 64)) / 255.0  # Normalization
     image_list.append(image.numpy())
     label_list.append(np.array(sample["label"]))
 
 X_train = np.array(image_list)
 y_train = np.array(label_list)
 
-st.write(f"✅ Downloaded and preprocessed {len(X_train)} images from `tiny-imagenet/64x64`!")
+st.write(f"✅ Downloaded and preprocessed {len(X_train)} images from `tiny-imagene/64x64`!")
 
 # Model loading
 if os.path.exists("Silva.h5"):
@@ -52,25 +97,35 @@ if os.path.exists("Silva.h5"):
     st.write("✅ Model `Silva.h5` loaded, no new training needed!")
 else:
     st.write("Training in progress because `Silva.h5` does not exist...")
-
+
+    # Load the pre-trained VGG16 model
     base_model = VGG16(weights="imagenet", include_top=False, input_shape=(64, 64, 3))
+
+    # Freeze the convolutional layers
     for layer in base_model.layers:
         layer.trainable = False
 
+    # Add new Dense layers
     x = Flatten()(base_model.output)
     x = Dense(256, activation="relu")(x)
     x = Dense(128, activation="relu")(x)
     output = Dense(len(set(y_train)), activation="softmax")(x)
 
+    # Create the final model
     model = Model(inputs=base_model.input, outputs=output)
     model.compile(optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"])
 
-    # Training with progress bar
+    # Training progress bar
     progress_bar = st.progress(0)
     status_text = st.empty()
     start_time = time.time()
 
-    history = model.fit(X_train, y_train, epochs=10)
+    # Training with the progress bar
+    for epoch in range(10):
+        history = model.fit(X_train, y_train, epochs=1)
+        progress_bar.progress((epoch + 1) / 10)
+        elapsed_time = time.time() - start_time
+        status_text.text(f"⏳ Estimated time remaining: {int(elapsed_time / (epoch + 1) * (10 - (epoch + 1)))} seconds")
 
     st.write("✅ Training complete!")
 
@@ -78,40 +133,6 @@ else:
     model.save("Silva.h5")
     st.write("✅ Model saved as `Silva.h5`!")
 
-# Metrics
-y_pred = np.argmax(model.predict(X_train), axis=1)
-accuracy = np.mean(y_pred == y_train)
-rmse = np.sqrt(np.mean((y_pred - y_train) ** 2))
-report = classification_report(y_train, y_pred, output_dict=True)
-
-recall = report["weighted avg"]["recall"]
-precision = report["weighted avg"]["precision"]
-f1_score = report["weighted avg"]["f1-score"]
-
-st.write(f"**Accuracy:** {accuracy:.4f}")
-st.write(f"**RMSE:** {rmse:.4f}")
-st.write(f"**Precision:** {precision:.4f}")
-st.write(f"**Recall:** {recall:.4f}")
-st.write(f"**F1-Score:** {f1_score:.4f}")
-
-# Button to generate the confusion matrix
-if st.button("Generate confusion matrix"):
-    conf_matrix = confusion_matrix(y_train, y_pred)
-    fig, ax = plt.subplots(figsize=(10, 7))
-    sns.heatmap(conf_matrix, annot=True, cmap="Blues", fmt="d", ax=ax)
-    st.pyplot(fig)
-    st.write("✅ Confusion matrix generated!")
-
-# Plots for loss and accuracy
-fig, ax = plt.subplots(1, 2, figsize=(12, 5))
-ax[0].plot(history.history["loss"], label="Loss")
-ax[1].plot(history.history["accuracy"], label="Accuracy")
-ax[0].set_title("Loss during training")
-ax[1].set_title("Accuracy during training")
-ax[0].legend()
-ax[1].legend()
-st.pyplot(fig)
-
 # Button to download the model
 if os.path.exists("Silva.h5"):
     with open("Silva.h5", "rb") as f:
@@ -127,11 +148,11 @@ def upload_model():
     api.upload_file(
         path_or_fileobj="Silva.h5",
         path_in_repo="Silva.h5",
-        repo_id="scontess/trainigVVG16",
+        repo_id="scontess/trainigVVG16", #"scontess/Silva",
         repo_type="space"
     )
-    st.success("✅ Model 'Silva.h5' uploaded to Hugging Face!")
+    st.success("✅ Model 'Silva' uploaded to Hugging Face!")
 
 st.write("Upload the Silva model to Hugging Face")
 if st.button("Upload Silva to the Model Store"):
-    upload_model()
+    upload_model()
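The rename makes the old behaviour easy to compare side by side: the loop kept in streamlit_app_old.py resizes the image variable left over from the dataset[0] preview on every iteration, so all 300 training rows are derived from the same picture, while the new streamlit_app.py reads sample["image"] instead. A standalone sketch of the corrected loop follows; the convert("RGB") guard and the NumPy conversion are assumptions added here, not part of the commit (tiny-imagenet contains some grayscale samples, and tf.image.resize expects an array or tensor rather than a PIL image):

import numpy as np
import tensorflow as tf
from datasets import load_dataset

dataset = load_dataset("zh-plus/tiny-imagenet", split="train")

image_list, label_list = [], []
for i, sample in enumerate(dataset):
    if i >= 300:  # take only 300 images
        break
    # Resize the current sample's own image, not the `image` variable left
    # over from the dataset[0] preview. convert("RGB") guards against
    # grayscale samples (assumption, not in the commit).
    img = np.array(sample["image"].convert("RGB"))
    img = tf.image.resize(img, (64, 64)) / 255.0  # normalize to [0, 1]
    image_list.append(img.numpy())
    label_list.append(sample["label"])

X_train = np.array(image_list)
y_train = np.array(label_list)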