# trainigVVG16 / src / streamlit_app.py
# Author: scontess — commit 5ac6c39 (3.85 kB)
# NOTE(review): the lines above were Hugging Face file-viewer chrome
# ("raw / history blame" etc.) accidentally captured with the source;
# converted to comments so the file parses.
import streamlit as st
import tensorflow_datasets as tfds
import tensorflow as tf
import numpy as np
import time
import tensorflow.keras as keras
from tensorflow.keras.applications import VGG16
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.models import Model, load_model
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
import seaborn as sns
from huggingface_hub import HfApi
import os
# 📌 Folder where the (pre-downloaded) TFDS dataset lives inside the Space.
DATA_DIR = "/app/src"

# 📌 Hugging Face authentication from the Space's secret.
# BUG FIX: `api` was previously created only inside the `if HF_TOKEN:` branch,
# but later code (the upload button handler) references it at module level —
# a missing token therefore caused a NameError far from the real cause.
# Create the client unconditionally; it is only *used* with a token below.
api = HfApi()
HF_TOKEN = os.getenv("HF_TOKEN")
if HF_TOKEN:
    # whoami() validates the token and returns account info on success.
    user_info = api.whoami(HF_TOKEN)
    if "name" in user_info:
        st.write(f"βœ… Autenticato come {user_info['name']}")
    else:
        st.warning("⚠️ Token API non valido! Controlla il Secret nello Space.")
else:
    st.warning("⚠️ Nessun token API trovato! Verifica il Secret nello Space.")
# 📌 Load only the first 300 ImageNet training images from the local data dir.
st.write("πŸ”„ Caricamento di 300 immagini da ImageNet...")
imagenet = tfds.load("imagenet2012", split="train", as_supervised=True, download=False, data_dir=DATA_DIR)

# Resize each image to VGG16's 224x224 input and scale pixels into [0, 1],
# collecting (image, label) pairs as NumPy values.
pairs = [
    ((tf.image.resize(img, (224, 224)) / 255.0).numpy(), lbl.numpy())
    for img, lbl in imagenet.take(300)
]
X_train = np.array([img for img, _ in pairs])
y_train = np.array([lbl for _, lbl in pairs])
st.write(f"βœ… Scaricate e preprocessate {len(X_train)} immagini da ImageNet!")
# 📌 Load the saved model if present; otherwise fine-tune a VGG16 head.
if os.path.exists("Silva.h5"):
    model = load_model("Silva.h5")
    st.write("βœ… Modello `Silva.h5` caricato, nessun nuovo training necessario!")
else:
    st.write("πŸš€ Training in corso perchΓ© `Silva.h5` non esiste...")

    # BUG FIX: the original trained with the *raw* ImageNet label values
    # (0..999) against an output layer of size len(set(y_train)). With only
    # 300 samples, label values exceed the number of output units, which
    # breaks sparse_categorical_crossentropy. Remap the labels seen in this
    # subset to contiguous indices 0..num_classes-1.
    classes = np.unique(y_train)                     # sorted unique raw labels
    y_train_idx = np.searchsorted(classes, y_train)  # contiguous class indices
    num_classes = len(classes)

    # Pre-trained VGG16 backbone without its ImageNet classifier head.
    base_model = VGG16(weights="imagenet", include_top=False, input_shape=(224, 224, 3))

    # Freeze the convolutional layers: only the new dense head is trained.
    for layer in base_model.layers:
        layer.trainable = False

    # New dense classification head on top of the frozen backbone.
    x = Flatten()(base_model.output)
    x = Dense(256, activation="relu")(x)
    x = Dense(128, activation="relu")(x)
    output = Dense(num_classes, activation="softmax")(x)

    model = Model(inputs=base_model.input, outputs=output)
    model.compile(optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"])

    # 📌 Training progress bar with a rough remaining-time estimate.
    progress_bar = st.progress(0)
    status_text = st.empty()
    start_time = time.time()

    EPOCHS = 10  # was a magic number repeated three times in the original
    for epoch in range(EPOCHS):
        # One epoch per fit() call so the progress bar can update in between.
        model.fit(X_train, y_train_idx, epochs=1)
        progress_bar.progress((epoch + 1) / EPOCHS)
        elapsed_time = time.time() - start_time
        status_text.text(f"⏳ Tempo rimanente stimato: {int(elapsed_time / (epoch + 1) * (EPOCHS - (epoch + 1)))} secondi")

    st.write("βœ… Addestramento completato!")

    # 📌 Persist the model so subsequent runs skip training entirely.
    model.save("Silva.h5")
    st.write("βœ… Modello salvato come `Silva.h5`!")
# 📌 Offer the trained weights file as a browser download, when it exists.
if os.path.exists("Silva.h5"):
    with open("Silva.h5", "rb") as model_file:
        st.download_button(
            label="πŸ“₯ Scarica il modello Silva.h5",
            data=model_file,
            file_name="Silva.h5",
            mime="application/octet-stream",
        )
# Button handler: push the trained weights to the Hugging Face model repo.
def upload_model():
    """Upload the local ``Silva.h5`` file to the ``scontess/Silva`` model repo.

    BUG FIX: the original body used the module-level ``api`` client, which is
    only defined when ``HF_TOKEN`` is set — clicking the button without a
    token raised a NameError. Build the client here and pass the token
    explicitly; also guard against the weights file not existing yet.
    """
    token = os.getenv("HF_TOKEN")
    if not token:
        st.error("⚠️ Nessun token API trovato! Verifica il Secret nello Space.")
        return
    if not os.path.exists("Silva.h5"):
        st.error("⚠️ Il file `Silva.h5` non esiste ancora: esegui prima il training.")
        return
    HfApi().upload_file(
        path_or_fileobj="Silva.h5",
        path_in_repo="Silva.h5",
        repo_id="scontess/Silva",
        repo_type="model",
        token=token,
    )
    st.success("βœ… Modello 'Silva' caricato su Hugging Face!")

st.write("πŸ“₯ Carica il modello Silva su Hugging Face")
if st.button("πŸš€ Carica Silva su Model Store"):
    upload_model()