Skym616 committed · Commit 93f30c6 · 1 Parent(s): dfec19e
.gitignore ADDED
@@ -0,0 +1,2 @@
+ .env
+ cnn_model1.pth
.streamlit/config.toml ADDED
@@ -0,0 +1,7 @@
+ [theme]
+ base="light"
+ primaryColor="#0d6efd"
+ backgroundColor="#0f1214"
+ secondaryBackgroundColor="#FFFFFF"
+ textColor="#31333F"
+ font="sans serif"
README.md CHANGED
@@ -1,6 +1,6 @@
  ---
  title: Tumor Prediction
- emoji: 💻
+ emoji: 🧠
  colorFrom: pink
  colorTo: blue
  sdk: streamlit
@@ -9,4 +9,29 @@ app_file: app.py
  pinned: false
  ---
 
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+
+ # Tumor Prediction Application
+
+ ![Tumor Prediction Preview](preview.png)
+
+ ## Overview
+
+ This application uses a PyTorch convolutional neural network to classify medical images of brain tumors into four categories (glioma, meningioma, pituitary, or no tumor) and returns a prediction with a confidence score within seconds.
+
+ ## Features
+
+ - **Image Upload**: Upload an image of a tumor for prediction.
+ - **Prediction**: Get a prediction of the tumor type with a confidence percentage.
+ - **Translation**: Translate the prediction summary from French to Spanish.
+ - **Text-to-Speech**: Convert the translated text to speech.
+ - **Interactive UI**: User-friendly interface with Bootstrap and Particles.js integration.
+
+ ## Technologies Used
+
+ - **Streamlit**: For building the web application.
+ - **PyTorch**: For the deep learning model.
+ - **Hugging Face Inference API**: For text summarization and translation.
+ - **Google Text-to-Speech (gTTS)**: For converting text to speech.
+ - **Mistral LLM**: For generating detailed medical diagnoses.
+ - **Bootstrap**: For responsive design.
+ - **Particles.js**: For background particle effects.
__pycache__/app.cpython-310.pyc ADDED
Binary file (8.5 kB)

__pycache__/functions.cpython-310.pyc ADDED
Binary file (2.57 kB)

__pycache__/layout.cpython-310.pyc ADDED
Binary file (3.76 kB)

__pycache__/predict.cpython-310.pyc ADDED
Binary file (2.24 kB)

__pycache__/styles.cpython-310.pyc ADDED
Binary file (3.8 kB)

__pycache__/utils.cpython-310.pyc ADDED
Binary file (573 Bytes)
app.py ADDED
@@ -0,0 +1,75 @@
+ from functions import generate_text, get_diagnosis, summarize_text, text_to_speech, translate_fr_to_es
+ from predict import predict
+ from utils import load_image
+ from dotenv import load_dotenv
+ import streamlit as st
+ from layout import show_header, show_prediction_section, show_footer
+ from styles import inject_particles_js, inject_bootstrap_and_custom_css
+
+ # Page configuration
+ st.set_page_config(
+     page_title="Application Multitâche",
+     page_icon=":robot_face:",
+     layout="wide",
+     initial_sidebar_state="auto",
+ )
+
+ load_dotenv()
+
+ # Inject the styles and scripts
+ inject_particles_js()
+ inject_bootstrap_and_custom_css()
+
+ # Load the images
+ image_path = "images/tumor.png"
+ image_base64 = load_image(image_path)
+ linkedin_image_base64 = load_image("images/skill-icons--linkedin.svg")
+ github_image_base64 = load_image("images/skill-icons--github-dark.svg")
+ huggingface_image_base64 = load_image("images/Hugging_Face_idJ6-I79C__1.svg")
+ favicon_image_base64 = load_image("images/favicon.png")
+
+ # Render the header
+ show_header(favicon_image_base64, image_base64)
+
+ # Render the prediction section
+ show_prediction_section()
+
+ _, col2, col3, _ = st.columns([2, 6, 6, 2])
+
+ translation = None
+ with col2:
+     uploaded_file = st.file_uploader("Téléchargez une image de tumeur...", type=["jpg", "jpeg", "png"])
+     if uploaded_file:
+         _, col2, _ = st.columns([1, 3, 1])
+         with col2:
+             st.image(uploaded_file, caption="Image de tumeur", width=400)
+         button = st.button("Prédire", use_container_width=True)
+         if button:
+             # st.spinner is a context manager: the work must run inside the with-block
+             with st.spinner("Prédiction en cours..."):
+                 predicted, confidence = predict(uploaded_file)
+                 diagnosis_prompt = get_diagnosis(predicted, confidence)
+                 diagnosis = generate_text(diagnosis_prompt)
+                 summary = summarize_text(diagnosis)
+                 translation = translate_fr_to_es(summary)
+                 output_path = text_to_speech(translation)
+
+ if uploaded_file and translation:
+     with col3:
+         st.markdown(
+             f"""
+             <div class="card border-0 shadow-sm p-4">
+                 <div class="card-body">
+                     <h2 class="gradient-text"> Résultat de la prédiction </h2>
+                     <p class="lead"> {summary} </p>
+                     <p class="lead"> {translation} </p>
+                 </div>
+             </div>
+             """,
+             unsafe_allow_html=True,
+         )
+
+     st.audio(output_path, format="audio/mp3")
+
+ # Render the footer
+ show_footer(linkedin_image_base64, github_image_base64, huggingface_image_base64)
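
One Streamlit caveat worth noting about app.py: the script re-runs top to bottom on every widget interaction, so `translation` resets to `None` and the result card disappears on the next rerun. A hedged sketch of how the result could be persisted with `st.session_state` (names mirror app.py; this is an illustration, not part of the commit):

```python
# Hypothetical variant of app.py's button handler: cache results in
# st.session_state so they survive Streamlit reruns.
if button:
    with st.spinner("Prédiction en cours..."):
        predicted, confidence = predict(uploaded_file)
        diagnosis = generate_text(get_diagnosis(predicted, confidence))
        st.session_state["summary"] = summarize_text(diagnosis)
        st.session_state["translation"] = translate_fr_to_es(st.session_state["summary"])
        st.session_state["audio_path"] = text_to_speech(st.session_state["translation"])

# On later reruns the cached values are still available.
if uploaded_file and "translation" in st.session_state:
    st.write(st.session_state["summary"])
    st.write(st.session_state["translation"])
    st.audio(st.session_state["audio_path"], format="audio/mp3")
```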
functions.py ADDED
@@ -0,0 +1,73 @@
+ import requests
+ import os
+ import time
+ from gtts import gTTS
+
+
+ HF_API_KEY = os.getenv("HUGGING_FACE_API_KEY")
+ MISTRAL_API_KEY = os.getenv("MISTRAL_IA_API_KEY")
+
+
+ # Summarization model
+ def query(payload):
+     headers = {"Authorization": f"Bearer {HF_API_KEY}"}
+     API_URL_SUMMARISATION_MODEL = "https://api-inference.huggingface.co/models/facebook/bart-large-cnn"
+     response = requests.post(API_URL_SUMMARISATION_MODEL, headers=headers, json=payload)
+     return response.json()[0]['summary_text']
+
+ def summarize_text(text):
+     payload = {"inputs": text}
+     return query(payload)
+
+ # Text generation model
+ def get_diagnosis(prediction, confidence):
+     labels = ["glioma", "meningioma", "notumor", "pituitary"]
+     result = labels[prediction]
+     prompt = f'Tu es un médecin qui doit annoncer à un patient le type de tumeur qu\'il a. Le type de tumeur prédit est "{result}" avec une confiance de {confidence}. Veuillez fournir un diagnostic concis, précis et des conseils.'
+     return prompt
+
+
+ def generate_text(input_text, max_retries=3):
+     MISTRAL_API_URL = "https://api.mistral.ai/v1/chat/completions"
+
+     headers = {
+         "Authorization": f"Bearer {MISTRAL_API_KEY}",
+         "Content-Type": "application/json"
+     }
+
+     payload = {
+         "model": "mistral-small-latest",
+         "messages": [{"role": "user", "content": input_text}]
+     }
+
+     # Retry with a fixed 5 s back-off in case the API is busy or rate-limited
+     for _ in range(max_retries):
+         response = requests.post(MISTRAL_API_URL, headers=headers, json=payload)
+         if response.status_code == 200:
+             return response.json()['choices'][0]['message']['content']
+         else:
+             time.sleep(5)
+
+     return "Failed to generate text after multiple attempts"
+
+ # Translation model
+ def translate_fr_to_es(input_text, max_retries=5):
+     API_URL = "https://api-inference.huggingface.co/models/Helsinki-NLP/opus-mt-fr-es"
+     headers = {"Authorization": f"Bearer {HF_API_KEY}"}
+     payload = {"inputs": input_text}
+
+     for _ in range(max_retries):
+         response = requests.post(API_URL, headers=headers, json=payload)
+         if response.status_code == 200:
+             return response.json()[0]['translation_text']
+         else:
+             time.sleep(5)
+
+     return "Une erreur s'est produite, veuillez réessayer plus tard."
+
+ # Text-to-speech model (not speech-to-text)
+ def text_to_speech(input_text):
+     output_path = "text_to_speech.mp3"
+     tts = gTTS(text=input_text, lang="es")
+     tts.save(output_path)
+     return output_path
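
Taken together, these helpers form a chain: diagnosis prompt → Mistral generation → BART summarization → French-to-Spanish translation → gTTS audio. A minimal sketch of that chain outside Streamlit, assuming `HUGGING_FACE_API_KEY` and `MISTRAL_IA_API_KEY` are set (e.g. via the `.env` file ignored above):

```python
# Quick end-to-end check of the functions.py pipeline, run from the repo root.
from dotenv import load_dotenv
load_dotenv()  # must run before importing functions, which reads the keys at import time

from functions import (get_diagnosis, generate_text, summarize_text,
                       translate_fr_to_es, text_to_speech)

prompt = get_diagnosis(0, 97.5)            # class index 0 -> "glioma"
diagnosis = generate_text(prompt)          # Mistral chat completion (French)
summary = summarize_text(diagnosis)        # facebook/bart-large-cnn
translation = translate_fr_to_es(summary)  # Helsinki-NLP/opus-mt-fr-es
print(translation)
print(text_to_speech(translation))         # writes text_to_speech.mp3
```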
images/Hugging_Face_idJ6-I79C__1.svg ADDED
images/banner.png ADDED
images/favicon.png ADDED
images/skill-icons--github-dark.svg ADDED
images/skill-icons--linkedin.svg ADDED
images/tumor.png ADDED
layout.py ADDED
@@ -0,0 +1,69 @@
+ import streamlit as st
+
+ def show_header(favicon_image_base64, image_base64):
+     st.markdown(
+         f"""
+         <div class="container card border-0 shadow-sm p-4">
+             <div class="card-body">
+                 <div class="row d-flex">
+                     <div class="col-md-6 position-relative">
+                         <img src="data:image/png;base64,{favicon_image_base64}" alt="favicon" class="position-absolute w-100 h-auto text-center top-0 bottom-0 z-0">
+                         <h1 class="gradient-text"> Prédiction de la Tumeur avec l'Intelligence Artificielle </h1>
+                         <h4 style="text-align: justify"> Utilisez notre solution avancée pour prédire le type de tumeur à partir d'images médicales. Téléchargez une image de tumeur et obtenez une prédiction précise en quelques secondes. </h4>
+                     </div>
+                     <div class="col-md-6 d-flex border-0">
+                         <img src="data:image/png;base64,{image_base64}" class="img-fluid mx-auto" alt="Responsive image">
+                     </div>
+                 </div>
+             </div>
+         </div>
+         """,
+         unsafe_allow_html=True,
+     )
+
+ def show_prediction_section():
+     st.markdown(
+         """
+         <div class="container mb-64" id="competences">
+             <div class="h1 text-center my-4 gradient-text">
+                 Faites une prédiction
+             </div>
+         </div>
+         """,
+         unsafe_allow_html=True,
+     )
+
+ def show_footer(linkedin_image_base64, github_image_base64, huggingface_image_base64):
+     st.markdown(
+         f"""
+         <footer class="container shadow-sm text-center text-lg-start mt-5">
+             <div class="container p-4">
+                 <div class="row">
+                     <div class="col-lg-6 col-md-12 mb-4 mb-md-0">
+                         <h5 class="text-uppercase gradient-text">Contact Information</h5>
+                         <p>
+                             Yannick Simo<br>
+                             Junior Data Scientist<br>
+                             Email: [email protected]<br>
+                         </p>
+                     </div>
+                     <div class="col-lg-6 col-md-12 mb-4 mb-md-0">
+                         <h5 class="text-uppercase gradient-text">Suivez-moi</h5>
+                         <a href="https://www.linkedin.com/in/simo-yannick-38137b231/" class="btn btn-floating p-0" role="button">
+                             <img src="data:image/svg+xml;base64,{linkedin_image_base64}" class="w-50" alt="linkedin icon">
+                         </a>
+                         <a href="https://github.com/Skym616" class="btn btn-floating p-0" role="button">
+                             <img src="data:image/svg+xml;base64,{github_image_base64}" class="w-50" alt="github icon">
+                         </a>
+                         <a href="https://huggingface.co/Skym616" class="btn btn-floating p-0" role="button">
+                             <img src="data:image/svg+xml;base64,{huggingface_image_base64}" class="w-50" alt="huggingface icon">
+                         </a>
+                     </div>
+                 </div>
+             </div>
+             <div class="text-center p-3 text-white gradient-text">
+                 &copy; 2025 Yannick Simo. All rights reserved.
+             </div>
+         </footer>
+         """, unsafe_allow_html=True
+     )
predict.py ADDED
@@ -0,0 +1,76 @@
+ import torch
+ import torch.nn as nn
+ import torchvision.transforms as transforms
+ from PIL import Image
+
+ # Model architecture
+ class DeepCNN(nn.Module):
+     def __init__(self, num_classes=4):
+         super(DeepCNN, self).__init__()
+         self.layer1 = nn.Sequential(
+             nn.Conv2d(3, 32, kernel_size=3, padding=1),
+             nn.BatchNorm2d(32),
+             nn.ReLU(),
+             nn.MaxPool2d(kernel_size=2)
+         )
+         self.layer2 = nn.Sequential(
+             nn.Conv2d(32, 64, kernel_size=3, padding=1),
+             nn.BatchNorm2d(64),
+             nn.ReLU(),
+             nn.MaxPool2d(2)
+         )
+         self.layer3 = nn.Sequential(
+             nn.Conv2d(64, 128, kernel_size=3),
+             nn.BatchNorm2d(128),
+             nn.ReLU(),
+             nn.MaxPool2d(2)
+         )
+         # NB: the misspelled attribute name "lqyer4" is kept as-is so the keys
+         # still match the cnn_model1.pth state_dict; this block is unused in forward().
+         self.lqyer4 = nn.Sequential(
+             nn.Conv2d(128, 256, kernel_size=3),
+             nn.BatchNorm2d(256),
+             nn.ReLU(),
+             nn.MaxPool2d(2)
+         )
+         self.fc_layers = nn.Sequential(
+             nn.Linear(28800, 1024),  # = 128 channels * 15 * 15 after layer3
+             nn.ReLU(),
+             nn.Linear(1024, num_classes)
+         )
+
+     def forward(self, x):
+         out = self.layer1(x)
+         out = self.layer2(out)
+         out = self.layer3(out)
+         out = out.view(out.size(0), -1)
+         out = self.fc_layers(out)
+         return out
+
+ def load_model(model_path, num_classes=4):
+     model = DeepCNN(num_classes=num_classes)
+     model.load_state_dict(torch.load(model_path, map_location=torch.device('cpu')))
+     model.eval()
+     return model
+
+ # Load the model
+ model = load_model('cnn_model1.pth')
+
+ # Image transforms
+ transform = transforms.Compose([
+     transforms.Resize((128, 128)),
+     transforms.ToTensor(),
+     transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+ ])
+
+ # Prediction function
+ def predict(image):
+     # Force 3 channels: uploaded PNGs may be grayscale or RGBA
+     image = Image.open(image).convert("RGB")
+     image = transform(image).unsqueeze(0)
+     with torch.no_grad():
+         outputs = model(image)
+         probabilities = torch.nn.functional.softmax(outputs, dim=1)
+         confidence, predicted = torch.max(probabilities, 1)
+     print(predicted.item(), confidence.item() * 100)
+     return predicted.item(), round(confidence.item() * 100, 2)
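
The 28800 input size of `fc_layers` is not arbitrary: a 128×128 input is halved to 64×64 by layer1 and 32×32 by layer2; layer3's unpadded 3×3 conv yields 30×30 and its pooling 15×15 with 128 channels, i.e. 128 · 15 · 15 = 28800 (which is also why the unused `lqyer4` block must stay out of `forward()`). A small verification sketch:

```python
# Verify the flattened feature size DeepCNN feeds into fc_layers.
import torch
from predict import DeepCNN  # NB: importing predict also loads cnn_model1.pth at import time

model = DeepCNN()
x = torch.randn(1, 3, 128, 128)            # one dummy RGB image, 128x128
feats = model.layer3(model.layer2(model.layer1(x)))
print(feats.shape)                          # torch.Size([1, 128, 15, 15])
assert feats.flatten(1).shape[1] == 28800   # matches nn.Linear(28800, 1024)
```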
preview.png ADDED
requirements.txt ADDED
@@ -0,0 +1,7 @@
+ gTTS==2.5.4
+ Pillow==11.1.0
+ python-dotenv==1.0.1
+ Requests==2.32.3
+ streamlit==1.42.0
+ torch==2.6.0+cu126
+ torchvision==0.21.0+cu126
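
Note on the pinned versions: the `+cu126` builds of torch and torchvision are hosted on PyTorch's own package index rather than PyPI, so installing this file normally requires adding `--extra-index-url https://download.pytorch.org/whl/cu126`; on a CPU-only Space the plain `torch==2.6.0` / `torchvision==0.21.0` wheels would be the simpler choice.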
styles.py ADDED
@@ -0,0 +1,79 @@
+ import streamlit as st
+ import streamlit.components.v1 as components
+
+ def inject_particles_js():
+     particles_html = """
+     <div id="particles-js" style="position: fixed; width: 100%; height: 100%; z-index: -1;"></div>
+     <script src="https://cdn.jsdelivr.net/particles.js/2.0.0/particles.min.js"></script>
+     <script>
+     particlesJS("particles-js", {
+         "particles": {
+             "number": { "value": 100, "density": { "enable": true, "value_area": 800 } },
+             "color": { "value": "#f3ec78" },
+             "shape": { "type": "circle" },
+             "opacity": { "value": 0.5, "random": false },
+             "size": { "value": 3, "random": true },
+             "line_linked": { "enable": true, "distance": 150, "color": "#f3ec78", "opacity": 0.4, "width": 1 },
+             "move": { "enable": true, "speed": 2, "direction": "none", "random": false, "straight": false, "out_mode": "out" }
+         },
+         "interactivity": {
+             "detect_on": "canvas",
+             "events": { "onhover": { "enable": true, "mode": "repulse" }, "onclick": { "enable": true, "mode": "push" } },
+             "modes": { "repulse": { "distance": 100 }, "push": { "particles_nb": 4 } }
+         }
+     });
+     </script>
+     """
+     components.html(particles_html, height=1900, scrolling=True)
+
+ def inject_bootstrap_and_custom_css():
+     st.markdown(
+         """
+         <link href="https://cdn.jsdelivr.net/npm/[email protected]/dist/css/bootstrap.min.css" rel="stylesheet" integrity="sha384-QWTKZyjpPEjISv5WaRU9OFeRpok6YctnYmDr5pNlyT2bRjXh0JMhjY6hW+ALEwIH" crossorigin="anonymous">
+         <script src="https://cdn.jsdelivr.net/npm/[email protected]/dist/js/bootstrap.bundle.min.js" integrity="sha384-YvpcrYf0tY3lHB60NNkmXc5s9fDVZLESaAA55NDzOxhy9GkcIdslK1eN7N6jIeHz" crossorigin="anonymous"></script>
+         <link href="https://fonts.googleapis.com/css2?family=Poppins:wght@300;400;600;700&display=swap" rel="stylesheet">
+         <style>
+         body, html, * {
+             font-family: 'Poppins', sans-serif;
+         }
+         body {
+             background-color: #0c1926;
+         }
+         .card, footer {
+             background-color: #0c1926;
+             color: #ffffff;
+         }
+         .stIFrame {
+             position: absolute;
+         }
+         h1, h2 {
+             font-size: 2.5rem;
+         }
+         p, span, div, a, button {
+             font-weight: 400;
+         }
+         .stMainBlockContainer {
+             padding-top: 48px !important;
+         }
+         p, .stFileUploaderFileName, small {
+             font-weight: 600;
+             color: #ffffff !important;
+         }
+         section[data-testid="stFileUploaderDropzone"] {
+             color: #ffffff !important;
+             background-color: #0c1926 !important;
+         }
+         .gradient-text {
+             background-color: red;
+             background-image: linear-gradient(45deg, #f3ec78, #af4261);
+             background-size: 100%;
+             background-repeat: repeat;
+             -webkit-background-clip: text;
+             -webkit-text-fill-color: transparent;
+             -moz-background-clip: text;
+             -moz-text-fill-color: transparent;
+         }
+         </style>
+         """,
+         unsafe_allow_html=True,
+     )
text_to_speech.mp3 ADDED
Binary file (138 kB)
utils.py ADDED
@@ -0,0 +1,12 @@
+ import base64
+ import requests
+
+ def load_image(image_path):
+     with open(image_path, "rb") as img_file:
+         return base64.b64encode(img_file.read()).decode()
+
+ def load_lottie_url(url):
+     r = requests.get(url)
+     if r.status_code != 200:
+         return None
+     return r.json()