Upload 2 files
- app.py +194 -0
- requirements.txt +14 -0
app.py
ADDED
@@ -0,0 +1,194 @@
import streamlit as st
import numpy as np
import cv2
from PIL import Image
from io import BytesIO
from ultralytics import YOLO
import os
import tempfile
import base64
import requests
from datetime import datetime
from gtts import gTTS
from googletrans import Translator
import google.generativeai as genai  # Import Gemini API

# Configure Google Gemini API
GEMINI_API_KEY = os.getenv("GOOGLE_API_KEY")
genai.configure(api_key=GEMINI_API_KEY)

# Load YOLO model for crop disease detection
yolo_model = YOLO("models/best.pt")

# Initialize conversation history if not set
if "conversation_history" not in st.session_state:
    st.session_state.conversation_history = {}

# Function to preprocess images
def preprocess_image(image, target_size=(224, 224)):
    """Resize image for AI models."""
    image = Image.fromarray(image)
    image = image.resize(target_size)
    return image

# Generate response from Gemini AI with history
def generate_gemini_response(disease_list, user_context="", conversation_history=None):
    """Generate a structured diagnosis using Gemini API, considering conversation history."""
    try:
        model = genai.GenerativeModel("gemini-1.5-pro")

        # Start with detected diseases
        prompt = f"""
        You are an expert plant pathologist. The detected crop diseases are: {', '.join(disease_list)}.

        User's context or question: {user_context if user_context else "Provide a general analysis"}
        """

        # Add past conversation history for better continuity
        if conversation_history:
            history_text = "\n\nPrevious conversation:\n"
            for entry in conversation_history:
                history_text += f"- User: {entry['question']}\n- AI: {entry['response']}\n"
            prompt += history_text

        # Ask Gemini for a structured diagnosis
        prompt += """
        Provide a detailed diagnosis including:
        1. Symptoms
        2. Causes and risk factors
        3. Impact on crops
        4. Treatment options (short-term & long-term)
        5. Prevention strategies
        """

        response = model.generate_content(prompt)
        return response.text if response else "No response from Gemini."
    except Exception as e:
        return f"Error connecting to Gemini API: {str(e)}"

# Perform inference using YOLO
def inference(image, conf=0.4):
    """Detect crop diseases in the given image at the requested confidence threshold."""
    results = yolo_model(image, conf=conf)
    infer = np.zeros(image.shape, dtype=np.uint8)
    detected_classes = []
    class_names = {}

    for r in results:
        infer = r.plot()
        class_names = r.names
        detected_classes = r.boxes.cls.tolist()

    return infer, detected_classes, class_names

# Convert text to speech
def text_to_speech(text, language="en"):
    """Convert text to speech using gTTS."""
    try:
        with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as temp_audio:
            tts = gTTS(text=text, lang=language, slow=False)
            tts.save(temp_audio.name)

        with open(temp_audio.name, "rb") as audio_file:
            audio_bytes = audio_file.read()

        os.unlink(temp_audio.name)
        return audio_bytes
    except Exception as e:
        st.error(f"Error generating speech: {str(e)}")
        return None

# Initialize Streamlit UI
st.title("🌱 AI-Powered Crop Disease Detection & Diagnosis 🔬")

# Sidebar settings
with st.sidebar:
    st.header("Settings")

    # Model selection (display only; the app always uses Gemini)
    selected_model = st.selectbox("Choose Model", ["Gemini", "GPT-4", "Claude", "Llama 3", "Mistral"], help="This app always uses Gemini.")

    confidence_threshold = st.slider("Detection Confidence Threshold", 0.0, 1.0, 0.4)

    # Text-to-Speech settings
    tts_enabled = st.checkbox("Enable Text-to-Speech", value=True)
    language = st.selectbox("Speech Language", options=["en", "es", "fr", "de"], format_func=lambda x: {
        "en": "English",
        "es": "Spanish",
        "fr": "French",
        "de": "German"
    }[x])

    if st.button("Clear Conversation History"):
        st.session_state.conversation_history = {}
        st.success("Conversation history cleared!")

# User context input
st.subheader("📝 Provide Initial Context or Ask a Question")
user_context = st.text_area("Enter any details, symptoms, or questions about the plant's condition.", placeholder="Example: My tomato plant leaves are turning yellow. Is it a disease or a nutrient deficiency?")

# Upload an image
uploaded_file = st.file_uploader("📤 Upload a plant image", type=["jpg", "jpeg", "png"])

if uploaded_file:
    file_id = uploaded_file.name

    # Initialize conversation history for this image if not set
    if file_id not in st.session_state.conversation_history:
        st.session_state.conversation_history[file_id] = []

    # Convert file to image (OpenCV decodes to BGR)
    file_bytes = np.asarray(bytearray(uploaded_file.read()), dtype=np.uint8)
    img = cv2.imdecode(file_bytes, 1)

    # Perform inference using the confidence threshold from the sidebar
    processed_image, detected_classes, class_names = inference(img, conf=confidence_threshold)

    # Display processed image with detected diseases (annotated frame is BGR)
    st.image(processed_image, caption="🔍 Detected Diseases", channels="BGR", use_column_width=True)

    if detected_classes:
        # YOLO returns class ids as floats; cast to int before looking up names
        detected_disease_names = [class_names[int(cls)] for cls in detected_classes]
        st.write(f"✅ **Detected Diseases:** {', '.join(detected_disease_names)}")

        # AI-generated diagnosis from Gemini
        st.subheader("🔍 AI Diagnosis")
        with st.spinner("Generating diagnosis..."):
            diagnosis = generate_gemini_response(detected_disease_names, user_context, st.session_state.conversation_history[file_id])

        # Save response to history
        st.session_state.conversation_history[file_id].append({"question": user_context, "response": diagnosis})

        # Display the diagnosis
        st.write(diagnosis)

        # Show past conversation history
        if st.session_state.conversation_history[file_id]:
            st.subheader("🗂️ Conversation History")
            for i, entry in enumerate(st.session_state.conversation_history[file_id]):
                with st.expander(f"Q{i+1}: {entry['question'][:50]}..."):
                    st.write("**User:**", entry["question"])
                    st.write("**AI:**", entry["response"])

        # Convert diagnosis to speech if enabled
        if tts_enabled:
            if st.button("🔊 Listen to Diagnosis"):
                with st.spinner("Generating audio... 🎵"):
                    audio_bytes = text_to_speech(diagnosis, language)
                    if audio_bytes:
                        st.audio(audio_bytes, format="audio/mp3")

    else:
        st.write("❌ No crop disease detected.")

# Instructions for users
st.markdown("""
---
### How to Use:
1. Upload an image of a plant leaf with suspected disease.
2. Provide context (optional) about symptoms or concerns.
3. The system detects the disease using AI.
4. Gemini generates a diagnosis with symptoms and treatments.
5. Ask follow-up questions, and the AI will remember previous responses.
6. Optionally, listen to the AI-generated diagnosis.
""")
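For quick local verification of the detection step, a minimal standalone sketch follows. It is not part of the uploaded files; it mirrors how app.py calls the YOLO model and assumes models/best.pt exists, with sample_leaf.jpg as a hypothetical test image.

import cv2
from ultralytics import YOLO

# Load the same weights app.py uses and run one detection pass.
model = YOLO("models/best.pt")
img = cv2.imread("sample_leaf.jpg")  # hypothetical sample image; OpenCV decodes to BGR, as in the app
results = model(img, conf=0.4)

for r in results:
    names = r.names                               # {class_id: class_name}
    ids = [int(c) for c in r.boxes.cls.tolist()]  # class ids come back as floats
    print("Detected:", [names[i] for i in ids])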
requirements.txt
ADDED
@@ -0,0 +1,14 @@
streamlit==1.41.1
numpy==1.26.4
opencv-python==4.9.0.80
pillow==10.4.0
ultralytics
ollama==0.4.5
gtts==2.5.1
bcrypt==4.2.1
googletrans-py==4.0.0
huggingface-hub>=0.19
hf-transfer>=0.1.4
protobuf<4
click<8.1
google-generativeai
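Before launching the app, the Gemini wiring that app.py depends on can be smoke-tested in isolation. This is a sketch rather than part of the upload; it assumes GOOGLE_API_KEY is exported in the environment, exactly as app.py expects.

import os
import google.generativeai as genai

# Fail early if the key app.py reads via os.getenv("GOOGLE_API_KEY") is missing.
api_key = os.getenv("GOOGLE_API_KEY")
assert api_key, "Set GOOGLE_API_KEY before launching the Streamlit app"

genai.configure(api_key=api_key)
model = genai.GenerativeModel("gemini-1.5-pro")
print(model.generate_content("Name one common tomato leaf disease.").text)

If this prints a sensible answer, running "streamlit run app.py" should reach Gemini with the same credentials.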