# app.py - FactoryGPT 5.0: Predictive Maintenance with Technical Expert GPT
import streamlit as st
import pandas as pd
import numpy as np
from sentence_transformers import SentenceTransformer
from transformers import pipeline
from sklearn.ensemble import IsolationForest
# --- Page setup -------------------------------------------------------------
st.set_page_config(
    page_title="FactoryGPT 5.0 – Predictive Maintenance Expert",
    page_icon="🧠",
    layout="wide",
)

# Dark mode CSS.
# NOTE(review): the stylesheet body appears to have been lost in a paste —
# the markdown call is kept so the hook is preserved; restore the CSS here.
st.markdown("""
""", unsafe_allow_html=True)

# Header banner
st.markdown("""
🏭 FactoryGPT 5.0 – Predictive Maintenance Assistant
Sensor-Driven Diagnostics | Role-Based Expert Assistant | Industry 5.0 Ready
""", unsafe_allow_html=True)


# --- NLP models -------------------------------------------------------------
# Streamlit re-executes this entire script on every widget interaction.
# Without caching, both models would be re-loaded from disk on each rerun
# (several seconds per click). st.cache_resource loads each model exactly
# once per process and shares the instance across sessions/reruns.
@st.cache_resource
def _load_embedder():
    """Load the sentence-embedding model used for log retrieval (once)."""
    return SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')


@st.cache_resource
def _load_generator():
    """Load the text2text generation pipeline used for answers (once)."""
    return pipeline('text2text-generation', model='google/flan-t5-base')


EMBED_MODEL = _load_embedder()
GEN_MODEL = _load_generator()
# Upload data
uploaded_file = st.sidebar.file_uploader("📂 Upload your sensor CSV", type=["csv"])


def convert_to_chunks(df, numeric_cols=None):
    """Render each row of *df* as one retrievable text chunk.

    Each chunk looks like "[Entry 3] temp: 71.20, rpm: 1450.00" and is what
    gets embedded for similarity search.

    Args:
        df: sensor log as a DataFrame.
        numeric_cols: columns to include; defaults to every numeric column
            of *df* (same value the original closed over), so existing
            ``convert_to_chunks(df)`` call sites behave identically.

    Returns:
        list[str], one entry per row of *df*.
    """
    if numeric_cols is None:
        numeric_cols = df.select_dtypes(include=np.number).columns.tolist()
    return [
        f"[Entry {i}] " + ", ".join(f"{col}: {row[col]:.2f}" for col in numeric_cols)
        for i, row in df.iterrows()
    ]


if uploaded_file:
    df = pd.read_csv(uploaded_file)
    numeric_cols = df.select_dtypes(include=np.number).columns.tolist()
    st.success("✅ Sensor log loaded!")
    st.markdown("### 📊 Sensor Data Preview")
    st.dataframe(df.head(), use_container_width=True)

    # Guard: everything below (embeddings, IsolationForest) needs at least
    # one numeric column; the original would crash on an all-text CSV.
    if not numeric_cols:
        st.error("The uploaded CSV contains no numeric columns to analyze.")
        st.stop()

    # Cache chunks/embeddings, keyed on the uploaded file. The original
    # cached on mere key-absence, so uploading a SECOND csv silently kept
    # the first file's stale embeddings for retrieval.
    file_key = (uploaded_file.name, uploaded_file.size)
    if st.session_state.get('file_key') != file_key:
        chunks = convert_to_chunks(df, numeric_cols)
        st.session_state.chunks = chunks
        st.session_state.embeddings = EMBED_MODEL.encode(chunks)
        st.session_state.file_key = file_key

    # --- Equipment status prediction ---
    st.markdown("### 🔍 Equipment Status Prediction")
    # IsolationForest raises on NaNs; impute with per-column medians so a
    # log with gaps still gets scored. random_state pins the forest so
    # reruns of the script label the same rows the same way.
    features = df[numeric_cols].fillna(df[numeric_cols].median())
    iso = IsolationForest(contamination=0.02, random_state=42)
    labels = iso.fit_predict(features)
    df['status'] = ['❌ Faulty' if x == -1 else '✅ OK' for x in labels]
    df['maintenance_flag'] = ['🔧 Action Required' if x == -1 else '🟢 Normal' for x in labels]
    st.dataframe(df[['status', 'maintenance_flag'] + numeric_cols].head(), use_container_width=True)

    # --- Role-based assistant ---
    st.markdown("### 💬 Role-Based Technical Assistant")
    roles = {
        "Operator": (
            "You are a senior machine operator. Based on sensor logs, identify abnormal conditions in real-time operation. "
            "Explain deviations clearly and suggest next steps to the floor team."
        ),
        "Maintenance": (
            "You are a certified maintenance technician. Analyze anomalous sensor readings, reference maintenance schedules, "
            "and recommend immediate actions or predictive interventions with justification."
        ),
        "Engineer": (
            "You are a reliability and control systems engineer. Provide a detailed root cause analysis from the sensor logs, "
            "explain what failure modes may be emerging (e.g., torque drift, PID lag), and propose technical mitigations."
        )
    }
    role = st.selectbox("👤 Select your role for technical insights", list(roles.keys()))
    if 'chat_history' not in st.session_state:
        st.session_state.chat_history = []
    user_input = st.text_input("🧠 Ask FactoryGPT a technical question about the machine behavior")
    if user_input:
        # Retrieve the 5 log entries most similar to the query
        # (dot-product over the cached row embeddings).
        query_vec = EMBED_MODEL.encode([user_input])[0]
        sims = np.dot(st.session_state.embeddings, query_vec)
        top_idxs = np.argsort(sims)[-5:][::-1]
        context = "\n".join(st.session_state.chunks[i] for i in top_idxs)
        # Prompt engineering: role persona + retrieved rows + instruction.
        system_prompt = (
            f"ROLE CONTEXT: {roles[role]}\n\n"
            f"DATA CONTEXT:\n{context}\n\n"
            f"INSTRUCTION:\nPlease respond with a technical explanation that includes sensor-based reasoning, terminology, and if possible, an action plan.\n"
        )
        full_prompt = f"{system_prompt}\nUser Query: {user_input}"
        reply = GEN_MODEL(full_prompt, max_length=350)[0]['generated_text']
        # Record conversation
        st.session_state.chat_history.append((f"👤 You ({role})", user_input))
        st.session_state.chat_history.append(("🤖 FactoryGPT", reply))

    # Show the last 10 turns. The original f-string here was split across
    # three physical lines (a SyntaxError); rebuilt as one markdown call.
    # Rendered after the input block so history persists across reruns.
    for speaker, msg in st.session_state.chat_history[-10:]:
        st.markdown(f"**{speaker}:** {msg}", unsafe_allow_html=True)
else:
    st.info("👈 Please upload a CSV file containing numeric sensor logs to begin analysis.")