# app.py - FactoryGPT 5.0: Predictive Maintenance with Technical Expert GPT
import numpy as np
import pandas as pd
import streamlit as st
from sentence_transformers import SentenceTransformer
from sklearn.ensemble import IsolationForest
from transformers import pipeline
# Page setup -- set_page_config must be the first Streamlit command executed.
st.set_page_config(
    page_title="FactoryGPT 5.0 β Predictive Maintenance Expert",
    page_icon="π§ ",
    layout="wide",
)
# Dark mode CSS: overrides Streamlit's default theme for body text,
# text inputs / selectboxes, and the dataframe corner cell.
st.markdown("""
    <style>
    html, body, [class*="css"] {
        font-family: 'Segoe UI', sans-serif;
        background-color: #0f1117;
        color: #f0f0f0;
    }
    .stTextInput>div>div>input,
    .stSelectbox>div>div>div>div {
        background-color: #1a1c23;
        color: #fff;
    }
    .stDataFrame .blank {
        background-color: #0f1117 !important;
    }
    </style>
""", unsafe_allow_html=True)
# Header banner (raw HTML for centered styling).
st.markdown("""
    <div style='text-align: center;'>
        <h1 style='color: #58a6ff;'>π FactoryGPT 5.0 β Predictive Maintenance Assistant</h1>
        <p style='color: #bbb;'>Sensor-Driven Diagnostics | Role-Based Expert Assistant | Industry 5.0 Ready</p>
        <hr style='border-top: 2px solid #888;'>
    </div>
""", unsafe_allow_html=True)
# Load NLP models. Streamlit re-executes the whole script on every user
# interaction, so without caching both models would be re-instantiated
# (and potentially re-downloaded) on every rerun. st.cache_resource keeps
# one shared instance per process.
@st.cache_resource
def _load_models():
    """Return (sentence-embedding model, text2text generation pipeline)."""
    embedder = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
    generator = pipeline('text2text-generation', model='google/flan-t5-base')
    return embedder, generator

EMBED_MODEL, GEN_MODEL = _load_models()
# Upload data
uploaded_file = st.sidebar.file_uploader("π Upload your sensor CSV", type=["csv"])

if uploaded_file:
    df = pd.read_csv(uploaded_file)
    numeric_cols = df.select_dtypes(include=np.number).columns.tolist()
    st.success("β Sensor log loaded!")
    st.markdown("### π Sensor Data Preview")
    st.dataframe(df.head(), use_container_width=True)

    # Prepare vector chunks
    def convert_to_chunks(frame):
        """Serialise each sensor-log row into one text chunk for embedding."""
        return [
            f"[Entry {i}] " + ", ".join(f"{col}: {row[col]:.2f}" for col in numeric_cols)
            for i, row in frame.iterrows()
        ]

    # Embed the log once per uploaded file. BUGFIX: the previous check only
    # tested for key presence ('chunks' not in session_state), so embeddings
    # built for an earlier upload were silently reused for a new file.
    # Keying the cache on the uploaded file's name forces a refresh.
    if st.session_state.get('embedded_file') != uploaded_file.name:
        chunks = convert_to_chunks(df)
        st.session_state.chunks = chunks
        st.session_state.embeddings = EMBED_MODEL.encode(chunks)
        st.session_state.embedded_file = uploaded_file.name

    # Predict machine status via unsupervised anomaly detection.
    st.markdown("### π Equipment Status Prediction")
    # contamination=0.02: expect ~2% anomalous rows. random_state pins the
    # subsampling so Faulty/OK labels stay stable across Streamlit reruns.
    iso = IsolationForest(contamination=0.02, random_state=0)
    labels = iso.fit_predict(df[numeric_cols])  # -1 = anomaly, 1 = normal
    df['status'] = ['β Faulty' if x == -1 else 'β OK' for x in labels]
    df['maintenance_flag'] = ['π§ Action Required' if x == -1 else 'π’ Normal' for x in labels]
    st.dataframe(df[['status', 'maintenance_flag'] + numeric_cols].head(), use_container_width=True)

    # Role-based assistant: each role maps to a persona prompt prefix.
    st.markdown("### π¬ Role-Based Technical Assistant")
    roles = {
        "Operator": (
            "You are a senior machine operator. Based on sensor logs, identify abnormal conditions in real-time operation. "
            "Explain deviations clearly and suggest next steps to the floor team."
        ),
        "Maintenance": (
            "You are a certified maintenance technician. Analyze anomalous sensor readings, reference maintenance schedules, "
            "and recommend immediate actions or predictive interventions with justification."
        ),
        "Engineer": (
            "You are a reliability and control systems engineer. Provide a detailed root cause analysis from the sensor logs, "
            "explain what failure modes may be emerging (e.g., torque drift, PID lag), and propose technical mitigations."
        ),
    }
    role = st.selectbox("π€ Select your role for technical insights", list(roles.keys()))

    if 'chat_history' not in st.session_state:
        st.session_state.chat_history = []

    user_input = st.text_input("π§ Ask FactoryGPT a technical question about the machine behavior")

    if user_input:
        # Retrieve the 5 log entries most similar to the query.
        # NOTE(review): plain dot product is used as the similarity score;
        # this equals cosine similarity only if the embeddings are
        # unit-normalised — confirm for this embedding model.
        query_vec = EMBED_MODEL.encode([user_input])[0]
        sims = np.dot(st.session_state.embeddings, query_vec)
        top_idxs = np.argsort(sims)[-5:][::-1]  # top-5, most similar first
        context = "\n".join(st.session_state.chunks[i] for i in top_idxs)

        # Prompt engineering: role persona + retrieved data + instruction.
        system_prompt = (
            f"ROLE CONTEXT: {roles[role]}\n\n"
            f"DATA CONTEXT:\n{context}\n\n"
            f"INSTRUCTION:\nPlease respond with a technical explanation that includes sensor-based reasoning, terminology, and if possible, an action plan.\n"
        )
        full_prompt = f"{system_prompt}\nUser Query: {user_input}"
        reply = GEN_MODEL(full_prompt, max_length=350)[0]['generated_text']

        # Record conversation
        st.session_state.chat_history.append((f"π€ You ({role})", user_input))
        st.session_state.chat_history.append(("π€ FactoryGPT", reply))

    # Show the last 10 chat messages.
    for speaker, msg in st.session_state.chat_history[-10:]:
        st.markdown(f"<div style='margin-bottom: 10px;'><b>{speaker}:</b><br>{msg}</div>", unsafe_allow_html=True)
else:
    st.info("π Please upload a CSV file containing numeric sensor logs to begin analysis.")