Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -1,4 +1,4 @@
|
|
1 |
-
# app.py - FactoryGPT 5.0:
|
2 |
|
3 |
import streamlit as st
|
4 |
import pandas as pd
|
@@ -7,14 +7,14 @@ from sentence_transformers import SentenceTransformer
|
|
7 |
from transformers import pipeline
|
8 |
from sklearn.ensemble import IsolationForest
|
9 |
|
10 |
-
#
|
11 |
st.set_page_config(
|
12 |
-
page_title="FactoryGPT 5.0 β
|
13 |
page_icon="π§ ",
|
14 |
layout="wide"
|
15 |
)
|
16 |
|
17 |
-
#
|
18 |
st.markdown("""
|
19 |
<style>
|
20 |
html, body, [class*="css"] {
|
@@ -36,28 +36,28 @@ st.markdown("""
|
|
36 |
# Header
|
37 |
st.markdown("""
|
38 |
<div style='text-align: center;'>
|
39 |
-
<h1 style='color: #58a6ff;'>π FactoryGPT 5.0 β Predictive Maintenance
|
40 |
-
<p style='color: #bbb;'>
|
41 |
<hr style='border-top: 2px solid #888;'>
|
42 |
</div>
|
43 |
""", unsafe_allow_html=True)
|
44 |
|
45 |
-
# Load NLP
|
46 |
EMBED_MODEL = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
|
47 |
GEN_MODEL = pipeline('text2text-generation', model='google/flan-t5-base')
|
48 |
|
49 |
-
# Upload
|
50 |
-
uploaded_file = st.sidebar.file_uploader("π Upload
|
51 |
|
52 |
if uploaded_file:
|
53 |
df = pd.read_csv(uploaded_file)
|
54 |
numeric_cols = df.select_dtypes(include=np.number).columns.tolist()
|
55 |
-
st.success("β
Sensor log loaded
|
56 |
|
57 |
-
st.markdown("### π Sensor Data
|
58 |
st.dataframe(df.head(), use_container_width=True)
|
59 |
|
60 |
-
#
|
61 |
def convert_to_chunks(df):
|
62 |
return [f"[Entry {i}] " + ", ".join([f"{col}: {row[col]:.2f}" for col in numeric_cols]) for i, row in df.iterrows()]
|
63 |
|
@@ -67,61 +67,69 @@ if uploaded_file:
|
|
67 |
st.session_state.chunks = chunks
|
68 |
st.session_state.embeddings = embeddings
|
69 |
|
70 |
-
#
|
71 |
-
st.markdown("###
|
72 |
iso = IsolationForest(contamination=0.02)
|
73 |
labels = iso.fit_predict(df[numeric_cols])
|
74 |
-
df['status'] = ['β
|
75 |
-
df['maintenance_flag'] = ['π§
|
76 |
st.dataframe(df[['status', 'maintenance_flag'] + numeric_cols].head(), use_container_width=True)
|
77 |
|
78 |
-
# Role-based
|
79 |
-
st.markdown("### π¬
|
80 |
roles = {
|
81 |
-
"Operator":
|
82 |
-
"
|
83 |
-
"
|
84 |
-
|
85 |
-
|
86 |
-
|
87 |
-
|
88 |
-
|
89 |
-
|
90 |
-
"
|
91 |
-
|
92 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
93 |
}
|
94 |
|
95 |
-
role = st.selectbox("π€ Select your role
|
96 |
|
97 |
if 'chat_history' not in st.session_state:
|
98 |
st.session_state.chat_history = []
|
99 |
|
100 |
-
user_input = st.text_input("π§ Ask
|
101 |
|
102 |
if user_input:
|
103 |
-
# Retrieve relevant context from logs
|
104 |
query_vec = EMBED_MODEL.encode([user_input])[0]
|
105 |
sims = np.dot(st.session_state.embeddings, query_vec)
|
106 |
top_idxs = np.argsort(sims)[-5:][::-1]
|
107 |
context = "\n".join([st.session_state.chunks[i] for i in top_idxs])
|
108 |
|
109 |
-
# Prompt
|
110 |
-
|
111 |
-
f"ROLE
|
|
|
|
|
112 |
f"DATA CONTEXT:\n{context}\n\n"
|
113 |
-
f"
|
|
|
114 |
)
|
115 |
-
|
116 |
-
reply = GEN_MODEL(full_prompt, max_length=350)[0]['generated_text']
|
117 |
|
118 |
-
|
119 |
-
st.session_state.chat_history.append((
|
120 |
-
st.session_state.chat_history.append(("π€ FactoryGPT", reply))
|
121 |
|
122 |
-
# Show chat
|
123 |
for speaker, msg in st.session_state.chat_history[-10:]:
|
124 |
st.markdown(f"<div style='margin-bottom: 10px;'><b>{speaker}:</b><br>{msg}</div>", unsafe_allow_html=True)
|
125 |
|
126 |
else:
|
127 |
-
st.info("π
|
|
|
# app.py - FactoryGPT 5.0: Role-Differentiated Technical Assistant
#
# NOTE(review): this block was reconstructed from a garbled diff rendering.
# Emoji glyphs were mojibake in the source and have been replaced with
# plausible equivalents — confirm against the original repository.

import streamlit as st
import pandas as pd
import numpy as np
from sentence_transformers import SentenceTransformer
from transformers import pipeline
from sklearn.ensemble import IsolationForest

# App Config
st.set_page_config(
    page_title="FactoryGPT 5.0 – Technical Role-Based Maintenance Assistant",
    page_icon="🧠",
    layout="wide"
)

# Styling
# NOTE(review): the CSS body between <style> tags was elided in the source
# diff (file lines 21-35); only the selector shown there is restored.
st.markdown("""
    <style>
    html, body, [class*="css"] {
        font-family: 'Segoe UI', sans-serif;
    }
    </style>
""", unsafe_allow_html=True)

# Header
st.markdown("""
    <div style='text-align: center;'>
        <h1 style='color: #58a6ff;'>🏭 FactoryGPT 5.0 – Predictive Maintenance AI</h1>
        <p style='color: #bbb;'>Role-Based Technical Assistant • Sensor Intelligence • Industry 5.0</p>
        <hr style='border-top: 2px solid #888;'>
    </div>
""", unsafe_allow_html=True)

# Load NLP Models
# NOTE(review): loaded at module top level on every rerun — consider
# wrapping in @st.cache_resource; kept as-is to preserve original structure.
EMBED_MODEL = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
GEN_MODEL = pipeline('text2text-generation', model='google/flan-t5-base')

# File Upload
uploaded_file = st.sidebar.file_uploader("📂 Upload sensor log (CSV)", type=["csv"])

if uploaded_file:
    df = pd.read_csv(uploaded_file)
    # Only numeric columns participate in chunking and anomaly detection.
    numeric_cols = df.select_dtypes(include=np.number).columns.tolist()
    st.success("✅ Sensor log successfully loaded.")

    st.markdown("### 📈 Sensor Data Snapshot")
    st.dataframe(df.head(), use_container_width=True)

    # Vector Chunking for RAG: one text chunk per sensor-log row.
    def convert_to_chunks(df):
        return [f"[Entry {i}] " + ", ".join([f"{col}: {row[col]:.2f}" for col in numeric_cols]) for i, row in df.iterrows()]

    # Cache chunks/embeddings in session state so they survive reruns.
    # NOTE(review): these three lines were elided context in the source diff
    # (file lines 64-66); reconstructed from the assignments that follow —
    # confirm against the original file.
    if 'chunks' not in st.session_state:
        chunks = convert_to_chunks(df)
        embeddings = EMBED_MODEL.encode(chunks)
        st.session_state.chunks = chunks
        st.session_state.embeddings = embeddings

    # Predictive Anomaly Detection: IsolationForest flags ~2% of rows as
    # anomalous (contamination=0.02); fit_predict returns -1 for anomalies.
    st.markdown("### 📊 Machine Health Assessment")
    iso = IsolationForest(contamination=0.02)
    labels = iso.fit_predict(df[numeric_cols])
    df['status'] = ['❌ Fault Detected' if x == -1 else '✅ Healthy' for x in labels]
    df['maintenance_flag'] = ['🔧 Inspect Required' if x == -1 else '🟢 Stable' for x in labels]
    st.dataframe(df[['status', 'maintenance_flag'] + numeric_cols].head(), use_container_width=True)

    # Role-based Response Logic: each role carries a responsibility
    # description and a communication style that steer the prompt below.
    st.markdown("### 💬 Technical Assistant by Role")
    roles = {
        "Operator": {
            "description": "Focus on simple equipment behavior. Identify unusual patterns and suggest if operator actions are needed.",
            "style": (
                "Explain in clear and simple terms. Focus on sensor changes, safe operation, and when to escalate to maintenance. "
                "Avoid deep technical jargon."
            )
        },
        "Maintenance": {
            "description": "Examine possible fault conditions. Reference symptoms, parts affected, and recommend procedural maintenance actions.",
            "style": (
                "Give a technical breakdown of what components may be degrading. Recommend maintenance steps and cite related sensor evidence. "
                "Use concise, technician-friendly language."
            )
        },
        "Engineer": {
            "description": "Perform data-driven diagnostics. Detect systemic issues, control instability, or potential root causes.",
            "style": (
                "Provide a structured root cause analysis using engineering language. Mention potential failure modes (e.g., drift, thermal loss, PID lag), "
                "anomaly thresholds, and corrective engineering strategies."
            )
        }
    }

    role = st.selectbox("👤 Select your role", roles.keys())

    if 'chat_history' not in st.session_state:
        st.session_state.chat_history = []

    user_input = st.text_input("🧑 Ask a question about equipment behavior")

    if user_input:
        # Retrieve relevant context from logs: dot-product similarity
        # against cached row embeddings, keep the top 5 rows.
        query_vec = EMBED_MODEL.encode([user_input])[0]
        sims = np.dot(st.session_state.embeddings, query_vec)
        top_idxs = np.argsort(sims)[-5:][::-1]
        context = "\n".join([st.session_state.chunks[i] for i in top_idxs])

        # Build Prompt: role contract + retrieved sensor context + question.
        system_instruction = (
            f"ROLE: {role}\n"
            f"RESPONSIBILITIES: {roles[role]['description']}\n"
            f"COMMUNICATION STYLE: {roles[role]['style']}\n\n"
            f"DATA CONTEXT:\n{context}\n\n"
            f"QUESTION:\n{user_input}\n\n"
            f"ANSWER AS {role.upper()}:\n"
        )
        response = GEN_MODEL(system_instruction, max_length=400)[0]['generated_text']

        st.session_state.chat_history.append((f"🧑 {role}", user_input))
        st.session_state.chat_history.append(("🤖 FactoryGPT", response))

    # Show the last 10 chat turns.
    for speaker, msg in st.session_state.chat_history[-10:]:
        st.markdown(f"<div style='margin-bottom: 10px;'><b>{speaker}:</b><br>{msg}</div>", unsafe_allow_html=True)

else:
    st.info("📁 Upload a CSV sensor log to begin.")