# app.py - FactoryGPT 5.0: Predictive Maintenance with Technical Expert GPT

import streamlit as st
import pandas as pd
import numpy as np
from sentence_transformers import SentenceTransformer
from transformers import pipeline
from sklearn.ensemble import IsolationForest

# Page setup
st.set_page_config(
    page_title="FactoryGPT 5.0 – Predictive Maintenance Expert",
    page_icon="🧠",
    layout="wide"
)

# Dark mode CSS
st.markdown("""
    <style>
    html, body, [class*="css"] {
        font-family: 'Segoe UI', sans-serif;
        background-color: #0f1117;
        color: #f0f0f0;
    }
    .stTextInput>div>div>input,
    .stSelectbox>div>div>div>div {
        background-color: #1a1c23;
        color: #fff;
    }
    .stDataFrame .blank {
        background-color: #0f1117 !important;
    }
    </style>
""", unsafe_allow_html=True)

# Header
st.markdown("""
    <div style='text-align: center;'>
        <h1 style='color: #58a6ff;'>🏭 FactoryGPT 5.0 – Predictive Maintenance Assistant</h1>
        <p style='color: #bbb;'>Sensor-Driven Diagnostics | Role-Based Expert Assistant | Industry 5.0 Ready</p>
        <hr style='border-top: 2px solid #888;'>
    </div>
""", unsafe_allow_html=True)

# Load NLP models
EMBED_MODEL = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
GEN_MODEL = pipeline('text2text-generation', model='google/flan-t5-base')
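# Note: defined at module level, both models are re-instantiated on every Streamlit
# rerun. A common alternative (sketch, assuming a Streamlit version that provides
# st.cache_resource) is to wrap the loaders in a cached factory:
#
#   @st.cache_resource
#   def load_models():
#       return (SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2'),
#               pipeline('text2text-generation', model='google/flan-t5-base'))
#
#   EMBED_MODEL, GEN_MODEL = load_models()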

# Upload data
uploaded_file = st.sidebar.file_uploader("📂 Upload your sensor CSV", type=["csv"])

if uploaded_file:
    df = pd.read_csv(uploaded_file)
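    # Only the numeric sensor columns feed the anomaly model and the text chunks below;
    # rows with missing values in these columns may need to be imputed or dropped first,
    # depending on how the installed scikit-learn version handles NaNs.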
    numeric_cols = df.select_dtypes(include=np.number).columns.tolist()
    st.success("✅ Sensor log loaded!")

    st.markdown("### 📊 Sensor Data Preview")
    st.dataframe(df.head(), use_container_width=True)

    # Prepare vector chunks
    def convert_to_chunks(df):
        # Turn each row of numeric readings into a short "[Entry i] col: value, ..." snippet
        # that can be embedded and retrieved as context for the assistant.
        return [
            f"[Entry {i}] " + ", ".join(f"{col}: {row[col]:.2f}" for col in numeric_cols)
            for i, row in df.iterrows()
        ]

    # Cache the chunks and embeddings in session state; re-encode when a new file is
    # uploaded so the assistant does not keep answering from a previously uploaded log
    if st.session_state.get('source_file') != uploaded_file.name:
        st.session_state.chunks = convert_to_chunks(df)
        st.session_state.embeddings = EMBED_MODEL.encode(st.session_state.chunks)
        st.session_state.source_file = uploaded_file.name

    # Predict machine status
    st.markdown("### 🔍 Equipment Status Prediction")
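    # IsolationForest is unsupervised: contamination=0.02 flags roughly the most
    # anomalous 2% of rows as faults (fit_predict returns -1 for anomalies, 1 otherwise)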
    iso = IsolationForest(contamination=0.02)
    labels = iso.fit_predict(df[numeric_cols])
    df['status'] = ['❌ Faulty' if x == -1 else '✅ OK' for x in labels]
    df['maintenance_flag'] = ['🔧 Action Required' if x == -1 else '🟢 Normal' for x in labels]
    st.dataframe(df[['status', 'maintenance_flag'] + numeric_cols].head(), use_container_width=True)

    # Role-based assistant
    st.markdown("### 💬 Role-Based Technical Assistant")
    roles = {
        "Operator": (
            "You are a senior machine operator. Based on sensor logs, identify abnormal conditions in real-time operation. "
            "Explain deviations clearly and suggest next steps to the floor team."
        ),
        "Maintenance": (
            "You are a certified maintenance technician. Analyze anomalous sensor readings, reference maintenance schedules, "
            "and recommend immediate actions or predictive interventions with justification."
        ),
        "Engineer": (
            "You are a reliability and control systems engineer. Provide a detailed root cause analysis from the sensor logs, "
            "explain what failure modes may be emerging (e.g., torque drift, PID lag), and propose technical mitigations."
        )
    }

    role = st.selectbox("👤 Select your role for technical insights", list(roles.keys()))

    if 'chat_history' not in st.session_state:
        st.session_state.chat_history = []

    user_input = st.text_input("🧠 Ask FactoryGPT a technical question about the machine behavior")

    if user_input:
        # Retrieve relevant context from logs
        query_vec = EMBED_MODEL.encode([user_input])[0]
        sims = np.dot(st.session_state.embeddings, query_vec)
        top_idxs = np.argsort(sims)[-5:][::-1]
        context = "\n".join([st.session_state.chunks[i] for i in top_idxs])
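        # These embeddings are not unit-normalized, so np.dot gives raw inner products
        # rather than cosine similarity; passing normalize_embeddings=True to encode()
        # above would be one way to rank the chunks by cosine score instead.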

        # Prompt Engineering
        system_prompt = (
            f"ROLE CONTEXT: {roles[role]}\n\n"
            f"DATA CONTEXT:\n{context}\n\n"
            f"INSTRUCTION:\nPlease respond with a technical explanation that includes sensor-based reasoning, terminology, and if possible, an action plan.\n"
        )
        full_prompt = f"{system_prompt}\nUser Query: {user_input}"
        reply = GEN_MODEL(full_prompt, max_length=350)[0]['generated_text']
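        # Note: flan-t5-base's tokenizer assumes roughly 512 input tokens, so a long
        # retrieved context plus the role prompt may exceed that and degrade the answer;
        # max_length=350 limits the generated reply, not the input.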

        # Record conversation
        st.session_state.chat_history.append((f"👤 You ({role})", user_input))
        st.session_state.chat_history.append(("🤖 FactoryGPT", reply))

    # Show chat
    for speaker, msg in st.session_state.chat_history[-10:]:
        st.markdown(f"<div style='margin-bottom: 10px;'><b>{speaker}:</b><br>{msg}</div>", unsafe_allow_html=True)

else:
    st.info("👈 Please upload a CSV file containing numeric sensor logs to begin analysis.")