Update app.py
app.py CHANGED
@@ -91,18 +91,12 @@ def get_step_info(step_number):
             "Standard Time": "7 seconds",
             "Video_substeps_expected": {
                 "0-3 seconds": "Technician unloads(removes) carcass(tire) from the machine."
-            },
-            "Potential_Delay_reasons": [
-                "Person not available in time(in 3 sec) to remove carcass.",
-                "Person is doing bead(ring) insertion before carcass unload causing unload to be delayed by more than 3 sec"
-            ]
+            }
         }
     }

     return step_details.get(step_number, {"Error": "Invalid step number. Please provide a valid step number."})

-
-
 def load_video(video_data, strategy='chat'):
     """Loads and processes video data into a format suitable for model input."""
     bridge.set_bridge('torch')
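With "Potential_Delay_reasons" removed from each step's entry, the per-step delay reasons are expected to come from a separate lookup; the rebuilt prompt later in this diff calls DELAY_REASONS.get(f"Step {step_number}", ...). Below is a minimal sketch of what such a module-level mapping could look like, assuming the same "Step N" key pattern and reusing reason strings that appear elsewhere in this file; the actual contents of DELAY_REASONS are not shown in the diff, so the step-to-reason pairing here is illustrative only.

    # Hypothetical sketch; the real DELAY_REASONS dict lives elsewhere in app.py.
    DELAY_REASONS = {
        "Step 1": ["Delay in Bead Insertion", "Lack of raw material"],
        "Step 8": [
            "Person not available in time(in 3 sec) to remove carcass.",
            "Person is doing bead(ring) insertion before carcass unload causing unload to be delayed by more than 3 sec",
        ],
    }

    # Mirrors the lookup used later in the diff, including its fallback value.
    reasons = DELAY_REASONS.get("Step 8", ["No specific reasons provided."])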
@@ -113,11 +107,14 @@ def load_video(video_data, strategy='chat'):
     else:
         decord_vr = VideoReader(io.BytesIO(video_data), ctx=cpu(0))

-    frame_id_list = []
     total_frames = len(decord_vr)
+    if total_frames < num_frames:
+        raise ValueError("Uploaded video is too short for meaningful analysis.")
+
     timestamps = [i[0] for i in decord_vr.get_frame_timestamp(np.arange(total_frames))]
     max_second = round(max(timestamps)) + 1

+    frame_id_list = []
     for second in range(max_second):
         closest_num = min(timestamps, key=lambda x: abs(x - second))
         index = timestamps.index(closest_num)
@@ -170,19 +167,19 @@ def predict(prompt, video_data, temperature, model, tokenizer):

     gen_kwargs = {
         "max_new_tokens": 2048,
-        "pad_token_id":
+        "pad_token_id": tokenizer.pad_token_id,
         "top_k": 1,
         "do_sample": False,
         "top_p": 0.1,
-        "temperature":
+        "temperature": 0.3,
     }

     with torch.no_grad():
         outputs = model.generate(**inputs, **gen_kwargs)
         outputs = outputs[:, inputs['input_ids'].shape[1]:]
-        response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+        response = tokenizer.decode(outputs[0], skip_special_tokens=True).strip()

-    return response
+    return f"Analysis Result:\n{response}"

 def get_analysis_prompt(step_number):
     """Constructs the prompt for analyzing delay reasons based on the selected step."""
@@ -193,102 +190,22 @@ def get_analysis_prompt(step_number):

     step_name = step_info["Name"]
     standard_time = step_info["Standard Time"]
-
-
-    return f"""
-You are an AI expert system specialized in analyzing manufacturing processes and identifying production delays in tire manufacturing. Your role is to accurately classify delay reasons based on visual evidence from production line footage.
-Task Context:
-You are analyzing video footage from Step {step_number} of a tire manufacturing process where a delay has been detected. The step is called {step_name}, and its standard time is {standard_time}.
-Required Analysis:
-Carefully observe the video for visual cues indicating production interruption.
-- If no person is visible in any of the frames, the reason probably might be due to their absence.
-- If a person is visible in the video and is observed touching and modifying the layers of the tire, it indicates an issue with tire patching, and the person might be repairing it.
-- Compare observed evidence against the following possible delay reasons:
-- {analysis}
-
-Following are the subactivities needs to happen in this step.
-
-{get_step_info(step_number)}
+    substeps = step_info["Video_substeps_expected"]
+    delay_reasons = DELAY_REASONS.get(f"Step {step_number}", ["No specific reasons provided."])

-
-
-["Delay in Bead Insertion", "Lack of raw material"],
-["Inner Liner Adjustment by Technician", "Person rebuilding defective Tire Sections"],
-["Manual Adjustment in Ply1 Apply", "Technician repairing defective Tire Sections"],
-["Delay in Bead Set", "Lack of raw material"],
-["Delay in Turnup", "Lack of raw material"],
-["Person Repairing Sidewall", "Person rebuilding defective Tire Sections"],
-["Delay in Sidewall Stitching", "Lack of raw material"],
-["No person available to load Carcass", "No person available to collect tire"]
-}
-
-1. **Selected Reason:** [State the most likely reason from the given options]
-2. **Visual Evidence:** [Describe specific visual cues that support your selection]
-3. **Reasoning:** [Explain why this reason best matches the observed evidence]
-4. **Alternative Analysis:** [Brief explanation of why other possible reasons are less likely]
-
-Important: Base your analysis solely on visual evidence from the video. Focus on concrete, observable details rather than assumptions. Clearly state if no person or specific activity is observed.
-"""
-
-
-
-model, tokenizer = load_model()
-
-def inference(video, step_number):
-    """Analyzes video to predict possible issues based on the manufacturing step."""
-    try:
-        if not video:
-            return "Please upload a video first."
-
-        prompt = get_analysis_prompt(step_number)
-        temperature = 0.3
-        response = predict(prompt, video, temperature, model, tokenizer)
-
-        return response
-    except Exception as e:
-        return f"An error occurred during analysis: {str(e)}"
-
-def create_interface():
-    """Creates the Gradio interface for the Manufacturing Analysis System."""
-    with gr.Blocks() as demo:
-        gr.Markdown("""
-        # Manufacturing Analysis System
-        Upload a video of the manufacturing step and select the step number.
-        The system will analyze the video and provide observations.
-        """)
-
-        with gr.Row():
-            with gr.Column():
-                video = gr.Video(label="Upload Manufacturing Video", sources=["upload"])
-                step_number = gr.Dropdown(
-                    choices=[f"Step {i}" for i in range(1, 9)],
-                    label="Manufacturing Step"
-                )
-                analyze_btn = gr.Button("Analyze", variant="primary")
-
-            with gr.Column():
-                output = gr.Textbox(label="Analysis Result", lines=10)
-
-        gr.Examples(
-            examples=[
-                ["7838_step2_2_eval.mp4", "Step 2"],
-                ["7838_step6_2_eval.mp4", "Step 6"],
-                ["7838_step8_1_eval.mp4", "Step 8"],
-                ["7993_step6_3_eval.mp4", "Step 6"],
-                ["7993_step8_3_eval.mp4", "Step 8"]
-            ],
-            inputs=[video, step_number],
-            cache_examples=False
-        )
-
-        analyze_btn.click(
-            fn=inference,
-            inputs=[video, step_number],
-            outputs=[output]
-        )
+    substeps_text = "\n".join([f"- {time}: {action}" for time, action in substeps.items()])
+    reasons_text = "\n".join([f"- {reason}" for reason in delay_reasons])

-    return
-
-
-
-
+    return f"""
+You are an AI expert system analyzing manufacturing delays in tire production. Below are the details:
+Step: {step_number} - {step_name}
+Standard Time: {standard_time}
+Substeps Expected in Video:
+{substeps_text}
+
+Potential Delay Reasons:
+{reasons_text}
+
+Task: Analyze the provided video to identify the delay reason. Use the following format:
+1. **Selected Reason:** [Choose the most likely reason from the list above]
+2. **Visual Evidence:** [Describe specific visual cues from the
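As a quick check of the two join expressions added in this hunk (substeps_text and reasons_text), here is how they render for the carcass-unload step shown in the first hunk; this is a standalone illustration whose input values are copied from strings elsewhere in the diff.

    substeps = {"0-3 seconds": "Technician unloads(removes) carcass(tire) from the machine."}
    delay_reasons = [
        "Person not available in time(in 3 sec) to remove carcass.",
        "Person is doing bead(ring) insertion before carcass unload causing unload to be delayed by more than 3 sec",
    ]

    substeps_text = "\n".join([f"- {time}: {action}" for time, action in substeps.items()])
    reasons_text = "\n".join([f"- {reason}" for reason in delay_reasons])

    print(substeps_text)
    # - 0-3 seconds: Technician unloads(removes) carcass(tire) from the machine.
    print(reasons_text)
    # - Person not available in time(in 3 sec) to remove carcass.
    # - Person is doing bead(ring) insertion before carcass unload causing unload to be delayed by more than 3 sec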
|