VishalD1234 committed
Commit c0b4a24 · verified · 1 Parent(s): 260aafd

Update app.py

Files changed (1)
  1. app.py +136 -50
app.py CHANGED
@@ -6,29 +6,99 @@ from decord import cpu, VideoReader, bridge
 from transformers import AutoModelForCausalLM, AutoTokenizer
 from transformers import BitsAndBytesConfig
 
-
 MODEL_PATH = "THUDM/cogvlm2-video-llama3-chat"
 DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
 TORCH_TYPE = torch.bfloat16 if torch.cuda.is_available() and torch.cuda.get_device_capability()[0] >= 8 else torch.float16
 
 
-DELAY_REASONS = {
-    "Step 1": ["Delay in Bead Insertion","Lack of raw material"],
-    "Step 2": ["Inner Liner Adjustment by Technician","Person rebuilding defective Tire Sections"],
-    "Step 3": ["Manual Adjustment in Ply1 apply","Technician repairing defective Tire Sections"],
-    "Step 4": ["Delay in Bead set","Lack of raw material"],
-    "Step 5": ["Delay in Turnup","Lack of raw material"],
-    "Step 6": ["Person Repairing sidewall","Person rebuilding defective Tire Sections"],
-    "Step 7": ["Delay in sidewall stitching","Lack of raw material"],
-    "Step 8": ["No person available to load Carcass","No person available to collect tire"]
-}
+# Delay Reasons for Each Manufacturing Step
+
+def get_step_info(step_number):
+    """Returns detailed information about a manufacturing step."""
+    step_details = {
+        1: {
+            "Name": "Bead Insertion",
+            "Standard Time": "4 seconds",
+            "Video_substeps_expected": {
+                "0-1 second": "Machine starts bead insertion process.",
+                "1-3 seconds": "Beads are aligned and positioned.",
+                "3-4 seconds": "Final adjustment and confirmation of bead placement."
+            }
+        },
+        2: {
+            "Name": "Inner Liner Apply",
+            "Standard Time": "4 seconds",
+            "Video_substeps_expected": {
+                "0-1 second": "Machine applies the first layer of the liner.",
+                "1-3 seconds": "Technician checks alignment and adjusts if needed.",
+                "3-4 seconds": "Final inspection and confirmation."
+            }
+        },
+        3: {
+            "Name": "Ply1 Apply",
+            "Standard Time": "4 seconds",
+            "Video_substeps_expected": {
+                "0-2 seconds": "First ply is loaded onto the machine.",
+                "2-4 seconds": "Technician inspects and adjusts ply placement."
+            }
+        },
+        4: {
+            "Name": "Bead Set",
+            "Standard Time": "8 seconds",
+            "Video_substeps_expected": {
+                "0-3 seconds": "Bead is positioned and pre-set.",
+                "3-6 seconds": "Machine secures the bead in place.",
+                "6-8 seconds": "Technician confirms the bead alignment."
+            }
+        },
+        5: {
+            "Name": "Turnup",
+            "Standard Time": "4 seconds",
+            "Video_substeps_expected": {
+                "0-2 seconds": "Turnup process begins with machine handling.",
+                "2-4 seconds": "Technician inspects the turnup and makes adjustments if necessary."
+            }
+        },
+        6: {
+            "Name": "Sidewall Apply",
+            "Standard Time": "14 seconds",
+            "Video_substeps_expected": {
+                "0-5 seconds": "Sidewall material is positioned by the machine.",
+                "5-10 seconds": "Technician checks for alignment and begins application.",
+                "10-14 seconds": "Final adjustments and confirmation of sidewall placement."
+            }
+        },
+        7: {
+            "Name": "Sidewall Stitching",
+            "Standard Time": "5 seconds",
+            "Video_substeps_expected": {
+                "0-2 seconds": "Stitching process begins automatically.",
+                "2-4 seconds": "Technician inspects stitching for any irregularities.",
+                "4-5 seconds": "Machine completes stitching process."
+            }
+        },
+        8: {
+            "Name": "Carcass Unload",
+            "Standard Time": "7 seconds",
+            "Video_substeps_expected": {
+                "0-3 seconds": "Technician unloads(removes) carcass(tire) from the machine."
+            },
+            "Potential_Delay_reasons": [
+                "Person not available in time(in 3 sec) to remove carcass.",
+                "Person is doing bead(ring) insertion before carcass unload causing unload to be delayed by more than 3 sec"
+            ]
+        }
+    }
+
+    return step_details.get(step_number, {"Error": "Invalid step number. Please provide a valid step number."})
+
 
 def load_video(video_data, strategy='chat'):
     """Loads and processes video data into a format suitable for model input."""
     bridge.set_bridge('torch')
     num_frames = 24
 
-    if isinstance(video_data, str):
+    if isinstance(video_data, str):
         decord_vr = VideoReader(video_data, ctx=cpu(0))
     else:
         decord_vr = VideoReader(io.BytesIO(video_data), ctx=cpu(0))
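load_model() is called later in the file but its body is not part of this diff. A minimal sketch of what such a loader could look like, assuming 4-bit quantization via the BitsAndBytesConfig imported above (the exact settings are illustrative, not taken from this commit):

    def load_model():
        # Hypothetical loader; quantization settings are assumptions, not from the commit.
        quant_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=TORCH_TYPE)
        tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH, trust_remote_code=True)
        model = AutoModelForCausalLM.from_pretrained(
            MODEL_PATH,
            torch_dtype=TORCH_TYPE,
            trust_remote_code=True,
            quantization_config=quant_config,
        ).eval()
        return model, tokenizer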
@@ -104,38 +174,58 @@ def predict(prompt, video_data, temperature, model, tokenizer):
 
     return response
 
-def get_analysis_prompt(step_number, possible_reasons):
+def get_analysis_prompt(step_number):
     """Constructs the prompt for analyzing delay reasons based on the selected step."""
-    return f"""You are an AI expert system specialized in analyzing manufacturing processes and identifying production delays in tire manufacturing. Your role is to accurately classify delay reasons based on visual evidence from production line footage.
+    step_info = get_step_info(step_number)
+
+    if "Error" in step_info:
+        return step_info["Error"]
+
+    step_name = step_info["Name"]
+    standard_time = step_info["Standard Time"]
+    analysis = step_info["Analysis"]
+
+    return f"""
+You are an AI expert system specialized in analyzing manufacturing processes and identifying production delays in tire manufacturing. Your role is to accurately classify delay reasons based on visual evidence from production line footage.
 Task Context:
-You are analyzing video footage from Step {step_number} of a tire manufacturing process where a delay has been detected. Your task is to determine the most likely cause of the delay from the following possible reasons:
-{', '.join(possible_reasons)}
+You are analyzing video footage from Step {step_number} of a tire manufacturing process where a delay has been detected. The step is called {step_name}, and its standard time is {standard_time}.
 Required Analysis:
-Carefully observe the video for visual cues indicating production interruption.
-If no person is visible in any of the frames, the reason probably might be due to his absence.
-If a person is visible in the video and is observed touching and modifying the layers of the tire, it means there is a issue with tyre being patched hence he is repairing it.
-Compare observed evidence against each possible delay reason.
-Select the most likely reason based on visual evidence.
-Please provide your analysis in the following format:
-1. Selected Reason: [State the most likely reason from the given options]
-2. Visual Evidence: [Describe specific visual cues that support your selection]
-3. Reasoning: [Explain why this reason best matches the observed evidence]
-4. Alternative Analysis: [Brief explanation of why other possible reasons are less likely]
-Important: Base your analysis solely on visual evidence from the video. Focus on concrete, observable details rather than assumptions. Clearly state if no person or specific activity is observed."""
-
-
-# Load model globally
+Carefully observe the video for visual cues indicating production interruption.
+- If no person is visible in any of the frames, the delay is probably due to their absence.
+- If a person is visible in the video and is observed touching and modifying the layers of the tire, it indicates an issue with tire patching, and the person might be repairing it.
+- Compare observed evidence against the following possible delay reasons:
+  - {analysis}
+The following are the subactivities that need to happen in this step:
+
+{get_step_info(step_number)}
+
+Important: Please provide your output in the following format.
+Output_Examples = {
+["Delay in Bead Insertion", "Lack of raw material"],
+["Inner Liner Adjustment by Technician", "Person rebuilding defective Tire Sections"],
+["Manual Adjustment in Ply1 Apply", "Technician repairing defective Tire Sections"],
+["Delay in Bead Set", "Lack of raw material"],
+["Delay in Turnup", "Lack of raw material"],
+["Person Repairing Sidewall", "Person rebuilding defective Tire Sections"],
+["Delay in Sidewall Stitching", "Lack of raw material"],
+["No person available to load Carcass", "No person available to collect tire"]
+}
+1. **Selected Reason:** [State the most likely reason from the given options]
+2. **Visual Evidence:** [Describe specific visual cues that support your selection]
+3. **Reasoning:** [Explain why this reason best matches the observed evidence]
+4. **Alternative Analysis:** [Brief explanation of why other possible reasons are less likely]
+"""
+
 model, tokenizer = load_model()
 
 def inference(video, step_number):
-    """Analyzes video to predict the most likely cause of delay in the selected manufacturing step."""
+    """Analyzes video to predict possible issues based on the manufacturing step."""
     try:
         if not video:
             return "Please upload a video first."
 
-        possible_reasons = DELAY_REASONS[step_number]
-        prompt = get_analysis_prompt(step_number, possible_reasons)
-        temperature = 0.8
+        prompt = get_analysis_prompt(step_number)
+        temperature = 0.3
         response = predict(prompt, video, temperature, model, tokenizer)
 
         return response
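Two details of the new get_analysis_prompt() are worth flagging. step_details defines only "Name", "Standard Time", "Video_substeps_expected" and, for step 8, "Potential_Delay_reasons", so step_info["Analysis"] raises a KeyError for every valid step. In addition, the Output_Examples braces sit inside an f-string, where an unescaped { starts a replacement field rather than a literal brace, so literal braces must be doubled. A minimal sketch of one way to address both, assuming the key names shown in this diff (the helper name and fallback text are illustrative, not part of the commit):

    def get_delay_reasons_text(step_info):
        """Hypothetical helper (not in the commit): a safe replacement for step_info["Analysis"]."""
        # Only step 8 defines "Potential_Delay_reasons"; fall back gracefully for the other steps.
        reasons = step_info.get("Potential_Delay_reasons", ["No predefined delay reasons for this step"])
        return ", ".join(reasons)

    # Inside the prompt f-string, double the braces that should appear literally, e.g.:
    #   f"Output_Examples = {{ [\"Delay in Bead Insertion\", \"Lack of raw material\"] }}"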
@@ -143,38 +233,34 @@ def inference(video, step_number):
         return f"An error occurred during analysis: {str(e)}"
 
 def create_interface():
-    """Creates the Gradio interface for the Manufacturing Delay Analysis System with examples."""
+    """Creates the Gradio interface for the Manufacturing Analysis System."""
     with gr.Blocks() as demo:
         gr.Markdown("""
-        # Manufacturing Delay Analysis System
+        # Manufacturing Analysis System
         Upload a video of the manufacturing step and select the step number.
-        The system will analyze the video and determine the most likely cause of delay.
+        The system will analyze the video and provide observations.
         """)
 
         with gr.Row():
             with gr.Column():
                 video = gr.Video(label="Upload Manufacturing Video", sources=["upload"])
                 step_number = gr.Dropdown(
-                    choices=list(DELAY_REASONS.keys()),
+                    choices=[f"Step {i}" for i in range(1, 9)],
                     label="Manufacturing Step"
                 )
-                analyze_btn = gr.Button("Analyze Delay", variant="primary")
+                analyze_btn = gr.Button("Analyze", variant="primary")
 
             with gr.Column():
                 output = gr.Textbox(label="Analysis Result", lines=10)
 
-        # Add examples
-        examples = [
-            ["7838_step2_2_eval.mp4", "Step 2"],
-            ["7838_step6_2_eval.mp4", "Step 6"],
-            ["7838_step8_1_eval.mp4", "Step 8"],
-            ["7993_step6_3_eval.mp4", "Step 6"],
-            ["7993_step8_3_eval.mp4", "Step 8"]
-
-        ]
-
         gr.Examples(
-            examples=examples,
+            examples=[
+                ["7838_step2_2_eval.mp4", "Step 2"],
+                ["7838_step6_2_eval.mp4", "Step 6"],
+                ["7838_step8_1_eval.mp4", "Step 8"],
+                ["7993_step6_3_eval.mp4", "Step 6"],
+                ["7993_step8_3_eval.mp4", "Step 8"]
+            ],
             inputs=[video, step_number],
             cache_examples=False
         )
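One further mismatch: the dropdown and the examples pass labels such as "Step 2" into inference(), while get_step_info() keys step_details by the integers 1-8, so every lookup falls through to the "Error" entry and that error text becomes the prompt. A small conversion sketch, assuming the label format shown above (the helper name is illustrative, not part of the commit):

    def parse_step_number(step_label):
        """Hypothetical helper (not in the commit): map a label like "Step 2" to the integer key 2."""
        try:
            return int(str(step_label).split()[-1])
        except (ValueError, IndexError):
            return None  # get_step_info() will then return its "Error" entry

    # e.g. inside inference(): prompt = get_analysis_prompt(parse_step_number(step_number))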
 