VishalD1234 committed on
Commit
260aafd
·
verified ·
1 Parent(s): 386e68e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +50 -150
app.py CHANGED
@@ -6,109 +6,29 @@ from decord import cpu, VideoReader, bridge
6
  from transformers import AutoModelForCausalLM, AutoTokenizer
7
  from transformers import BitsAndBytesConfig
8
 
 
9
  MODEL_PATH = "THUDM/cogvlm2-video-llama3-chat"
10
  DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
11
  TORCH_TYPE = torch.bfloat16 if torch.cuda.is_available() and torch.cuda.get_device_capability()[0] >= 8 else torch.float16
12
 
13
- # Delay Reasons for Each Manufacturing Step
14
  DELAY_REASONS = {
15
- "Step 1": ["Delay in Bead Insertion", "Lack of raw material"],
16
- "Step 2": ["Inner Liner Adjustment by Technician", "Person rebuilding defective Tire Sections"],
17
- "Step 3": ["Manual Adjustment in Ply1 apply", "Technician repairing defective Tire Sections"],
18
- "Step 4": ["Delay in Bead set", "Lack of raw material"],
19
- "Step 5": ["Delay in Turnup", "Lack of raw material"],
20
- "Step 6": ["Person Repairing sidewall", "Person rebuilding defective Tire Sections"],
21
- "Step 7": ["Delay in sidewall stitching", "Lack of raw material"],
22
- "Step 8": ["No person available to load Carcass", "No person available to collect tire"]
23
  }
24
 
25
def get_step_info(step_number):
    """Return the reference data for one manufacturing step.

    Parameters:
        step_number: integer key of the step (1-8).

    Returns:
        dict: contains "Name", "Standard Time" and "Video_substeps_expected"
        (step 8 additionally carries "Potential_Delay_reasons"); for an
        unknown step number, a dict with a single "Error" key is returned.
    """
    steps = {
        1: {
            "Name": "Bead Insertion",
            "Standard Time": "4 seconds",
            "Video_substeps_expected": {
                "0-1 second": "Machine starts bead insertion process.",
                "1-3 seconds": "Beads are aligned and positioned.",
                "3-4 seconds": "Final adjustment and confirmation of bead placement.",
            },
        },
        2: {
            "Name": "Inner Liner Apply",
            "Standard Time": "4 seconds",
            "Video_substeps_expected": {
                "0-1 second": "Machine applies the first layer of the liner.",
                "1-3 seconds": "Technician checks alignment and adjusts if needed.",
                "3-4 seconds": "Final inspection and confirmation.",
            },
        },
        3: {
            "Name": "Ply1 Apply",
            "Standard Time": "4 seconds",
            "Video_substeps_expected": {
                "0-2 seconds": "First ply is loaded onto the machine.",
                "2-4 seconds": "Technician inspects and adjusts ply placement.",
            },
        },
        4: {
            "Name": "Bead Set",
            "Standard Time": "8 seconds",
            "Video_substeps_expected": {
                "0-3 seconds": "Bead is positioned and pre-set.",
                "3-6 seconds": "Machine secures the bead in place.",
                "6-8 seconds": "Technician confirms the bead alignment.",
            },
        },
        5: {
            "Name": "Turnup",
            "Standard Time": "4 seconds",
            "Video_substeps_expected": {
                "0-2 seconds": "Turnup process begins with machine handling.",
                "2-4 seconds": "Technician inspects the turnup and makes adjustments if necessary.",
            },
        },
        6: {
            "Name": "Sidewall Apply",
            "Standard Time": "14 seconds",
            "Video_substeps_expected": {
                "0-5 seconds": "Sidewall material is positioned by the machine.",
                "5-10 seconds": "Technician checks for alignment and begins application.",
                "10-14 seconds": "Final adjustments and confirmation of sidewall placement.",
            },
        },
        7: {
            "Name": "Sidewall Stitching",
            "Standard Time": "5 seconds",
            "Video_substeps_expected": {
                "0-2 seconds": "Stitching process begins automatically.",
                "2-4 seconds": "Technician inspects stitching for any irregularities.",
                "4-5 seconds": "Machine completes stitching process.",
            },
        },
        8: {
            "Name": "Carcass Unload",
            "Standard Time": "7 seconds",
            "Video_substeps_expected": {
                "0-3 seconds": "Technician unloads(removes) carcass(tire) from the machine."
            },
            # Step 8 is the only step with its own delay-reason notes.
            "Potential_Delay_reasons": [
                "Person not available in time(in 3 sec) to remove carcass.",
                "Person is doing bead(ring) insertion before carcass unload causing unload to be delayed by more than 3 sec",
            ],
        },
    }
    # Fall back to an error payload rather than raising on unknown steps.
    return steps.get(step_number, {"Error": "Invalid step number. Please provide a valid step number."})
103
-
104
-
105
-
106
  def load_video(video_data, strategy='chat'):
107
  """Loads and processes video data into a format suitable for model input."""
108
  bridge.set_bridge('torch')
109
  num_frames = 24
110
 
111
- if isinstance(video_data, str):
112
  decord_vr = VideoReader(video_data, ctx=cpu(0))
113
  else:
114
  decord_vr = VideoReader(io.BytesIO(video_data), ctx=cpu(0))
@@ -171,7 +91,7 @@ def predict(prompt, video_data, temperature, model, tokenizer):
171
  gen_kwargs = {
172
  "max_new_tokens": 2048,
173
  "pad_token_id": 128002,
174
- "top_k": 3,
175
  "do_sample": False,
176
  "top_p": 0.1,
177
  "temperature": temperature,
@@ -184,62 +104,38 @@ def predict(prompt, video_data, temperature, model, tokenizer):
184
 
185
  return response
186
 
187
def get_analysis_prompt(step_number):
    """Constructs the prompt for analyzing delay reasons based on the selected step.

    Parameters:
        step_number: integer step id (1-8), as accepted by get_step_info.

    Returns:
        str: the full analysis prompt, or an error message when the step
        number is unknown.
    """
    step_info = get_step_info(step_number)

    if "Error" in step_info:
        return step_info["Error"]

    step_name = step_info["Name"]
    standard_time = step_info["Standard Time"]
    # BUG FIX: the original read step_info["Analysis"], a key that
    # get_step_info never returns, so every valid step raised KeyError.
    # The candidate reasons live in DELAY_REASONS, keyed "Step N".
    delay_reasons = ", ".join(DELAY_REASONS.get(f"Step {step_number}", []))

    # BUG FIX: the original embedded a literal `Output_Format = {...}` block
    # (raw braces) directly inside an f-string, which is invalid — braces in
    # f-strings open replacement fields. Keep the literal in a plain string
    # and interpolate it instead.
    output_format = """Output_Format = {
    ["Delay in Bead Insertion", "Lack of raw material"],
    ["Inner Liner Adjustment by Technician", "Person rebuilding defective Tire Sections"],
    ["Manual Adjustment in Ply1 Apply", "Technician repairing defective Tire Sections"],
    ["Delay in Bead Set", "Lack of raw material"],
    ["Delay in Turnup", "Lack of raw material"],
    ["Person Repairing Sidewall", "Person rebuilding defective Tire Sections"],
    ["Delay in Sidewall Stitching", "Lack of raw material"],
    ["No person available to load Carcass", "No person available to collect tire"]
}"""

    return f"""
You are an AI expert system specialized in analyzing manufacturing processes and identifying production delays in tire manufacturing. Your role is to accurately classify delay reasons based on visual evidence from production line footage.
Task Context:
You are analyzing video footage from Step {step_number} of a tire manufacturing process where a delay has been detected. The step is called {step_name}, and its standard time is {standard_time}.
Required Analysis:
Carefully observe the video for visual cues indicating production interruption.
- If no person is visible in any of the frames, the reason probably might be due to their absence.
- If a person is visible in the video and is observed touching and modifying the layers of the tire, it indicates an issue with tire patching, and the person might be repairing it.
- Compare observed evidence against the following possible delay reasons:
- {delay_reasons}
Following are the subactivities needs to happen in this step.

{step_info}

Important:Please provide your output in the following format:

{output_format}
1. **Selected Reason:** [State the most likely reason from the given options]
2. **Visual Evidence:** [Describe specific visual cues that support your selection]
3. **Reasoning:** [Explain why this reason best matches the observed evidence]
4. **Alternative Analysis:** [Brief explanation of why other possible reasons are less likely]
Base your analysis solely on visual evidence from the video. Focus on concrete, observable details rather than assumptions. Clearly state if no person or specific activity is observed.
"""
230
-
231
-
232
-
233
  model, tokenizer = load_model()
234
 
235
  def inference(video, step_number):
236
- """Analyzes video to predict possible issues based on the manufacturing step."""
237
  try:
238
  if not video:
239
  return "Please upload a video first."
240
 
241
- prompt = get_analysis_prompt(step_number)
242
- temperature = 0.3
 
243
  response = predict(prompt, video, temperature, model, tokenizer)
244
 
245
  return response
@@ -247,34 +143,38 @@ def inference(video, step_number):
247
  return f"An error occurred during analysis: {str(e)}"
248
 
249
  def create_interface():
250
- """Creates the Gradio interface for the Manufacturing Analysis System."""
251
  with gr.Blocks() as demo:
252
  gr.Markdown("""
253
- # Manufacturing Analysis System
254
  Upload a video of the manufacturing step and select the step number.
255
- The system will analyze the video and provide observations.
256
  """)
257
 
258
  with gr.Row():
259
  with gr.Column():
260
  video = gr.Video(label="Upload Manufacturing Video", sources=["upload"])
261
  step_number = gr.Dropdown(
262
- choices=[f"Step {i}" for i in range(1, 9)],
263
  label="Manufacturing Step"
264
  )
265
- analyze_btn = gr.Button("Analyze", variant="primary")
266
 
267
  with gr.Column():
268
  output = gr.Textbox(label="Analysis Result", lines=10)
269
 
 
 
 
 
 
 
 
 
 
 
270
  gr.Examples(
271
- examples=[
272
- ["7838_step2_2_eval.mp4", "Step 2"],
273
- ["7838_step6_2_eval.mp4", "Step 6"],
274
- ["7838_step8_1_eval.mp4", "Step 8"],
275
- ["7993_step6_3_eval.mp4", "Step 6"],
276
- ["7993_step8_3_eval.mp4", "Step 8"]
277
- ],
278
  inputs=[video, step_number],
279
  cache_examples=False
280
  )
 
6
  from transformers import AutoModelForCausalLM, AutoTokenizer
7
  from transformers import BitsAndBytesConfig
8
 
9
+
10
# Runtime configuration for the CogVLM2 video chat model.
MODEL_PATH = "THUDM/cogvlm2-video-llama3-chat"
_has_cuda = torch.cuda.is_available()
DEVICE = "cuda" if _has_cuda else "cpu"
# bfloat16 needs compute capability >= 8 (Ampere or newer); otherwise fp16.
TORCH_TYPE = (
    torch.bfloat16
    if _has_cuda and torch.cuda.get_device_capability()[0] >= 8
    else torch.float16
)
13
 
14
+
15
# Candidate delay reasons for each manufacturing step, keyed by the
# dropdown label shown in the UI ("Step 1" .. "Step 8").
DELAY_REASONS = {
    "Step 1": ["Delay in Bead Insertion", "Lack of raw material"],
    "Step 2": ["Inner Liner Adjustment by Technician", "Person rebuilding defective Tire Sections"],
    "Step 3": ["Manual Adjustment in Ply1 apply", "Technician repairing defective Tire Sections"],
    "Step 4": ["Delay in Bead set", "Lack of raw material"],
    "Step 5": ["Delay in Turnup", "Lack of raw material"],
    "Step 6": ["Person Repairing sidewall", "Person rebuilding defective Tire Sections"],
    "Step 7": ["Delay in sidewall stitching", "Lack of raw material"],
    "Step 8": ["No person available to load Carcass", "No person available to collect tire"],
}
25
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
26
  def load_video(video_data, strategy='chat'):
27
  """Loads and processes video data into a format suitable for model input."""
28
  bridge.set_bridge('torch')
29
  num_frames = 24
30
 
31
+ if isinstance(video_data, str):
32
  decord_vr = VideoReader(video_data, ctx=cpu(0))
33
  else:
34
  decord_vr = VideoReader(io.BytesIO(video_data), ctx=cpu(0))
 
91
  gen_kwargs = {
92
  "max_new_tokens": 2048,
93
  "pad_token_id": 128002,
94
+ "top_k": 1,
95
  "do_sample": False,
96
  "top_p": 0.1,
97
  "temperature": temperature,
 
104
 
105
  return response
106
 
107
def get_analysis_prompt(step_number, possible_reasons):
    """Constructs the prompt for analyzing delay reasons based on the selected step.

    Parameters:
        step_number: step identifier; either a bare number (e.g. 2) or the
            UI dropdown label (e.g. "Step 2").
        possible_reasons: iterable of candidate delay-reason strings for
            this step (joined into the prompt).

    Returns:
        str: the full prompt handed to the vision-language model.
    """
    # BUG FIX: the UI dropdown passes labels like "Step 2", but the template
    # hard-coded a "Step " prefix, producing "Step Step 2" in the prompt.
    # Normalize to a single "Step N" label while still accepting bare numbers.
    step_label = str(step_number)
    if not step_label.startswith("Step"):
        step_label = f"Step {step_label}"
    return f"""You are an AI expert system specialized in analyzing manufacturing processes and identifying production delays in tire manufacturing. Your role is to accurately classify delay reasons based on visual evidence from production line footage.
Task Context:
You are analyzing video footage from {step_label} of a tire manufacturing process where a delay has been detected. Your task is to determine the most likely cause of the delay from the following possible reasons:
{', '.join(possible_reasons)}
Required Analysis:
Carefully observe the video for visual cues indicating production interruption.
If no person is visible in any of the frames, the reason probably might be due to his absence.
If a person is visible in the video and is observed touching and modifying the layers of the tire, it means there is a issue with tyre being patched hence he is repairing it.
Compare observed evidence against each possible delay reason.
Select the most likely reason based on visual evidence.
Please provide your analysis in the following format:
1. Selected Reason: [State the most likely reason from the given options]
2. Visual Evidence: [Describe specific visual cues that support your selection]
3. Reasoning: [Explain why this reason best matches the observed evidence]
4. Alternative Analysis: [Brief explanation of why other possible reasons are less likely]
Important: Base your analysis solely on visual evidence from the video. Focus on concrete, observable details rather than assumptions. Clearly state if no person or specific activity is observed."""
125
+
126
+
127
+ # Load model globally
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
128
  model, tokenizer = load_model()
129
 
130
  def inference(video, step_number):
131
+ """Analyzes video to predict the most likely cause of delay in the selected manufacturing step."""
132
  try:
133
  if not video:
134
  return "Please upload a video first."
135
 
136
+ possible_reasons = DELAY_REASONS[step_number]
137
+ prompt = get_analysis_prompt(step_number, possible_reasons)
138
+ temperature = 0.8
139
  response = predict(prompt, video, temperature, model, tokenizer)
140
 
141
  return response
 
143
  return f"An error occurred during analysis: {str(e)}"
144
 
145
  def create_interface():
146
+ """Creates the Gradio interface for the Manufacturing Delay Analysis System with examples."""
147
  with gr.Blocks() as demo:
148
  gr.Markdown("""
149
+ # Manufacturing Delay Analysis System
150
  Upload a video of the manufacturing step and select the step number.
151
+ The system will analyze the video and determine the most likely cause of delay.
152
  """)
153
 
154
  with gr.Row():
155
  with gr.Column():
156
  video = gr.Video(label="Upload Manufacturing Video", sources=["upload"])
157
  step_number = gr.Dropdown(
158
+ choices=list(DELAY_REASONS.keys()),
159
  label="Manufacturing Step"
160
  )
161
+ analyze_btn = gr.Button("Analyze Delay", variant="primary")
162
 
163
  with gr.Column():
164
  output = gr.Textbox(label="Analysis Result", lines=10)
165
 
166
+ # Add examples
167
+ examples = [
168
+ ["7838_step2_2_eval.mp4", "Step 2"],
169
+ ["7838_step6_2_eval.mp4", "Step 6"],
170
+ ["7838_step8_1_eval.mp4", "Step 8"],
171
+ ["7993_step6_3_eval.mp4", "Step 6"],
172
+ ["7993_step8_3_eval.mp4", "Step 8"]
173
+
174
+ ]
175
+
176
  gr.Examples(
177
+ examples=examples,
 
 
 
 
 
 
178
  inputs=[video, step_number],
179
  cache_examples=False
180
  )