VishalD1234 committed
Commit d20364e · verified · 1 Parent(s): 78f35cc

Update app.py

Files changed (1):
  1. app.py +15 -55
app.py CHANGED
@@ -6,22 +6,10 @@ from decord import cpu, VideoReader, bridge
  from transformers import AutoModelForCausalLM, AutoTokenizer
  from transformers import BitsAndBytesConfig

-
  MODEL_PATH = "THUDM/cogvlm2-llama3-caption"
  DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
  TORCH_TYPE = torch.bfloat16 if torch.cuda.is_available() and torch.cuda.get_device_capability()[0] >= 8 else torch.float16

- DELAY_REASONS = {
-     "Step 1": ["Delay in Bead Insertion","Lack of raw material"],
-     "Step 2": ["Inner Liner Adjustment by Technician","Person rebuilding defective Tire Sections"],
-     "Step 3": ["Manual Adjustment in Ply1 apply","Technician repairing defective Tire Sections"],
-     "Step 4": ["Delay in Bead set","Lack of raw material"],
-     "Step 5": ["Delay in Turnup","Lack of raw material"],
-     "Step 6": ["Person Repairing sidewall","Person rebuilding defective Tire Sections"],
-     "Step 7": ["Delay in sidewall stitching","Lack of raw material"],
-     "Step 8": ["No person available to load Carcass","No person available to collect tire"]
- }
-

  def get_step_info(step_name):
      """Returns detailed information about a manufacturing step."""
@@ -152,37 +140,13 @@ def predict(prompt, video_data, temperature, model, tokenizer):

      return response

- def get_analysis_prompt(step_number, possible_reasons):
-     """Constructs the prompt for analyzing delay reasons based on the selected step."""
-     return f"""You are an AI expert system specialized in analyzing manufacturing processes and identifying production delays in tire manufacturing. Your role is to accurately classify delay reasons based on visual evidence from production line footage.
- Task Context:
- You are analyzing video footage from Step {step_number} of a tire manufacturing process where a delay has been detected. Your task is to determine the most likely cause of the delay from the following possible reasons:
- {', '.join(possible_reasons)}
- Required Analysis:
- Carefully observe the video for visual cues indicating production interruption.
- If no person is visible in any of the frames, the reason probably might be due to his absence.
- If a person is visible in the video and is observed touching and modifying the layers of the tire, it means there is a issue with tyre being patched hence he is repairing it.
- Compare observed evidence against each possible delay reason.
- Select the most likely reason based on visual evidence.
- Please provide your analysis in the following format:
- 1. Selected Reason: [State the most likely reason from the given options]
- 2. Visual Evidence: [Describe specific visual cues that support your selection]
- 3. Reasoning: [Explain why this reason best matches the observed evidence]
- 4. Alternative Analysis: [Brief explanation of why other possible reasons are less likely]
- Important: Base your analysis solely on visual evidence from the video. Focus on concrete, observable details rather than assumptions. Clearly state if no person or specific activity is observed."""
-
-
- # Load model globally
- model, tokenizer = load_model()
-
  def inference(video, step_number):
-     """Analyzes video to predict the most likely cause of delay in the selected manufacturing step."""
+     """Analyzes video to predict possible issues based on the manufacturing step."""
      try:
          if not video:
              return "Please upload a video first."

-         possible_reasons = DELAY_REASONS[step_number]
-         prompt = get_analysis_prompt(step_number, possible_reasons)
+         prompt = f"You are analyzing video footage from Step {step_number} of a manufacturing process. Provide an analysis based on the observed video."
          temperature = 0.8
          response = predict(prompt, video, temperature, model, tokenizer)

@@ -191,38 +155,34 @@ def inference(video, step_number):
          return f"An error occurred during analysis: {str(e)}"

  def create_interface():
-     """Creates the Gradio interface for the Manufacturing Delay Analysis System with examples."""
+     """Creates the Gradio interface for the Manufacturing Analysis System."""
      with gr.Blocks() as demo:
          gr.Markdown("""
-         # Manufacturing Delay Analysis System
+         # Manufacturing Analysis System
          Upload a video of the manufacturing step and select the step number.
-         The system will analyze the video and determine the most likely cause of delay.
+         The system will analyze the video and provide observations.
          """)

          with gr.Row():
              with gr.Column():
                  video = gr.Video(label="Upload Manufacturing Video", sources=["upload"])
                  step_number = gr.Dropdown(
-                     choices=list(DELAY_REASONS.keys()),
+                     choices=[f"Step {i}" for i in range(1, 9)],
                      label="Manufacturing Step"
                  )
-                 analyze_btn = gr.Button("Analyze Delay", variant="primary")
+                 analyze_btn = gr.Button("Analyze", variant="primary")

              with gr.Column():
                  output = gr.Textbox(label="Analysis Result", lines=10)

-         # Add examples
-         examples = [
-             ["7838_step2_2_eval.mp4", "Step 2"],
-             ["7838_step6_2_eval.mp4", "Step 6"],
-             ["7838_step8_1_eval.mp4", "Step 8"],
-             ["7993_step6_3_eval.mp4", "Step 6"],
-             ["7993_step8_3_eval.mp4", "Step 8"]
-
-         ]
-
          gr.Examples(
-             examples=examples,
+             examples=[
+                 ["7838_step2_2_eval.mp4", "Step 2"],
+                 ["7838_step6_2_eval.mp4", "Step 6"],
+                 ["7838_step8_1_eval.mp4", "Step 8"],
+                 ["7993_step6_3_eval.mp4", "Step 6"],
+                 ["7993_step8_3_eval.mp4", "Step 8"]
+             ],
              inputs=[video, step_number],
              cache_examples=False
          )
@@ -237,4 +197,4 @@ def create_interface():

  if __name__ == "__main__":
      demo = create_interface()
-     demo.queue().launch(share=True)
+     demo.queue().launch(share=True)