kreemyyyy committed on
Commit dc428aa · verified · 1 Parent(s): e3560bd

Update app.py

Files changed (1):
  1. app.py +56 -58
app.py CHANGED
@@ -1,13 +1,16 @@
+import nest_asyncio
+nest_asyncio.apply()
+
+import os
 import gradio as gr
 import logging
 from roboflow import Roboflow
 from PIL import Image, ImageDraw
 import cv2
 import numpy as np
-import os
 from math import atan2, degrees
 import asyncio
-import multiprocessing
+from pyppeteer import launch
 
 # Configure logging
 logging.basicConfig(
@@ -25,72 +28,62 @@ PROJECT_NAME = "model_verification_project"
 VERSION_NUMBER = 2
 
 # ----------------------------
-# Function to generate handwriting image using Pyppeteer in a separate process
+# Asynchronous function to generate handwriting image via Pyppeteer
 # ----------------------------
-def generate_handwriting_image_process(text_prompt, screenshot_path, return_dict):
+async def _generate_handwriting_image(text_prompt, screenshot_path):
     """
-    This function runs in a separate process so that Pyppeteer's signal handling
-    works correctly in its main thread.
+    Launches a headless browser, goes to Calligraphr, types the text,
+    and takes a screenshot of the rendered handwriting.
     """
-    import asyncio
-    from pyppeteer import launch
-
-    async def _generate():
-        # Launch Chromium with additional flags for containerized environments
-        browser = await launch(
-            headless=True,
-            args=[
-                '--no-sandbox',
-                '--disable-setuid-sandbox',
-                '--disable-dev-shm-usage',
-                '--disable-gpu',
-                '--single-process',
-                '--no-zygote',
-                '--window-size=1920,1080'
-            ]
-        )
+    # Launch Chromium with additional flags for containerized environments
+    browser = await launch(
+        headless=True,
+        handleSIGINT=False,
+        handleSIGTERM=False,
+        handleSIGHUP=False,
+        args=[
+            '--no-sandbox',
+            '--disable-setuid-sandbox',
+            '--disable-dev-shm-usage',
+            '--disable-gpu',
+            '--single-process',
+            '--no-zygote',
+            '--window-size=1920,1080'
+        ]
+    )
+    try:
         page = await browser.newPage()
         await page.goto('https://www.calligraphr.com/en/font/', {'waitUntil': 'networkidle2'})
         await page.waitForSelector('#text-input')
         await page.type('#text-input', text_prompt)
-        await asyncio.sleep(3)  # Increased wait time for the page to render
 
-        # Adjust the clip values as needed to capture the proper area of the page
+        # Give the page time to render the handwriting
+        await asyncio.sleep(3)
+
+        # Screenshot a portion of the page that should contain the handwriting
        await page.screenshot({
            'path': screenshot_path,
            'clip': {'x': 100, 'y': 200, 'width': 600, 'height': 150}
        })
-        await browser.close()
         return screenshot_path
 
-    # Create a new event loop for this process
-    loop = asyncio.new_event_loop()
-    asyncio.set_event_loop(loop)
-    try:
-        result = loop.run_until_complete(_generate())
-        return_dict['result'] = result
-    except Exception as e:
-        logging.error("Error in handwriting generation process: " + str(e))
-        return_dict['result'] = None
     finally:
-        loop.close()
+        await browser.close()
 
-def get_handwriting_image(text_prompt, screenshot_path="/tmp/handwriting.png"):
+def generate_handwriting_image(text_prompt, screenshot_path="/tmp/handwriting.png"):
     """
-    Starts a separate process to generate a handwriting image and returns the image path.
+    Synchronous wrapper around the async Pyppeteer call.
     """
-    manager = multiprocessing.Manager()
-    return_dict = manager.dict()
-    process = multiprocessing.Process(
-        target=generate_handwriting_image_process,
-        args=(text_prompt, screenshot_path, return_dict)
-    )
-    process.start()
-    process.join()
-    return return_dict.get('result', None)
+    try:
+        loop = asyncio.get_event_loop()
+        result = loop.run_until_complete(_generate_handwriting_image(text_prompt, screenshot_path))
+        return result
+    except Exception as e:
+        logging.error(f"Error generating handwriting image: {e}")
+        return None
 
 # ----------------------------
-# Helper: Detect paper angle within bounding box
+# Detect paper angle within bounding box
 # ----------------------------
 def detect_paper_angle(image, bounding_box):
     x1, y1, x2, y2 = bounding_box
@@ -133,20 +126,23 @@ def process_image(image, text):
         prediction = model.predict(input_image_path, confidence=70, overlap=50).json()
         logging.debug(f"Inference result: {prediction}")
 
-        # Convert image for processing
         pil_image = image.convert("RGBA")
         logging.debug("Converted image to RGBA mode.")
 
-        # Process each detected object (assumed to be white paper)
+        # Iterate over detected objects (assumed white paper)
         for obj in prediction['predictions']:
+            # Paper dimensions
             white_paper_width = obj['width']
             white_paper_height = obj['height']
+
+            # Padding
             padding_x = int(white_paper_width * 0.1)
             padding_y = int(white_paper_height * 0.1)
             box_width = white_paper_width - 2 * padding_x
             box_height = white_paper_height - 2 * padding_y
             logging.debug(f"Padded white paper dimensions: width={box_width}, height={box_height}.")
 
+            # Calculate padded coordinates
             x1_padded = int(obj['x'] - white_paper_width / 2 + padding_x)
             y1_padded = int(obj['y'] - white_paper_height / 2 + padding_y)
             x2_padded = int(obj['x'] + white_paper_width / 2 - padding_x)
@@ -156,15 +152,15 @@ def process_image(image, text):
             angle = detect_paper_angle(np.array(image), (x1_padded, y1_padded, x2_padded, y2_padded))
             logging.debug(f"Detected paper angle: {angle} degrees.")
 
-            # (Optional) Save a debug image with the bounding box drawn
+            # (Optional) debug bounding box
             debug_layer = pil_image.copy()
             debug_draw = ImageDraw.Draw(debug_layer)
             debug_draw.rectangle([(x1_padded, y1_padded), (x2_padded, y2_padded)], outline="red", width=3)
             debug_layer.save("/tmp/debug_bounding_box.png")
             logging.debug("Saved bounding box debug image to /tmp/debug_bounding_box.png.")
 
-            # Generate handwriting image using the separate process
-            handwriting_path = get_handwriting_image(text, "/tmp/handwriting.png")
+            # Generate handwriting image
+            handwriting_path = generate_handwriting_image(text, "/tmp/handwriting.png")
             if not handwriting_path:
                 logging.error("Handwriting image generation failed.")
                 continue
@@ -173,7 +169,7 @@ def process_image(image, text):
             handwriting_img = handwriting_img.resize((box_width, box_height), Image.ANTIALIAS)
             rotated_handwriting = handwriting_img.rotate(-angle, resample=Image.BICUBIC, expand=True)
 
-            # Composite the handwriting onto the original image
+            # Composite the handwriting
             text_layer = Image.new("RGBA", pil_image.size, (255, 255, 255, 0))
             paste_x = int(obj['x'] - rotated_handwriting.size[0] / 2)
             paste_y = int(obj['y'] - rotated_handwriting.size[1] / 2)
@@ -181,7 +177,7 @@ def process_image(image, text):
             pil_image = Image.alpha_composite(pil_image, text_layer)
             logging.debug("Handwriting layer composited onto the original image.")
 
-            # Save and return the output image
+            # Save output
             output_image_path = "/tmp/output_image.png"
             pil_image.convert("RGB").save(output_image_path)
             logging.debug(f"Output image saved to {output_image_path}.")
@@ -192,7 +188,7 @@ def process_image(image, text):
         return None
 
 # ----------------------------
-# Gradio interface function
+# Gradio inference function
 # ----------------------------
 def gradio_inference(image, text):
     logging.debug("Starting Gradio inference.")
@@ -204,7 +200,7 @@ def gradio_inference(image, text):
         return None, None, "An error occurred while processing the image. Please check the logs."
 
 # ----------------------------
-# Gradio interface definition
+# Gradio interface
 # ----------------------------
 interface = gr.Interface(
     fn=gradio_inference,
@@ -219,7 +215,9 @@ interface = gr.Interface(
     ],
     title="Roboflow Detection with Handwriting Overlay",
     description="Upload an image and enter text to overlay. The Roboflow model detects the white paper area, and a handwriting image is generated via Calligraphr using Pyppeteer. The output image is composited accordingly.",
-    allow_flagging="never"
+    allow_flagging="never",
+    # Limit concurrency to 1 to reduce potential conflicts with the single event loop
+    concurrency_count=1
 )
 
 if __name__ == "__main__":
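
For context on the pattern this version of app.py relies on: nest_asyncio patches the current event loop so that run_until_complete() can be called even when a loop may already be running (for example inside a Gradio worker). Below is a minimal, self-contained sketch of that pattern; the names (_render_async, render_sync) are hypothetical illustrations and are not part of the commit.

import asyncio
import nest_asyncio

# Patch the current event loop so run_until_complete() works even if a
# loop is already running (e.g. inside a web-framework request handler).
nest_asyncio.apply()

async def _render_async(text):
    # Stand-in for the Pyppeteer coroutine; only simulates async work.
    await asyncio.sleep(0.1)
    return f"rendered: {text}"

def render_sync(text):
    # Synchronous wrapper, mirroring generate_handwriting_image() in app.py.
    try:
        loop = asyncio.get_event_loop()
        return loop.run_until_complete(_render_async(text))
    except Exception as exc:
        print(f"rendering failed: {exc}")
        return None

if __name__ == "__main__":
    print(render_sync("hello"))  # -> rendered: hello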