Spaces: Runtime error
Update app.py
app.py
CHANGED
@@ -17,7 +17,7 @@ class ModelActor:
         """
         self.model = AutoPipelineForText2Image.from_pretrained("stabilityai/sdxl-turbo")
 
-    def generate_image(self, prompt, prompt_name):
+    async def generate_image(self, prompt, prompt_name):
         """
         Generates an image based on the provided prompt.
         Parameters:
@@ -29,7 +29,7 @@ class ModelActor:
         start_time = time.time()
         process_id = os.getpid()
         try:
-            output = self.model(prompt=prompt, num_inference_steps=1, guidance_scale=0.0)
+            output = await self.model(prompt=prompt, num_inference_steps=1, guidance_scale=0.0)
             if isinstance(output.images, list) and len(output.images) > 0:
                 image = output.images[0]
                 buffered = BytesIO()
@@ -63,8 +63,7 @@ async def queue_api_calls(sentence_mapping, character_dict, selected_style):
     model_actors = [ModelActor.remote() for _ in range(num_actors)]
     tasks = [model_actors[i % num_actors].generate_image.remote(prompt, f"Prompt {paragraph_number}") for i, (paragraph_number, prompt) in enumerate(prompts)]
 
-
-    responses = await asyncio.gather(*[asyncio.to_thread(ray.get, task) for task in tasks])
+    responses = await asyncio.gather(*[ray.get(task) for task in tasks])
     images = {paragraph_number: response for (paragraph_number, _), response in zip(prompts, responses)}
     return images
 
@@ -91,7 +90,7 @@ gradio_interface = gr.Interface(
     fn=process_prompt,
     inputs=[gr.JSON(label="Sentence Mapping"), gr.JSON(label="Character Dict"), gr.Dropdown(["oil painting", "sketch", "watercolor"], label="Selected Style")],
     outputs="json"
-).queue(default_concurrency_limit=20) #
+).queue(default_concurrency_limit=20) # Set concurrency limit to match the number of model actors
 
 if __name__ == "__main__":
     gradio_interface.launch()
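For reference on the asyncio.gather change above: ray.get is a blocking call that returns plain values rather than awaitables, while the ObjectRefs returned by actor .remote() calls can be awaited directly inside an asyncio event loop. Below is a minimal sketch of that pattern with hypothetical names (EchoActor, gather_results); it is not the actual code in app.py, only an illustration of awaiting Ray actor tasks without blocking the loop.

import asyncio
import ray

ray.init(ignore_reinit_error=True)

@ray.remote
class EchoActor:
    def work(self, prompt):
        # Stand-in for the real image-generation work.
        return f"result for {prompt}"

async def gather_results(prompts):
    actors = [EchoActor.remote() for _ in range(2)]
    # .remote() returns ObjectRefs; they are awaitable, so asyncio.gather
    # can collect them directly instead of wrapping the blocking ray.get.
    refs = [actors[i % len(actors)].work.remote(p) for i, p in enumerate(prompts)]
    return await asyncio.gather(*refs)

print(asyncio.run(gather_results(["a", "b", "c"])))

The same sketch also shows why keeping the actor method synchronous is fine: the caller awaits the ObjectRef, not the method itself.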