# HuggingFace Space script: splits a narration script into sentences and
# generates one image per sentence via the HF Inference API.
import io
import os
import re

import requests
import torch
from diffusers import DiffusionPipeline
from dotenv import load_dotenv
from PIL import Image
# Load environment variables (HF_API_TOKEN is read later by hf_pipeline) from .env.
load_dotenv()

# Prefer GPU when available for the local diffusion pipeline.
device = "cuda" if torch.cuda.is_available() else "cpu"

# NOTE(review): this locally-loaded SDXL pipeline is never used below —
# image generation goes through the hosted Inference API in hf_pipeline.
# Consider removing this expensive load, or using it as an offline fallback.
pipeline = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0")
pipeline = pipeline.to(device)
def generate_image_prompts(script):
    """Split a narration script into per-sentence image prompts.

    Args:
        script: Full script text.

    Returns:
        list[str]: One stripped, non-empty prompt per sentence. A sentence
        boundary is '.', '!' or '?' followed by one or more spaces.
    """
    sentences = re.split(r'(?<=[.!?]) +', script)
    # Drop empty fragments (e.g. from an empty script) and trim whitespace.
    return [sentence.strip() for sentence in sentences if sentence.strip()]
def hf_pipeline(prompt):
    """Generate an image for *prompt* via the Hugging Face Inference API.

    Args:
        prompt: Text prompt sent to the hosted AWPortrait-FL model.

    Returns:
        PIL.Image.Image: The generated image, decoded from the response bytes.

    Raises:
        RuntimeError: If the API responds with a non-200 status. (RuntimeError
        subclasses Exception, so existing ``except Exception`` callers still work.)
    """
    API_URL = "https://api-inference.huggingface.co/models/Shakker-Labs/AWPortrait-FL"
    # Token is expected in the environment (loaded from .env at import time).
    headers = {"Authorization": f"Bearer {os.getenv('HF_API_TOKEN')}"}
    response = requests.post(API_URL, headers=headers, json={"inputs": prompt})
    if response.status_code != 200:
        raise RuntimeError(f"Failed to generate image. Status code: {response.status_code}, {response.text}")
    # The API returns raw image bytes in the response body.
    return Image.open(io.BytesIO(response.content))
def generate_images(prompts):
    """Generate and save one PNG image per prompt.

    Args:
        prompts: Iterable of text prompts.

    Returns:
        list[str]: Filenames of the saved images, in prompt order
        ("generated_image_<idx>.png", written to the current directory).
    """
    image_files = []
    for idx, prompt in enumerate(prompts):
        print(f"Generating image for prompt: {prompt}")
        # BUG FIX: hf_pipeline already returns a PIL.Image directly; the
        # original `.images[0]` (a diffusers-style result attribute) would
        # raise AttributeError on every call.
        image = hf_pipeline(prompt)
        filename = f"generated_image_{idx}.png"
        image.save(filename)
        image_files.append(filename)
    return image_files