File size: 1,902 Bytes
ca94d0b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
import gradio as gr
import torch
from transformers import pipeline
from PIL import Image
import numpy as np
from diffusers import DiffusionPipeline

# Select the compute device once; reused for the diffusion pipeline below.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Arabic -> English translation pipeline. transformers' `device` argument is an
# integer index: 0 = first CUDA device, -1 = CPU.
translator = pipeline("translation_ar_to_en", model="Helsinki-NLP/opus-mt-ar-en", device=0 if torch.cuda.is_available() else -1)

# Text-to-image diffusion pipeline. Move it to the selected device explicitly —
# `from_pretrained` alone leaves the model on CPU, so without `.to(device)` the
# expensive image-generation step never used the GPU.
pipe = DiffusionPipeline.from_pretrained("sairajg/Text_To_Image").to(device)



def translate_and_generate(arabic_text):
    """Translate an Arabic prompt to English, then generate an image from it.

    Parameters:
        arabic_text: Arabic prompt string entered by the user.

    Returns:
        A ``(image, text)`` tuple for the Gradio outputs: on success the
        generated PIL image and the translated English prompt; on failure
        ``None`` and a human-readable error message (shown in the debug box
        instead of crashing the UI).
    """
    try:
        # pipeline() returns a list of dicts; take the first translation.
        translated_text = translator(arabic_text)[0]['translation_text']
        # The diffusion pipeline returns an object whose `.images` is a list
        # of PIL images; we generate a single image per prompt.
        result = pipe(translated_text).images[0]
        return result, translated_text
    except Exception as e:
        # Surface the failure in the debug textbox rather than raising,
        # so the Gradio app stays responsive.
        return None, f"Error: {e}"


# Build the Gradio UI: one Arabic text input, a button that triggers
# translate_and_generate, and two outputs (generated image + debug text).
with gr.Blocks() as interface:
    gr.Markdown("### Arabic to Image Generator ")

    text_input = gr.Textbox(label="Enter Arabic Prompt:", placeholder="اكتب هنا...")
    generate_button = gr.Button("Generate Image ")
    image_output = gr.Image(label="Generated Image")
    text_output = gr.Textbox(label="Debug Output")

    # Wire the button: the handler returns (image, text), matching the
    # two outputs in order.
    generate_button.click(translate_and_generate, inputs=text_input, outputs=[image_output, text_output])

# Start the Gradio server (blocking call).
interface.launch()