Commit: "Update app.py"
File changed: app.py
@@ -15,13 +15,12 @@ text_tokenizer = AutoTokenizer.from_pretrained(text_generation_model_name)
|
|
15 |
text_model = AutoModelForCausalLM.from_pretrained(text_generation_model_name)
|
16 |
text_generator = pipeline("text-generation", model=text_model, tokenizer=text_tokenizer)
|
17 |
|
18 |
-
# Load the Stable Diffusion XL Model for Image Generation
|
19 |
pipe = DiffusionPipeline.from_pretrained(
|
20 |
-
"stabilityai/stable-diffusion-xl-base-1.0",
|
21 |
-
torch_dtype=torch.float16,
|
22 |
-
variant="fp16",
|
23 |
)
|
24 |
-
|
|
|
25 |
|
26 |
# Function to generate image from text prompt using Stable Diffusion XL
|
27 |
def generate_image_from_text(translated_text):
|
|
|
15 |
text_model = AutoModelForCausalLM.from_pretrained(text_generation_model_name)
|
16 |
text_generator = pipeline("text-generation", model=text_model, tokenizer=text_tokenizer)
|
17 |
|
18 |
+
# Load the Stable Diffusion XL Model for Image Generation in full precision (fp32)
|
19 |
pipe = DiffusionPipeline.from_pretrained(
|
20 |
+
"stabilityai/stable-diffusion-xl-base-1.0", # Remove torch_dtype and variant for CPU-friendly precision
|
|
|
|
|
21 |
)
|
22 |
+
|
23 |
+
pipe.to("cpu") # Use CPU for inference
|
24 |
|
25 |
# Function to generate image from text prompt using Stable Diffusion XL
|
26 |
def generate_image_from_text(translated_text):
|