"""Gradio app that generates images locally with the FLUX.1-dev diffusion pipeline."""

import os

import torch
from diffusers import FluxPipeline  # type: ignore
import gradio as gr  # type: ignore
from huggingface_hub import login, InferenceClient

# Load FLUX.1-dev in bfloat16 to cut memory use versus float32.
pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16
)
# Save some VRAM by offloading the model to CPU. Remove this if you have
# enough GPU power.
pipe.enable_model_cpu_offload()

token = os.getenv("HF_TOKEN")
if token:
    # login(token=None) raises when HF_TOKEN is unset; only log in when present.
    login(token=token)

# NOTE(review): this client is never used below — generation goes through the
# local `pipe`. Kept so any importer of `client` keeps working; consider
# removing it. Recent huggingface_hub raises ValueError when both `api_key`
# and `token` are passed, so only `api_key` is supplied here.
client = InferenceClient(
    provider="together",
    api_key=token,
    model="black-forest-labs/FLUX.1-dev",
)


def generate_image(prompt, seed=0):
    """Generate a 1024x1024 image for *prompt* with the local FLUX pipeline.

    Args:
        prompt: Text description of the desired image.
        seed: CPU generator seed; defaults to 0 so identical prompts are
            reproducible. Pass a different value for variation.

    Returns:
        The first generated PIL image from the pipeline output.
    """
    image = pipe(
        prompt,
        height=1024,
        width=1024,
        guidance_scale=3.5,
        num_inference_steps=50,
        max_sequence_length=512,
        generator=torch.Generator("cpu").manual_seed(seed),
    ).images[0]
    return image


# gr.inputs / gr.outputs were removed in Gradio 3.x; use the top-level
# component classes instead. (Gradio only passes the declared inputs to
# `fn`, so the extra `seed` parameter keeps its default.)
gradio_app = gr.Interface(
    fn=generate_image,
    inputs=gr.Textbox(label="Entrez une description"),
    outputs=gr.Image(label="Image générée"),
    title="Générateur d'images IA",
    description="Entrez une description et générez une image correspondante.",
)

if __name__ == "__main__":
    gradio_app.launch()