Upload app.py
Browse files
    	
        app.py
    ADDED
    
    | @@ -0,0 +1,43 @@ | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | 
|  | |
| 1 | 
            +
            import torch
         | 
| 2 | 
            +
            from diffusers import StableDiffusionPipeline
         | 
| 3 | 
            +
            import gradio as gr
         | 
| 4 | 
            +
            from PIL import Image
         | 
| 5 | 
            +
             | 
# Load the model and pipeline
model_id = "ares1123/virtual-dress-try-on"

# Prefer the GPU when one is available; otherwise fall back to the CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
pipeline = StableDiffusionPipeline.from_pretrained(model_id)
pipeline.to(device)
def virtual_try_on(image, clothing_image):
    """Generate a virtual try-on image from a person photo and a clothing photo.

    Args:
        image: PIL.Image of the person; its size drives the output resolution.
        clothing_image: PIL.Image of the clothing item; resized to match.

    Returns:
        The first PIL.Image produced by the diffusion pipeline.
    """
    # Uploads may be RGBA/palette/grayscale; diffusion pipelines expect RGB.
    image = image.convert("RGB")
    clothing_image = clothing_image.convert("RGB")

    # Stable Diffusion requires dimensions that are multiples of 8.
    # Clamp to at least 8 so a tiny upload (<8px) doesn't floor to a
    # 0-sized resize, which would crash PIL.
    width, height = image.size
    width = max(8, (width // 8) * 8)
    height = max(8, (height // 8) * 8)

    # Resize both images to the model-compatible resolution.
    image = image.resize((width, height))
    clothing_image = clothing_image.resize((width, height))

    # Prompt describing what the model should generate.
    prompt = "A person wearing new clothes"

    # NOTE(review): StableDiffusionPipeline is a text-to-image pipeline;
    # `image` and `conditioning_image` are only honored if this checkpoint
    # ships a custom pipeline that accepts them — verify against the model
    # card, otherwise this call may raise TypeError.
    result = pipeline(prompt=prompt, image=image, conditioning_image=clothing_image)
    return result.images[0]
# Set up a simple Gradio interface for testing
person_input = gr.Image(type="pil", label="User Image")
clothing_input = gr.Image(type="pil", label="Clothing Image")

interface = gr.Interface(
    fn=virtual_try_on,
    inputs=[person_input, clothing_input],
    outputs=gr.Image(type="pil"),
    title="Virtual Dress Try-On",
    description="Upload an image of yourself and a clothing image to try it on virtually!",
)

# Launch the interface
interface.launch(share=True)
