import io
import json
import os
import time
from urllib import request, parse

import gradio as gr
from PIL import Image as PILImage

prompt_text = """
{
  "10": {
    "inputs": {
      "ckpt_name": "DreamShaper_8_pruned.safetensors",
      "vae_name": "vae-ft-mse-840000-ema-pruned.ckpt",
      "clip_skip": -2,
      "lora_name": "None",
      "lora_model_strength": 1,
      "lora_clip_strength": 1,
      "positive": "In the style of Grand Theft Auto, loading screens, (palm trees), GTA style artwork, highly detailed, urban scene with numerous palm trees, neon lights, and graffiti, trending on ArtStation, preserving the individual's race, color and hair.",
      "negative": "(worst quality, low quality = 1.3)",
      "token_normalization": "none",
      "weight_interpretation": "comfy",
      "empty_latent_width": [
        "65",
        0
      ],
      "empty_latent_height": [
        "65",
        1
      ],
      "batch_size": 1,
      "cnet_stack": [
        "11",
        0
      ]
    },
    "class_type": "Efficient Loader",
    "_meta": {
      "title": "Efficient Loader"
    }
  },
  "11": {
    "inputs": {
      "switch_1": "On",
      "controlnet_1": "control_v11p_sd15_lineart_fp16.safetensors",
      "controlnet_strength_1": 0.7000000000000001,
      "start_percent_1": 0,
      "end_percent_1": 1,
      "switch_2": "On",
      "controlnet_2": "control_v11p_sd15_openpose_fp16.safetensors",
      "controlnet_strength_2": 1,
      "start_percent_2": 0,
      "end_percent_2": 1,
      "switch_3": "Off",
      "controlnet_3": "None",
      "controlnet_strength_3": 0.99,
      "start_percent_3": 0,
      "end_percent_3": 1,
      "image_1": [
        "107",
        0
      ],
      "image_2": [
        "109",
        0
      ]
    },
    "class_type": "CR Multi-ControlNet Stack",
    "_meta": {
      "title": "🕹️ CR Multi-ControlNet Stack"
    }
  },
  "12": {
    "inputs": {
      "low_threshold": 100,
      "high_threshold": 200,
      "resolution": 2048,
      "image": [
        "14",
        0
      ]
    },
    "class_type": "CannyEdgePreprocessor",
    "_meta": {
      "title": "Canny Edge"
    }
  },
  "13": {
    "inputs": {
      "images": [
        "12",
        0
      ]
    },
    "class_type": "PreviewImage",
    "_meta": {
      "title": "Preview Image"
    }
  },
  "14": {
    "inputs": {
      "image": "IMG_7593_2 (10).jpg",
      "upload": "image"
    },
    "class_type": "LoadImage",
    "_meta": {
      "title": "Load Image"
    }
  },
  "64": {
    "inputs": {
      "seed": 4091745839,
      "steps": 10,
      "cfg": 4,
      "sampler_name": "dpm_fast",
      "scheduler": "karras",
      "denoise": 1,
      "preview_method": "auto",
      "vae_decode": "true",
      "model": [
        "10",
        0
      ],
      "positive": [
        "10",
        1
      ],
      "negative": [
        "10",
        2
      ],
      "latent_image": [
        "10",
        3
      ],
      "optional_vae": [
        "10",
        4
      ],
      "script": [
        "101",
        0
      ]
    },
    "class_type": "KSampler (Efficient)",
    "_meta": {
      "title": "KSampler (Efficient)"
    }
  },
  "65": {
    "inputs": {
      "width": 512,
      "height": 512,
      "aspect_ratio": "SD1.5 - 2:3 portrait 512x768",
      "swap_dimensions": "Off",
      "upscale_factor": 1,
      "prescale_factor": 1,
      "batch_size": 1
    },
    "class_type": "CR Aspect Ratio",
    "_meta": {
      "title": "🔳 CR Aspect Ratio"
    }
  },
  "99": {
    "inputs": {
      "filename_prefix": "image",
      "images": [
        "64",
        5
      ]
    },
    "class_type": "SaveImage",
    "_meta": {
      "title": "Save Image"
    }
  },
  "101": {
    "inputs": {
      "grid_spacing": 0,
      "XY_flip": "False",
      "Y_label_orientation": "Horizontal",
      "cache_models": "True",
      "ksampler_output_image": "Images",
      "X": [
        "102",
        0
      ]
    },
    "class_type": "XY Plot",
    "_meta": {
      "title": "XY Plot"
    }
  },
  "102": {
    "inputs": {
      "batch_count": 1,
      "first_cfg": 4,
      "last_cfg": 4
    },
    "class_type": "XY Input: CFG Scale",
    "_meta": {
      "title": "XY Input: CFG Scale"
    }
  },
  "107": {
    "inputs": {
      "mode": "anime",
      "image": [
        "14",
        0
      ]
    },
    "class_type": "Lineart_Detector_Preprocessor",
    "_meta": {
      "title": "Lineart_Detector_Preprocessor"
    }
  },
  "108": {
    "inputs": {
      "images": [
        "107",
        0
      ]
    },
    "class_type": "PreviewImage",
    "_meta": {
      "title": "Preview Image"
    }
  },
  "109": {
    "inputs": {
      "include_face": true,
      "include_hand": true,
      "include_body": true,
      "image": [
        "14",
        0
      ]
    },
    "class_type": "Openpose_Detector_Preprocessor",
    "_meta": {
      "title": "Openpose_Detector_Preprocessor"
    }
  },
  "110": {
    "inputs": {
      "images": [
        "109",
        0
      ]
    },
    "class_type": "PreviewImage",
    "_meta": {
      "title": "Preview Image"
    }
  }
}
"""

def queue_prompt(prompt):
    # POST the workflow to the local ComfyUI server; the JSON response includes
    # the queued prompt_id, which callers can use to track the job.
    p = {"prompt": prompt}
    data = json.dumps(p).encode('utf-8')
    req = request.Request("http://127.0.0.1:8188/prompt", data=data)
    return json.loads(request.urlopen(req).read())
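
# A more robust way to pick up results than watching the output folder is to
# poll ComfyUI's /history endpoint for the prompt_id returned by queue_prompt.
# The sketch below is an optional alternative based on ComfyUI's standard HTTP
# API (/history/<prompt_id> and /view); the helper name and the 1-second poll
# interval are this example's own choices and the function is not used by the
# script further down.
def wait_for_output_images(prompt_id, host="http://127.0.0.1:8188"):
    # Poll the server until the prompt appears in its execution history,
    # then download every image recorded for the workflow's output nodes.
    while True:
        with request.urlopen(f"{host}/history/{prompt_id}") as resp:
            history = json.loads(resp.read())
        if prompt_id in history:
            break
        time.sleep(1)
    images = []
    for node_output in history[prompt_id]["outputs"].values():
        for img in node_output.get("images", []):
            query = parse.urlencode(img)  # filename, subfolder, type
            with request.urlopen(f"{host}/view?{query}") as resp:
                images.append(PILImage.open(io.BytesIO(resp.read())))
    return images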

def generate_images(positive_prompt, negative_prompt, seed, image_path):
    prompt = json.loads(prompt_text)
    # Patch the workflow: prompts on the Efficient Loader ("10"), the uploaded
    # image on the LoadImage node ("14"), and the seed on the sampler ("64").
    # Note that image_path must point to a file the ComfyUI server can read
    # (typically one in its input folder).
    prompt["10"]["inputs"]["positive"] = positive_prompt
    prompt["10"]["inputs"]["negative"] = negative_prompt
    prompt["14"]["inputs"]["image"] = image_path
    prompt["64"]["inputs"]["seed"] = int(seed)

    output_dir = "/Users/paresh/ComfyUI/output/"
    existing_files = set(os.listdir(output_dir))
    queue_prompt(prompt)

    # Wait for the SaveImage node to write a new "image_*.png" file rather than
    # assuming a hard-coded counter, then return that image.
    while True:
        new_files = [f for f in os.listdir(output_dir)
                     if f not in existing_files and f.startswith("image_")]
        if new_files:
            break
        time.sleep(1)
    time.sleep(0.5)  # brief pause so the file is fully written
    output_image_path = os.path.join(output_dir, sorted(new_files)[-1])
    return PILImage.open(output_image_path)
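
# Example of a direct call (values taken from the workflow's defaults; normally
# the Gradio UI below supplies them):
#   img = generate_images(
#       "In the style of Grand Theft Auto loading screens, palm trees, neon lights",
#       "(worst quality, low quality:1.3)",
#       4091745839,
#       "IMG_7593_2 (10).jpg",
#   )
#   img.save("gta_style_output.png")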

# Gradio Interface
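# The inputs below are passed positionally to generate_images(positive_prompt,
# negative_prompt, seed, image_path); the returned PIL image is shown by the
# image output component.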
seed = gr.Number(value=43857297359, label="Seed")
image = gr.Image(type="filepath", label="Upload Image")
positive_prompt = gr.Textbox(lines=2, label="Positive Prompt", placeholder="Enter positive prompt")
negative_prompt = gr.Textbox(lines=1, label="Negative Prompt", placeholder="Enter negative prompt")

interface = gr.Interface(
    fn=generate_images,
    inputs=[positive_prompt, negative_prompt, seed, image],
    outputs="text",
    title="Image Generation with Custom Prompts",
    description="Generate images in the style of GTA/Anime with customizable prompts with a image input.",
)

interface.launch()