#!/usr/bin/env python
"""FLUX.1-Dev Gradio app.

Tabs:
  * Image To Prompt — describe an uploaded photo via Mistral's pixtral model.
  * Text to Image  — generate an image through the HF Inference API, with an
    optional custom LoRA repo id.
  * Flip Image     — mirror an image left-to-right.
  * Tips           — short usage notes.

NOTE(review): this file was reconstructed from a whitespace-mangled source;
the HTML header and the Tips markdown were garbled in the original and have
been rebuilt from their surviving visible text — confirm against the deployed
Space if pixel-exact markup matters.
"""
import base64  # fix: encode_image() uses base64 but it was never imported
import io
import json
import os
import random
import subprocess
import time
import uuid
from datetime import datetime
from typing import Tuple

import gradio as gr
import numpy as np
import requests
import spaces
import torch
from deep_translator import GoogleTranslator
from fastapi import FastAPI
from mistralai import Mistral
from PIL import Image
from transformers import AutoModelForCausalLM, AutoProcessor

from theme import theme

# NOTE(review): `app` is immediately rebound to the gr.Blocks object below,
# so this FastAPI instance is effectively unused — kept for compatibility.
app = FastAPI()

api_key = os.getenv("MISTRAL_KEY")
Mistralclient = Mistral(api_key=api_key)

API_TOKEN = os.getenv("HF_READ_TOKEN")
headers = {"Authorization": f"Bearer {API_TOKEN}"}
timeout = 100  # seconds, for HF Inference API requests


def flip_image(x):
    """Return *x* mirrored left-to-right (numpy image array)."""
    return np.fliplr(x)


def clear():
    """Reset a component to empty."""
    return None


def change_tab():
    # NOTE(review): gr.Tabs.update() was removed in Gradio 4 (use
    # gr.Tabs(selected=1) there). This helper is never wired to any event,
    # so it is kept verbatim; fix before connecting it.
    return gr.Tabs.update(selected=1)


def query(lora_id, prompt, is_negative=False, steps=28, cfg_scale=3.5,
          sampler="DPM++ 2M Karras", seed=-1, strength=100,
          width=896, height=1152):
    """Generate an image with the HF Inference API.

    Args:
        lora_id: HF model/LoRA repo id; blank falls back to FLUX.1-dev.
        prompt: user prompt (translated ru->en before generation).
        is_negative, steps, cfg_scale, sampler, strength: generation knobs
            forwarded in the request payload.
        seed: -1 means "pick a random seed" (the chosen seed is returned).
        width, height: output resolution in pixels.

    Returns:
        (PIL.Image, seed) on success, or None for an empty prompt / bad
        response body.

    Raises:
        gr.Error: when the API responds with a non-200 status.
    """
    if prompt is None or prompt == "":
        return None
    if lora_id is None or lora_id.strip() == "":
        lora_id = "black-forest-labs/FLUX.1-dev"

    key = random.randint(0, 999)  # tag for correlating log lines

    api_url = "https://api-inference.huggingface.co/models/" + lora_id.strip()
    # Single token today; kept as a variable so a token pool can be swapped
    # in later (the original drew random.choice from a one-element list).
    api_token = os.getenv("HF_READ_TOKEN")
    request_headers = {"Authorization": f"Bearer {api_token}"}

    # Translate the prompt from Russian to English before generation.
    prompt = GoogleTranslator(source='ru', target='en').translate(prompt)
    print(f'\033[1mGeneration {key} translation:\033[0m {prompt}')

    prompt = f"{prompt} | ultra detail, ultra elaboration, ultra quality, perfect."
    print(f'\033[1mGeneration {key}:\033[0m {prompt}')

    # If seed is -1, generate a random seed so the exact value can be
    # reported back to the user for reproducibility.
    if seed == -1:
        seed = random.randint(1, 1000000000)

    payload = {
        "inputs": prompt,
        "is_negative": is_negative,
        "steps": steps,
        "cfg_scale": cfg_scale,
        # fix: seed is already resolved above; the original re-randomized
        # here in a dead branch, which could have desynced the reported seed.
        "seed": seed,
        "strength": strength,
        "parameters": {
            "width": width,
            "height": height,
        },
    }

    response = requests.post(api_url, headers=request_headers, json=payload,
                             timeout=timeout)
    if response.status_code != 200:
        print(f"Error: Failed to get image. Response status: {response.status_code}")
        print(f"Response content: {response.text}")
        if response.status_code == 503:
            raise gr.Error(f"{response.status_code} : The model is being loaded")
        raise gr.Error(f"{response.status_code}")

    try:
        image = Image.open(io.BytesIO(response.content))
        print(f'\033[1mGeneration {key} completed!\033[0m ({prompt})')
        return image, seed
    except Exception as e:
        print(f"Error when trying to open the image: {e}")
        return None


examples = [
    "a beautiful woman with blonde hair and blue eyes",
    "a beautiful woman with brown hair and grey eyes",
    "a beautiful woman with black hair and brown eyes",
]


def encode_image(image_path):
    """Encode the image at *image_path* to a base64 JPEG string.

    The image is resized to a height of 512 px (aspect ratio preserved)
    before encoding. Returns None if the file is missing or unreadable.
    """
    try:
        image = Image.open(image_path).convert("RGB")

        # Resize the image to a height of 512 while maintaining the
        # aspect ratio.
        base_height = 512
        h_percent = base_height / float(image.size[1])
        w_size = int(float(image.size[0]) * h_percent)
        image = image.resize((w_size, base_height), Image.LANCZOS)

        # fix: the original called bare BytesIO()/base64 without importing
        # either name, so this function always died with a NameError.
        buffered = io.BytesIO()
        image.save(buffered, format="JPEG")
        return base64.b64encode(buffered.getvalue()).decode("utf-8")
    except FileNotFoundError:
        print(f"Error: The file {image_path} was not found.")
        return None
    except Exception as e:
        # Generic fallback so a bad upload never crashes the UI.
        print(f"Error: {e}")
        return None


def feifeichat(image):
    """Stream a detailed English description of *image* from pixtral.

    Yields progressively longer partial messages so the Gradio textbox
    updates live while the model streams tokens.
    """
    try:
        model = "pixtral-large-2411"
        base64_image = encode_image(image)
        messages = [{
            "role": "user",
            "content": [
                {
                    "type": "text",
                    "text": "Please provide a detailed description of this photo",
                },
                {
                    "type": "image_url",
                    "image_url": f"data:image/jpeg;base64,{base64_image}",
                },
            ],
            "stream": False,
        }]

        partial_message = ""
        for chunk in Mistralclient.chat.stream(model=model, messages=messages):
            delta = chunk.data.choices[0].delta.content
            if delta is not None:
                partial_message += delta
                yield partial_message
    except Exception as e:
        print(f"Error: {e}")
        # fix: `return <value>` inside a generator is silently discarded;
        # the error message must be yielded to reach the UI.
        yield "Please upload a photo"


css = """
footer{display:none !important}
#app-container { max-width: 930px; margin-left: auto; margin-right: auto; }
"""

with gr.Blocks(theme=theme, css=css, elem_id="app-container") as app:
    # NOTE(review): header markup reconstructed from garbled source text.
    gr.HTML("<center><h1>\U0001F3A8 FLUX.1-Dev with LoRA \U0001F1EC\U0001F1E7</h1></center>")

    with gr.Tabs() as tabs:
        with gr.TabItem(label="Image To Prompt", visible=True, id=1):
            with gr.Row():
                with gr.Column():
                    input_img = gr.Image(label="Input Picture 🖼️", height=320,
                                         type="filepath")
                    submit_btn = gr.Button(value="Submit", variant='primary')
                with gr.Column():
                    output_text = gr.Textbox(label="Flux Prompt ✍️",
                                             show_copy_button=True)
                    clr_button = gr.Button("Clear 🗑️ ", variant="primary",
                                           elem_id="clear_button")
                    clr_button.click(lambda: (None, None), None,
                                     [input_img, output_text],
                                     queue=False, show_api=False)
            submit_btn.click(feifeichat, [input_img], [output_text])

        with gr.TabItem(label="Text to Image", visible=True, id=0):
            with gr.Column(elem_id="app-container"):
                with gr.Row():
                    with gr.Column(elem_id="prompt-container"):
                        with gr.Group():
                            with gr.Row():
                                text_prompt = gr.Textbox(
                                    label="Image Prompt ✍️",
                                    placeholder="Enter a prompt here",
                                    lines=2,
                                    show_copy_button=True,
                                    elem_id="prompt-text-input")
                        with gr.Row():
                            with gr.Accordion("🎨 Lora trigger words", open=False):
                                gr.Markdown("""
                                - **Canopus-Pencil-Art-LoRA**: Pencil Art
                                - **Flux-Realism-FineDetailed**: Fine Detailed
                                - **Fashion-Hut-Modeling-LoRA**: Modeling
                                - **SD3.5-Large-Turbo-HyperRealistic-LoRA**: hyper realistic
                                - **Flux-Fine-Detail-LoRA**: Super Detail
                                - **SD3.5-Turbo-Realism-2.0-LoRA**: Turbo Realism
                                - **Canopus-LoRA-Flux-UltraRealism-2.0**: Ultra realistic
                                - **Canopus-Pencil-Art-LoRA**: Pencil Art
                                - **SD3.5-Large-Photorealistic-LoRA**: photorealistic
                                - **Flux.1-Dev-LoRA-HDR-Realism**: HDR
                                - **prithivMLmods/Ton618-Epic-Realism-Flux-LoRA**: Epic Realism
                                - **john-singer-sargent-style**: John Singer Sargent Style
                                - **alphonse-mucha-style**: Alphonse Mucha Style
                                - **ultra-realistic-illustration**: ultra realistic illustration
                                - **eye-catching**: eye-catching
                                - **john-constable-style**: John Constable Style
                                - **film-noir**: in the style of FLMNR
                                - **flux-lora-pro-headshot**: PROHEADSHOT
                                """)
                        with gr.Row():
                            custom_lora = gr.Dropdown(
                                [" ",
                                 "prithivMLmods/Canopus-Pencil-Art-LoRA",
                                 "prithivMLmods/Flux-Realism-FineDetailed",
                                 "prithivMLmods/Fashion-Hut-Modeling-LoRA",
                                 "prithivMLmods/SD3.5-Large-Turbo-HyperRealistic-LoRA",
                                 "prithivMLmods/Flux-Fine-Detail-LoRA",
                                 "prithivMLmods/SD3.5-Turbo-Realism-2.0-LoRA",
                                 "hugovntr/flux-schnell-realism",
                                 "fofr/sdxl-deep-down",
                                 "prithivMLmods/Canopus-LoRA-Flux-UltraRealism-2.0",
                                 "prithivMLmods/Canopus-Realism-LoRA",
                                 "prithivMLmods/Canopus-LoRA-Flux-FaceRealism",
                                 "prithivMLmods/SD3.5-Large-Photorealistic-LoRA",
                                 "prithivMLmods/Flux.1-Dev-LoRA-HDR-Realism",
                                 "prithivMLmods/Ton618-Epic-Realism-Flux-LoRA",
                                 "KappaNeuro/john-singer-sargent-style",
                                 "KappaNeuro/alphonse-mucha-style",
                                 "ntc-ai/SDXL-LoRA-slider.ultra-realistic-illustration",
                                 "ntc-ai/SDXL-LoRA-slider.eye-catching",
                                 "KappaNeuro/john-constable-style",
                                 "dvyio/flux-lora-film-noir",
                                 "dvyio/flux-lora-pro-headshot"],
                                label="Custom LoRA",)
                        with gr.Row():
                            with gr.Accordion("⚙️ Advanced Settings", open=False,
                                              elem_id="settings-container"):
                                negative_prompt = gr.Textbox(
                                    label="Negative Prompt",
                                    lines=5,
                                    placeholder="What should not be in the image",
                                    value=" (visible hand:1.3), (ugly:1.3), (duplicate:1.2), (morbid:1.1), (mutilated:1.1), out of frame, bad face, extra fingers, mutated hands, (poorly drawn hands:1.1), (poorly drawn face:1.3), (mutation:1.3), (deformed:1.3), blurry, (bad anatomy:1.1), (bad proportions:1.2), (extra limbs:1.1), cloned face, (disfigured:1.2), gross proportions, malformed limbs, (missing arms:1.1), (missing legs:1.1), (extra arms:1.2), (extra legs:1.2), fused fingers, too many fingers, (long neck:1.2), sketched by bad-artist, (bad-image-v2-39000:1.3) ")
                                with gr.Row():
                                    width = gr.Slider(label="Image Width", value=896,
                                                      minimum=64, maximum=1216, step=32)
                                    height = gr.Slider(label="Image Height", value=1152,
                                                       minimum=64, maximum=1216, step=32)
                                    strength = gr.Slider(label="Prompt Strength", value=100,
                                                         minimum=0, maximum=100, step=1)
                                    steps = gr.Slider(label="Sampling steps", value=50,
                                                      minimum=1, maximum=100, step=1)
                                    cfg = gr.Slider(label="CFG Scale", value=3.5,
                                                    minimum=1, maximum=20, step=0.5)
                                    seed = gr.Slider(label="Seed", value=-1,
                                                     minimum=-1, maximum=1000000000, step=1)
                                    method = gr.Radio(
                                        label="Sampling method",
                                        value="DPM++ 2M Karras",
                                        choices=["DPM++ 2M Karras", "DPM++ 2S a Karras",
                                                 "DPM2 a Karras", "DPM2 Karras",
                                                 "DPM++ SDE Karras", "DEIS", "LMS",
                                                 "DPM Adaptive", "DPM++ 2M",
                                                 "DPM2 Ancestral", "DPM++ S", "DPM++ SDE",
                                                 "DDPM", "DPM Fast", "dpmpp_2s_ancestral",
                                                 "Euler", "Euler CFG PP", "Euler a",
                                                 "Euler Ancestral", "Euler+beta", "Heun",
                                                 "Heun PP2", "DDIM", "LMS Karras", "PLMS",
                                                 "UniPC", "UniPC BH2"])
                        with gr.Row():
                            with gr.Accordion("🫘Seed", open=False):
                                seed_output = gr.Textbox(label="Seed Used",
                                                         elem_id="seed-output")

                # Add a button to trigger the image generation
                with gr.Row():
                    text_button = gr.Button("Generate Image 🎨", variant='primary',
                                            elem_id="gen-button")
                    clear_prompt = gr.Button("Clear Prompt 🗑️", variant="primary",
                                             elem_id="clear_button")
                    clear_prompt.click(lambda: (None), None, [text_prompt],
                                       queue=False, show_api=False)
                with gr.Group():
                    with gr.Row():
                        image_output = gr.Image(type="pil", label="Image Output",
                                                format="png", show_share_button=False,
                                                elem_id="gallery")
                with gr.Group():
                    with gr.Row():
                        gr.Examples(
                            examples=examples,
                            inputs=[text_prompt],
                        )
                with gr.Group():
                    with gr.Row():
                        clear_results = gr.Button(value="Clear Image 🗑️",
                                                  variant="primary",
                                                  elem_id="clear_button")
                        clear_results.click(lambda: (None), None, [image_output],
                                            queue=False, show_api=False)

                text_button.click(query,
                                  inputs=[custom_lora, text_prompt, negative_prompt,
                                          steps, cfg, method, seed, strength,
                                          width, height],
                                  outputs=[image_output, seed_output])

        with gr.TabItem(label="Flip Image", visible=True, id=2):
            with gr.Row():
                image_input = gr.Image()
                # NOTE(review): rebinds the `image_output` name used by the
                # Text-to-Image tab; harmless (the earlier click handler
                # already captured its component) but worth renaming someday.
                image_output = gr.Image(format="png")
            with gr.Row():
                image_button = gr.Button("Run", variant='primary')
            image_button.click(flip_image, inputs=image_input,
                               outputs=image_output, concurrency_limit=2)

        with gr.TabItem(label="Tips", visible=True, id=3):
            with gr.Row():
                # NOTE(review): markup reconstructed from garbled source text.
                gr.Markdown(
                    """
                    ## How to Use

                    1. Enter a detailed description of the image you want to create.
                    2. Adjust advanced settings if desired (tap to expand).
                    3. Tap "Generate Image" and wait for your creation!

                    Tip: Be specific in your description for best results!
                    """
                )

app.queue(default_concurrency_limit=200, max_size=200)  # <-- Sets up a queue with default parameters

if __name__ == "__main__":
    app.launch(show_api=False, share=False)