import os
import spaces
import torch
from diffusers import StableDiffusionPipeline, EulerAncestralDiscreteScheduler
import gradio as gr
import random
import tqdm
from huggingface_hub import hf_hub_download
from transformers import CLIPTextModel, CLIPTokenizer

# Disable tqdm's monitor thread (the attribute lives on the tqdm class, not the module)
tqdm.tqdm.monitor_interval = 0


# Load the model from a single safetensors checkpoint
def load_model():
    model_path = hf_hub_download(
        repo_id="kayfahaarukku/AkashicPulse-v1.0",
        filename="AkashicPulse-v1.0-ft-ft.safetensors"
    )

    # Reuse the tokenizer and text encoder from standard SD 1.5
    tokenizer = CLIPTokenizer.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="tokenizer")
    text_encoder = CLIPTextModel.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="text_encoder")

    # Build the pipeline around the checkpoint, with the safety checker disabled
    pipe = StableDiffusionPipeline.from_single_file(
        model_path,
        torch_dtype=torch.float16,
        use_safetensors=True,
        tokenizer=tokenizer,
        text_encoder=text_encoder,
        requires_safety_checker=False,
        safety_checker=None
    )
    pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
    return pipe


# Load the pipeline once at startup
pipe = load_model()


# Generate an image; @spaces.GPU requests a ZeroGPU slot for the duration of the call
@spaces.GPU
def generate_image(prompt, negative_prompt, use_defaults, resolution, guidance_scale,
                   num_inference_steps, seed, randomize_seed, progress=gr.Progress()):
    pipe.to('cuda')  # move to GPU inside the decorated function, as ZeroGPU requires
    if randomize_seed:
        seed = random.randint(0, 99999999)
    if use_defaults:
        prompt = f"{prompt}, masterpiece, best quality"
        negative_prompt = f"lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, signature, watermark, username, blurry, {negative_prompt}"
    generator = torch.manual_seed(seed)

    # Report denoising progress to the Gradio progress bar.
    # callback/callback_steps is the legacy diffusers callback API; newer releases
    # prefer callback_on_step_end, but the old form still works.
    def callback(step, timestep, latents):
        progress(step / num_inference_steps)

    width, height = map(int, resolution.split('x'))

    image = pipe(
        prompt,
        negative_prompt=negative_prompt,
        width=width,
        height=height,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
        callback=callback,
        callback_steps=1
    ).images[0]

    torch.cuda.empty_cache()

    metadata_text = f"{prompt}\nNegative prompt: {negative_prompt}\nSteps: {num_inference_steps}, Sampler: Euler a, Size: {width}x{height}, Seed: {seed}, CFG scale: {guidance_scale}"
    return image, seed, metadata_text


# Gradio event handler: wraps generate_image and surfaces errors in the server log
def interface_fn(prompt, negative_prompt, use_defaults, resolution, guidance_scale,
                 num_inference_steps, seed, randomize_seed, progress=gr.Progress()):
    try:
        image, seed, metadata_text = generate_image(
            prompt, negative_prompt, use_defaults, resolution, guidance_scale,
            num_inference_steps, seed, randomize_seed, progress
        )
        return image, seed, gr.update(value=metadata_text)
    except Exception as e:
        print(f"Error generating image: {e}")
        raise


# Restore every control to its default value
def reset_inputs():
    return (
        gr.update(value=''), gr.update(value=''), gr.update(value=True),
        gr.update(value='832x1216'), gr.update(value=7), gr.update(value=28),
        gr.update(value=0), gr.update(value=True), gr.update(value='')
    )


with gr.Blocks(title="AkashicPulse v1.0 Demo", theme="NoCrypt/miku@1.2.1") as demo:
    gr.HTML(
        "