# dnm3d / app.py
import gradio as gr
import torch
import os
import sys
from huggingface_hub import login
from diffusers import StableDiffusionPipeline
import io
import base64
from PIL import Image
import numpy as np
# Use the GPU when available; otherwise fall back to CPU
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using device: {device}")
# More details about the environment
print(f"Gradio version: {gr.__version__}")
print(f"Python version: {sys.version}")
# Hugging Face API token: look for it first as an environment variable,
# then in the Hugging Face Secrets system
hf_token = os.environ.get("HUGGINGFACE_TOKEN")

if hf_token:
    print("Found HUGGINGFACE_TOKEN in environment variables")
    # Log in with the token
    login(token=hf_token)
    print("Logged in with Hugging Face token")
else:
    print("HUGGINGFACE_TOKEN not found in environment variables")
# Global model variable
pipe = None
def load_model():
    global pipe
    try:
        print("Loading Stable Diffusion model for 3D-style rendering...")
        # Use a lightweight model that can generate game icons
        pipe = StableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            torch_dtype=torch.float16 if device == "cuda" else torch.float32,
            safety_checker=None,
            requires_safety_checker=False
        ).to(device)
        print("Model loaded successfully!")
        return True
    except Exception as e:
        print(f"Error loading model: {str(e)}")
        # Fall back to the simple placeholder image produced in generate_3d_icon
        return False
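# Optional tweak (a sketch, not part of the original app): on small GPUs,
# diffusers' attention slicing lowers peak VRAM use at a modest speed cost.
# Calling this after load_model() succeeds may help if the Space hits
# out-of-memory errors; it is defined here but never called by default.
def enable_low_memory_mode():
    if pipe is not None and device == "cuda":
        pipe.enable_attention_slicing()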
def generate_3d_icon(prompt, seed=0, guidance_scale=7.5, num_inference_steps=20):
    try:
        print(f"Generating 3D icon with prompt: {prompt}")
        if pipe is None:
            raise Exception("Model not loaded")
        # Enhance prompt for 3D game icon style
        enhanced_prompt = f"3D rendered game icon, {prompt}, clean background, vibrant colors, high quality, digital art, professional game asset"
        # Gradio sliders may deliver floats; the seed and step count must be ints
        seed = int(seed)
        num_inference_steps = int(num_inference_steps)
        # Set seed for reproducibility (seed 0 means an unseeded generator)
        if seed > 0:
            generator = torch.Generator(device=device).manual_seed(seed)
        else:
            generator = None
        # Generate image
        with torch.no_grad():
            image = pipe(
                enhanced_prompt,
                guidance_scale=guidance_scale,
                num_inference_steps=num_inference_steps,
                generator=generator,
                height=512,
                width=512
            ).images[0]
        return image
    except Exception as e:
        print(f"Error generating icon: {str(e)}")
        # Return a simple placeholder image
        placeholder = Image.new('RGB', (512, 512), color='lightblue')
        return placeholder
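# Sketch (not used by the Gradio interface): the io and base64 imports above
# are otherwise unused in this file. If a generated icon ever needs to be sent
# to an external client as a string, a helper along these lines would put them
# to work.
def image_to_data_uri(image):
    """Encode a PIL image as a base64 PNG data URI."""
    buffer = io.BytesIO()
    image.save(buffer, format="PNG")
    encoded = base64.b64encode(buffer.getvalue()).decode("utf-8")
    return f"data:image/png;base64,{encoded}"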
# Create Gradio interface
def create_interface():
    # Load model first
    model_loaded = load_model()
    if not model_loaded:
        print("Warning: model failed to load; outputs will be placeholder images")
    interface = gr.Interface(
        fn=generate_3d_icon,
        inputs=[
            gr.Textbox(label="Prompt", placeholder="Describe your game icon", value="galatasaray"),
            gr.Slider(minimum=0, maximum=1000, value=0, step=1, label="Seed"),
            gr.Slider(minimum=1.0, maximum=20.0, value=7.5, step=0.5, label="Guidance Scale"),
            gr.Slider(minimum=10, maximum=50, value=20, step=1, label="Inference Steps")
        ],
        outputs=gr.Image(type="pil", label="Generated Game Icon"),
        title="3D Game Icon Generator",
        description="Generate 3D-style game icons using AI",
        examples=[
            ["fantasy sword game icon", 42, 7.5, 20],
            ["space shooter game icon", 123, 7.5, 20],
            ["puzzle game icon with gems", 456, 7.5, 20]
        ]
    )
    return interface
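# Note (an optional change, not in the original app): Stable Diffusion
# inference can take well over a minute on CPU, so enabling Gradio's request
# queue with interface.queue() before launch() below would help avoid HTTP
# timeouts on slow hardware.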
# Launch the interface
if __name__ == "__main__":
    try:
        interface = create_interface()
        print("Launching interface...")
        interface.launch(
            share=False,
            server_name="0.0.0.0",
            server_port=7860,
            show_error=True
        )
    except Exception as e:
        print(f"Error launching interface: {str(e)}")