Jensin committed on
Commit 0a6a466 · verified · 1 Parent(s): 9c89ed0

Update app.py

Files changed (1)
  1. app.py +51 -65
app.py CHANGED
@@ -1,87 +1,74 @@
+ # app.py ──────────────────────────────────────────────────────────────────────
  from datasets import load_dataset
- import gradio as gr
- from gradio_client import Client
- import json, os, random, torch, spaces
+ import gradio as gr, json, os, random, torch, spaces
  from diffusers import FluxPipeline, AutoencoderKL
- from live_preview_helpers import flux_pipe_call_that_returns_an_iterable_of_images
+ from gradio_client import Client
+ from live_preview_helpers import (
+     flux_pipe_call_that_returns_an_iterable_of_images as flux_iter,
+ )

- # ───────────────────────────── 1. Device ────────────────────────────────────
+ # ─────────────────────────── 1. Device ─────────────────────────────────────
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

- # ─────────────────────── 2. Image / FLUX pipeline ───────────────────────────
+ # ────────────────────── 2. FLUX image pipeline ─────────────────────────────
  pipe = FluxPipeline.from_pretrained(
      "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16
  ).to(device)
  good_vae = AutoencoderKL.from_pretrained(
      "black-forest-labs/FLUX.1-dev", subfolder="vae", torch_dtype=torch.bfloat16
  ).to(device)
- pipe.flux_pipe_call_that_returns_an_iterable_of_images = (
-     flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe)
- )
+ pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_iter.__get__(pipe)

- # ───────────────────────── 3. LLM client (robust) ───────────────────────────
- def _first_working_client(candidates: list[str]) -> Client:
+ # ───────────────────────── 3. LLM client (robust) ──────────────────────────
+ LLM_SPACES = [
+     "https://huggingfaceh4-zephyr-chat.hf.space",
+     "HuggingFaceH4/zephyr-chat",
+     "huggingface-projects/gemma-2-9b-it",
+ ]
+
+ def first_live_space(space_ids: list[str]) -> Client:
      """
-     Try a list of Space URLs / repo-ids, return the first that gives a JSON config.
+     Return the first Space whose /chat endpoint answers a 1-token echo.
      """
-     for src in candidates:
+     for sid in space_ids:
          try:
-             print(f"[info] Trying LLM Space: {src}")
-             c = Client(src, hf_token=os.getenv("HF_TOKEN"))  # token optional
-             # If this passes, the config was parsed as JSON
-             c.view_api()
-             print(f"[info] Selected LLM Space: {src}")
+             print(f"[info] probing {sid}")
+             c = Client(sid, hf_token=os.getenv("HF_TOKEN"))
+             _ = c.predict("ping", 8, api_name="/chat")  # simple health check
+             print(f"[info] using {sid}")
              return c
          except Exception as e:
-             print(f"[warn] {src} not usable → {e}")
-     raise RuntimeError("No usable LLM Space found!")
+             print(f"[warn] {sid} unusable → {e}")
+     raise RuntimeError("No live chat Space found!")

- LLM_CANDIDATES = [
-     "https://huggingfaceh4-zephyr-chat.hf.space",   # direct URL
-     "HuggingFaceH4/zephyr-chat",                    # repo slug
-     "huggingface-projects/gemma-2-9b-it",           # fallback Space
- ]
+ llm_client = first_live_space(LLM_SPACES)
+ CHAT_API = "/chat"  # universal endpoint for TGI-style Spaces

- llm_client = _first_working_client(LLM_CANDIDATES)
- CHAT_API = llm_client.view_api()[0]["api_name"]  # safest way to get endpoint
-
- def call_llm(
-     user_prompt: str,
-     system_prompt: str = "You are a helpful creative assistant.",
-     history: list | None = None,
-     temperature: float = 0.7,
-     top_p: float = 0.9,
-     max_tokens: int = 1024,
- ) -> str:
+ def call_llm(prompt: str,
+              max_tokens: int = 256,
+              temperature: float = 0.6,
+              top_p: float = 0.9) -> str:
      """
-     Unified chat wrapper – works for both Zephyr and Gemma Spaces.
+     Send a single-message chat to the Space. Extra sliders in the remote UI must
+     be supplied in positional order after the prompt, so we match Zephyr/Gemma:
+     [prompt, max_tokens, temperature, top_p, repeat_penalty, presence_penalty]
+     We pass only the first four; the Space will fill the rest with defaults.
      """
-     history = history or []
      try:
-         result = llm_client.predict(
-             user_prompt,
-             system_prompt,
-             history,
-             temperature,
-             top_p,
-             max_tokens,
-             api_name=CHAT_API,
-         )
-         # Some Spaces return string, some return (…, history) tuple
-         if isinstance(result, str):
-             return result.strip()
-         return result[1][0][-1].strip()
-     except Exception as e:
-         print(f"[error] LLM call failed → {e}")
+         return llm_client.predict(
+             prompt, max_tokens, temperature, top_p, api_name=CHAT_API
+         ).strip()
+     except Exception as exc:
+         print(f"[error] LLM failure → {exc}")
          return "…"

- # ───────────────────────── 4. Persona dataset ───────────────────────────────
+ # ──────────────────────── 4. Persona dataset ──────────────────────────────
  ds = load_dataset("MohamedRashad/FinePersonas-Lite", split="train")

  def random_persona() -> str:
      return ds[random.randint(0, len(ds) - 1)]["persona"]

- # ─────────────────────────── 5. Prompt templates ───────────────────────────
+ # ─────────────────────────── 5. Text prompts ───────────────────────────────
  PROMPT_TEMPLATE = """Generate a character with this persona description:

  {persona_description}
@@ -102,9 +89,9 @@ WORLD_PROMPT = (
      "Respond with the description only."
  )

- # ─────────────────────── 6. Helper functions ───────────────────────────────
+ # ───────────────────────── 6. Helper functions ─────────────────────────────
  def random_world() -> str:
-     return call_llm(WORLD_PROMPT)
+     return call_llm(WORLD_PROMPT, max_tokens=120)

  @spaces.GPU(duration=75)
  def infer_flux(character_json):
@@ -132,7 +119,7 @@ def generate_character(world_desc: str, persona_desc: str,
      try:
          return json.loads(raw)
      except json.JSONDecodeError:
-         # One retry
+         # retry once if the model didn't return valid JSON
          raw = call_llm(
              PROMPT_TEMPLATE.format(
                  persona_description=persona_desc,
@@ -142,17 +129,17 @@
          )
      return json.loads(raw)

- # ───────────────────────────── 7. UI ────────────────────────────────────────
+ # ─────────────────────────── 7. Gradio UI ──────────────────────────────────
  DESCRIPTION = """
- * Generates a character sheet (JSON) from a world + persona.
- * Appearance images via **FLUX-dev**; narrative via **Zephyr-chat** (or Gemma fallback).
- * Personas come from **FinePersonas-Lite**.
+ * Generates a JSON character sheet from a world + persona.
+ * Appearance images via **FLUX-dev**; story text via Zephyr-chat or Gemma fallback.
+ * Personas sampled from **FinePersonas-Lite**.

- Tip → Spin the world, then shuffle personas to see very different heroes.
+ Tip → Shuffle the world then persona for rapid inspiration.
  """

  with gr.Blocks(title="Character Generator", theme="Nymbo/Nymbo_Theme") as demo:
-     gr.Markdown("<h1 style='text-align:center'>🧚‍♀️ Character Generator</h1>")
+     gr.Markdown("<h1 style='text-align:center'>🧝‍♂️ Character Generator</h1>")
      gr.Markdown(DESCRIPTION.strip())

      with gr.Row():
@@ -180,4 +167,3 @@ with gr.Blocks(title="Character Generator", theme="Nymbo/Nymbo_Theme") as demo:
      btn_persona.click(random_persona, outputs=[persona_tb])

  demo.queue().launch(share=False)
-
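
A note on the flux_iter.__get__(pipe) line this commit keeps: plain Python functions are descriptors, so f.__get__(obj) returns a method bound to that specific instance. A minimal, self-contained sketch of the same trick; the Greeter and greet names are hypothetical, used purely for illustration:

class Greeter:
    def __init__(self, name: str):
        self.name = name

def greet(self, punctuation: str = "!") -> str:
    # once bound, `self` is the Greeter instance, exactly as in a normal method
    return f"Hello, {self.name}{punctuation}"

g = Greeter("world")
g.greet = greet.__get__(g)  # bind the free function to this one instance
print(g.greet())            # -> Hello, world!

Binding on the instance, as the commit does with pipe, patches a single object without touching the FluxPipeline class itself.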
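The new call_llm docstring assumes the remote /chat endpoint accepts its extra sliders positionally after the prompt. Below is a hedged usage sketch with gradio_client; the Space id and the [prompt, max_tokens, temperature, top_p] order are taken from this diff and may not hold for other Spaces, which is why running view_api() first is worthwhile:

from gradio_client import Client

client = Client("HuggingFaceH4/zephyr-chat")  # assumed TGI-style chat Space
client.view_api()  # prints the endpoint's actual positional signature

reply = client.predict(
    "Name three moons of Jupiter.",  # prompt      (slot 1)
    64,                              # max_tokens  (slot 2)
    0.6,                             # temperature (slot 3)
    0.9,                             # top_p       (slot 4)
    api_name="/chat",                # omitted sliders keep the Space defaults
)
print(reply.strip())

If the positional order is wrong for a given Space, predict raises or returns garbage rather than failing silently, so the view_api() check is the safer first step.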