# dream_app.py
import torch
import numpy as np
import gradio as gr
import spaces # Ensure spaces is installed if needed for GPU decorator
import torch.nn.functional as F
from transformers import AutoTokenizer, AutoModel, AutoConfig
import time
import re
from typing import List, Dict, Tuple, Optional
import torch.distributions as dists # Added import
# --- START: Copied Helper functions from generation_utils.py ---
# [Keep the copied functions: top_p_logits, top_k_logits, sample_tokens]
def top_p_logits(logits, top_p=None):
    if top_p is None or top_p >= 1.0:
        return logits
    sorted_logits, sorted_indices = torch.sort(logits, descending=True)
    cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
    # Mask tokens whose cumulative probability exceeds top_p; shift right so the
    # first token past the threshold (and hence the top token) is always kept.
    sorted_indices_to_remove = cumulative_probs > top_p
    sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
    sorted_indices_to_remove[..., 0] = 0
    mask = torch.zeros_like(logits, dtype=torch.bool, device=logits.device).scatter_(-1, sorted_indices, sorted_indices_to_remove)
    return logits.masked_fill(mask, torch.finfo(logits.dtype).min)
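# Worked example (illustrative, not executed): for logits [2.0, 1.0, 0.5] the softmax
# probabilities are roughly [0.63, 0.23, 0.14] with cumulative sums [0.63, 0.86, 1.00];
# with top_p=0.7 the shifted mask keeps the first two tokens and sets the third to -inf.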
def top_k_logits(logits, top_k=None):
    if top_k is None or top_k <= 0:
        return logits
    top_k = min(top_k, logits.size(-1))
    # Mask every token whose logit falls below the k-th largest logit.
    indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
    return logits.masked_fill(indices_to_remove, torch.finfo(logits.dtype).min)
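# Worked example (illustrative): with logits [2.0, 1.0, 0.5] and top_k=2 the threshold
# is the 2nd-largest logit (1.0), so only 0.5 falls below it and is masked to -inf.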
def sample_tokens(logits, temperature=0.0, top_p=None, top_k=None, margin_confidence=False, neg_entropy=False):
    if temperature > 0:
        safe_temp = max(temperature, 1e-6)
        logits = logits / safe_temp
    if top_p is not None and 0.0 < top_p < 1.0:
        logits = top_p_logits(logits, top_p)
    if top_k is not None and top_k > 0:
        logits = top_k_logits(logits, top_k)
    # If filtering removed every token in a row, fall back to a uniform distribution.
    is_all_neg_inf = torch.all(logits == torch.finfo(logits.dtype).min, dim=-1, keepdim=True)
    if torch.any(is_all_neg_inf):
        uniform_logits = torch.zeros_like(logits)
        logits = torch.where(is_all_neg_inf, uniform_logits, logits)
    probs = torch.softmax(logits, dim=-1)
    probs = torch.clamp(probs, min=0.0)
    probs = probs / probs.sum(dim=-1, keepdim=True)
    probs = torch.nan_to_num(probs, nan=0.0)
    if temperature > 0:
        try:
            x0 = dists.Categorical(probs=probs).sample()
            confidence = torch.gather(probs, -1, x0.unsqueeze(-1)).squeeze(-1)
        except Exception as e:
            print(f"Warning: Sampling failed: {e}. Argmax fallback.")
            confidence, x0 = probs.max(dim=-1)
    else:
        confidence, x0 = probs.max(dim=-1)
    if margin_confidence:
        # Confidence = gap between the two most probable tokens.
        sorted_probs, _ = torch.sort(probs, dim=-1, descending=True)
        top1_probs = sorted_probs[..., 0]
        top2_probs = sorted_probs[..., 1] if sorted_probs.shape[-1] > 1 else top1_probs
        confidence = top1_probs - top2_probs
    if neg_entropy:
        # Confidence = negative entropy (sum of p*log p, <= 0; closer to 0 means more peaked).
        epsilon = 1e-10
        log_probs = torch.log(probs + epsilon)
        confidence = torch.sum(probs * log_probs, dim=-1)
    confidence = torch.nan_to_num(confidence, nan=0.0)
    return confidence, x0
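# How these scores are used downstream: the remasking strategies in the generation loop
# rank masked positions by this "confidence" -- probability of the sampled token for
# 'maskgit_plus', top-1/top-2 margin for 'topk_margin', and negative entropy for 'entropy'.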
# --- END: Copied Helper functions ---
# [Keep model loading, constants as before]
model_path = "Dream-org/Dream-v0-Instruct-7B"
# Load the model configuration (the special token IDs themselves are read from the tokenizer below).
config = AutoConfig.from_pretrained(model_path, trust_remote_code=True)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print(f"Using device: {device}")
print("Loading tokenizer...")
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
print("Loading model...")
model = AutoModel.from_pretrained(
model_path,
torch_dtype=torch.bfloat16 if device == 'cuda' else torch.float32,
trust_remote_code=True,
attn_implementation="sdpa"
)
model = model.to(device).eval()
print("Model loaded.")
MASK_TOKEN = tokenizer.mask_token
MASK_ID = tokenizer.mask_token_id
PAD_ID = tokenizer.pad_token_id
EOS_ID = tokenizer.eos_token_id
if MASK_ID is None: raise ValueError("Cannot determine MASK_ID.")
SPECIAL_TOKEN_IDS = {PAD_ID, EOS_ID, MASK_ID}
try:
IM_START_ID = tokenizer.convert_tokens_to_ids("<|im_start|>")
IM_END_ID = tokenizer.convert_tokens_to_ids("<|im_end|>")
SPECIAL_TOKEN_IDS.add(IM_START_ID)
SPECIAL_TOKEN_IDS.add(IM_END_ID)
except KeyError:
    IM_START_ID, IM_END_ID = None, None
# --- Helper Functions ---
def parse_constraints(constraints_text: str) -> Dict[int, List[int]]:
    constraints = {}
    if not constraints_text:
        return constraints
    parts = constraints_text.split(',')
    for part in parts:
        part = part.strip()
        if ':' not in part:
            continue
        pos_str, word = part.split(':', 1)
        try:
            pos = int(pos_str.strip())
            word = word.strip()
            token_ids = []
            if word:
                # Prepend a space for non-initial positions so the word tokenizes
                # as it would mid-sentence.
                text_to_encode = (" " + word) if (pos > 0 and not word.startswith(" ")) else word
                token_ids = tokenizer.encode(text_to_encode, add_special_tokens=False)
            if token_ids and pos >= 0:
                constraints[pos] = token_ids
            elif not token_ids and word:
                print(f"Warning: Could not tokenize constraint word '{word}'")
        except ValueError:
            print(f"Warning: Invalid position '{pos_str}' in constraint part '{part}'")
        except Exception as e:
            print(f"Warning: Error processing constraint '{part}': {e}")
    return constraints
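# Illustrative usage (exact IDs depend on the tokenizer): parse_constraints("0:Hello, 10:world")
# yields {0: encode("Hello"), 10: encode(" world")} -- position 0 keeps the word as-is,
# while later positions get a leading space so the word tokenizes as it would mid-sentence.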
# Removed format_chat_history as history will be in the correct format
def apply_constraints_to_state(
    x: torch.Tensor, prompt_length: int, total_length: int,
    parsed_constraints: Dict[int, List[int]], current_step: Optional[int] = None
) -> torch.Tensor:
    modified_x = x.clone()
    for rel_pos, word_token_ids in parsed_constraints.items():
        abs_start_pos = prompt_length + rel_pos
        abs_end_pos = abs_start_pos + len(word_token_ids)
        if abs_start_pos < total_length and abs_end_pos <= total_length:
            try:
                constraint_tensor = torch.tensor(word_token_ids, dtype=torch.long, device=modified_x.device)
                modified_x[0, abs_start_pos:abs_end_pos] = constraint_tensor
            except IndexError:
                print(f"Warning (Step {current_step}): Constraint index error at {rel_pos}")
            except Exception as e:
                print(f"Warning (Step {current_step}): Constraint apply error at {rel_pos}: {e}")
    return modified_x
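# Positions are relative to the start of the generated region: a constraint at rel_pos r
# occupies absolute indices [prompt_length + r, prompt_length + r + len(token_ids)) of x.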
# --- Core Generation Logic with Live Visualization ---
@spaces.GPU
@torch.no_grad()
def generate_dream_response(
history: List[Dict[str, str]], # MODIFIED: Expect List[Dict]
gen_length: int,
steps: int,
constraints_text: str,
temperature: float,
top_p: Optional[float],
top_k: Optional[int],
alg: str,
alg_temp: Optional[float],
visualization_delay: float
): # Removed -> type hint for cleaner yield handling
""" Generates text step-by-step and yields visualization states live. """
if not history or history[-1]["role"] != "user": # Check last message is from user
yield history, [("No user message found to respond to.", "red")]
return
# --- 1. Preparation ---
# History is already formatted for the template
parsed_constraints = parse_constraints(constraints_text)
try:
# apply_chat_template expects List[Dict[str, str]]
inputs = tokenizer.apply_chat_template(
history, # Use history directly
return_tensors="pt",
return_dict=True,
add_generation_prompt=True # Crucial: Adds the "<|im_start|>assistant\n" prompt
)
input_ids = inputs.input_ids.to(device)
prompt_attention_mask = inputs.attention_mask.to(device) if 'attention_mask' in inputs else torch.ones_like(input_ids)
prompt_length = input_ids.shape[1] # Length *after* adding the generation prompt
except Exception as e:
print(f"Error applying chat template: {e}")
# Yield current history and error message for visualization
yield history, [("Error preparing input.", "red")]
return
eps = 1e-3
top_p_val = top_p if top_p is not None and 0.0 < top_p < 1.0 else None
top_k_val = top_k if top_k is not None and top_k > 0 else None
alg_temp_val = alg_temp if alg in ['maskgit_plus', 'topk_margin', 'entropy'] and alg_temp is not None and alg_temp > 0 else None
# --- 2. Initialize Generation State ---
total_length = prompt_length + gen_length
initial_generation_part = torch.full((1, gen_length), MASK_ID, dtype=torch.long, device=device)
# input_ids already includes the assistant prompt, so just append masks
x = torch.cat((input_ids, initial_generation_part), dim=1)
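# x now has the layout [prompt tokens | MASK * gen_length]; the diffusion loop below
# progressively replaces the masked region with sampled tokens.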
# --- Prepare Attention Mask for SDPA ---
generation_attention_mask = torch.ones((1, gen_length), dtype=torch.long, device=device)
# prompt_attention_mask corresponds to input_ids (which includes assistant prompt)
full_attention_mask_long = torch.cat((prompt_attention_mask, generation_attention_mask), dim=1)
attention_mask_for_model = full_attention_mask_long.to(model.dtype)
large_neg_val = torch.finfo(model.dtype).min
attention_mask_for_model = (1.0 - attention_mask_for_model) * large_neg_val
attention_mask_for_model = attention_mask_for_model.unsqueeze(1).unsqueeze(2) # [B, 1, 1, N]
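# Additive mask convention for SDPA: positions with mask 1 map to 0.0 (attended) and
# positions with mask 0 map to the dtype's minimum (blocked); the [B, 1, 1, N] shape
# broadcasts across attention heads and query positions.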
# --- Timesteps ---
timesteps = torch.linspace(1, eps, steps + 1, device=device)
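# The schedule runs t from 1.0 down to eps over steps+1 points. At step i, with
# t = timesteps[i] and s = timesteps[i+1], the expected fraction of still-masked
# tokens revealed is 1 - s/t; the final step reveals everything that remains masked.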
# Apply initial constraints (relative to start of generation = prompt_length)
x = apply_constraints_to_state(x, prompt_length, total_length, parsed_constraints, current_step=-1)
# --- 3. Visualization & History Setup ---
previous_tokens_vis = None
# MODIFIED: Append placeholder assistant message to the history state *before* looping
history.append({"role": "assistant", "content": ""})
# --- 4. Initial Yield (Masked State) ---
initial_generated_tokens = x[0, prompt_length:].cpu()
vis_data_initial = []
for tok_id in initial_generated_tokens.tolist():
    # Everything starts masked, so the first frame is uniformly gray.
    display_token = MASK_TOKEN
    color = "#444444"
    vis_data_initial.append((display_token, color))
previous_tokens_vis = initial_generated_tokens
# Yield the history (which now includes the empty assistant message) and initial vis
yield history, vis_data_initial
time.sleep(visualization_delay)
# --- 5. Step-by-Step Diffusion Loop ---
try:
start_time = time.time()
for i in range(steps):
mask_index = (x == MASK_ID)
if not mask_index.any():
    break  # Nothing left to unmask
outputs = model(input_ids=x, attention_mask=attention_mask_for_model, return_dict=True)
logits = outputs.logits
# Shift logits right by one position so that logits[:, i] scores the token at
# position i (the model predicts the next token at each position).
logits = torch.cat([logits[:, :1], logits[:, :-1]], dim=1)
mask_logits = logits[mask_index]
if mask_logits.numel() == 0:
    break  # No masked positions left to score
t = timesteps[i]
s = timesteps[i + 1]
x_new_masked_part = torch.full_like(x[mask_index], MASK_ID, device=device, dtype=torch.long)
# [Keep sampling/remasking logic ('origin' and confidence-based) exactly the same]
if alg == 'origin':
    # Baseline strategy: each masked position is independently revealed with
    # probability 1 - s/t; the final step reveals everything still masked.
    p_transfer = (1.0 - s / t) if i < steps - 1 else 1.0
    num_masked = mask_logits.shape[0]
    transfer_indices_relative = torch.rand(num_masked, device=device) < p_transfer
    logits_to_sample = mask_logits[transfer_indices_relative]
    if logits_to_sample.numel() > 0:
        _, sampled_tokens = sample_tokens(logits_to_sample, temperature=temperature, top_p=top_p_val, top_k=top_k_val)
        x_new_masked_part[transfer_indices_relative] = sampled_tokens
else:
    # Confidence-based strategies: 'maskgit_plus', 'topk_margin', 'entropy'.
    use_margin = (alg == 'topk_margin')
    use_entropy = (alg == 'entropy')
    confidence, x0_candidates = sample_tokens(mask_logits, temperature=temperature, top_p=top_p_val, top_k=top_k_val, margin_confidence=use_margin, neg_entropy=use_entropy)
    num_mask_token = mask_logits.shape[0]
    target_num_revealed_float = num_mask_token * (1.0 - s / t)
    number_transfer_tokens = int(target_num_revealed_float) if i < steps - 1 else num_mask_token
    if number_transfer_tokens > 0:
        num_samples = min(number_transfer_tokens, num_mask_token)
        if num_samples > 0:
            transfer_indices_relative = torch.tensor([], dtype=torch.long, device=device)
            if alg_temp_val is None or alg_temp_val <= 0:
                # Deterministic top-k selection by confidence (sign flipped for the entropy metric).
                sort_metric = confidence if alg != 'entropy' else -confidence
                k_topk = min(num_samples, sort_metric.numel())
                if k_topk > 0:
                    _, transfer_indices_relative = torch.topk(sort_metric, k=k_topk)
            else:
                # Stochastic selection: sample positions from a softmax over tempered confidences.
                if confidence.numel() > 0:
                    conf_probs = confidence / alg_temp_val
                    conf_probs = torch.nan_to_num(conf_probs, nan=0.0, posinf=1e9, neginf=-1e9)
                    conf_probs = torch.clamp(conf_probs - conf_probs.max(), min=-30)
                    conf_probs = F.softmax(conf_probs, dim=-1)
                    conf_probs = torch.clamp(conf_probs, min=0.0)
                    conf_probs = torch.nan_to_num(conf_probs, nan=0.0)
                    prob_sum = conf_probs.sum()
                    target_sum_tensor = torch.tensor(1.0, device=device, dtype=prob_sum.dtype)
                    if not torch.isclose(prob_sum, target_sum_tensor, atol=1e-4) and prob_sum > 0:
                        safe_prob_sum = torch.max(prob_sum, torch.tensor(1e-12, device=device, dtype=prob_sum.dtype))
                        conf_probs = conf_probs / safe_prob_sum
                    final_prob_sum_check = conf_probs.sum()
                    if conf_probs.numel() > 0 and num_samples > 0 and torch.all(conf_probs >= 0) and torch.isclose(final_prob_sum_check, target_sum_tensor, atol=1e-4):
                        try:
                            transfer_indices_relative = torch.multinomial(conf_probs, num_samples=num_samples, replacement=False)
                        except RuntimeError as e:
                            print(f"Warning step {i}: Multinomial failed ('{e}'). Falling back to top-k.")
                            sort_metric = confidence if alg != 'entropy' else -confidence
                            k_fallback = min(num_samples, sort_metric.numel())
                            if k_fallback > 0:
                                _, transfer_indices_relative = torch.topk(sort_metric, k=k_fallback)
                    else:
                        sort_metric = confidence if alg != 'entropy' else -confidence
                        k_fallback = min(num_samples, sort_metric.numel())
                        if k_fallback > 0:
                            _, transfer_indices_relative = torch.topk(sort_metric, k=k_fallback)
            # Apply the selected transfers to the masked part of the sequence.
            if transfer_indices_relative.numel() > 0:
                valid_indices = transfer_indices_relative < x0_candidates.shape[0]
                valid_transfer_indices = transfer_indices_relative[valid_indices]
                if valid_transfer_indices.numel() > 0:
                    if valid_transfer_indices.max() < x_new_masked_part.shape[0]:
                        x_new_masked_part[valid_transfer_indices] = x0_candidates[valid_transfer_indices].clone()
                    else:
                        print(f"Warning step {i}: transfer indices out of bounds for x_new_masked_part.")
x[mask_index] = x_new_masked_part # Update state
# --- Apply Constraints ---
# Remember prompt_length now includes the assistant prompt turn
x = apply_constraints_to_state(x, prompt_length, total_length, parsed_constraints, current_step=i)
# --- Yield Visualization ---
current_generated_tokens = x[0, prompt_length:].cpu()
vis_data = []
# [Keep visualization formatting logic the same]
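# Color legend: "#444444" = still masked, "#66CC66" = revealed at this step,
# "#6699CC" = revealed at an earlier step; PAD/EOS tokens are hidden once they
# stop changing between steps.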
for j in range(gen_length):
    current_tok_id = current_generated_tokens[j].item()
    previous_tok_id = previous_tokens_vis[j].item() if previous_tokens_vis is not None and j < len(previous_tokens_vis) else MASK_ID
    try:
        decoded_token = tokenizer.decode([current_tok_id], skip_special_tokens=False, clean_up_tokenization_spaces=False)
        display_token = MASK_TOKEN if current_tok_id == MASK_ID else decoded_token
    except Exception:
        display_token = f"[ID:{current_tok_id}]"
    color = None
    token_to_display = display_token
    if current_tok_id == MASK_ID:
        color = "#444444"
    elif previous_tok_id == MASK_ID:
        color = "#66CC66"
    else:
        color = "#6699CC"
    should_hide = (PAD_ID is not None and current_tok_id == PAD_ID) or (EOS_ID is not None and current_tok_id == EOS_ID)
    if should_hide and previous_tok_id == current_tok_id:
        token_to_display = ""
        color = None
    if token_to_display:
        vis_data.append((token_to_display, color))
previous_tokens_vis = current_generated_tokens
# MODIFIED: Update the *content* of the last history item
intermediate_response_tokens = x[0, prompt_length:]
intermediate_response_text = tokenizer.decode(intermediate_response_tokens, skip_special_tokens=True, clean_up_tokenization_spaces=True).strip()
history[-1]["content"] = intermediate_response_text # Update last dict entry
# Yield the updated history list and current vis data
yield history, vis_data
time.sleep(visualization_delay)
end_time = time.time()
print(f"Dream generation finished in {end_time - start_time:.2f} seconds.")
# --- 6. Final Processing & Yield ---
final_sequence = x[0]
response_tokens = final_sequence[prompt_length:]
final_response_text = tokenizer.decode(response_tokens, skip_special_tokens=True, clean_up_tokenization_spaces=True).strip()
# Update the final content in the history object
history[-1]["content"] = final_response_text
final_generated_tokens = x[0, prompt_length:].cpu()
vis_data_final = []
# [Keep final visualization formatting logic the same]
for j in range(gen_length):
    current_tok_id = final_generated_tokens[j].item()
    previous_tok_id = previous_tokens_vis[j].item() if previous_tokens_vis is not None and j < len(previous_tokens_vis) else MASK_ID
    try:
        decoded_token = tokenizer.decode([current_tok_id], skip_special_tokens=False, clean_up_tokenization_spaces=False)
        display_token = MASK_TOKEN if current_tok_id == MASK_ID else decoded_token
    except Exception:
        display_token = f"[ID:{current_tok_id}]"
    color = None
    token_to_display = display_token
    if current_tok_id == MASK_ID:
        color = "#444444"
    elif previous_tok_id == MASK_ID:
        color = "#66CC66"
    else:
        color = "#6699CC"
    should_hide = (PAD_ID is not None and current_tok_id == PAD_ID) or (EOS_ID is not None and current_tok_id == EOS_ID)
    if should_hide and previous_tok_id == current_tok_id:
        token_to_display = ""
        color = None
    if token_to_display:
        vis_data_final.append((token_to_display, color))
# Yield final history and visualization
yield history, vis_data_final
print("Visualization streaming complete.")
except Exception as e:
print(f"Error during generation or processing: {e}")
import traceback
traceback.print_exc()
# Surface the error in the assistant message and yield an error visualization.
history[-1]["content"] = f"Error: {e}"
yield history, [("Error during generation.", "red")]
return
# --- Gradio UI ---
css = '''
.category-legend{display:none}
button{min-height: 60px}
'''
def create_chatbot_demo():
with gr.Blocks(css=css, theme=gr.themes.Soft()) as demo:
gr.Markdown("# Dream 7B - Diffusion Language Model Demo")
gr.Markdown(
"[[Model Card](https://huggingface.co/Dream-org/Dream-v0-Instruct-7B)] "
"[[Blog](https://hkunlp.github.io/blog/2025/dream/)]"
)
# STATE: No explicit state needed if chatbot manages it via input/output
with gr.Row():
with gr.Column(scale=3):
# MODIFIED: Use type="messages"
chatbot_ui = gr.Chatbot(
label="Conversation",
type="messages", # Use dictionary format
height=500,
show_copy_button=True,
bubble_full_width=False,
)
with gr.Group():
with gr.Row():
user_input = gr.Textbox(
label="Your Message", placeholder="Type your message here...",
scale=7, autofocus=True, show_label=False, container=False
)
send_btn = gr.Button("Send", scale=1, variant="primary")
constraints_input = gr.Textbox(
label="Word Constraints (Optional)",
info="Format: 'pos:word, pos:word,...'. Example: '0:Once, 5:upon, 10:time'",
placeholder="0:Hello, 10:world", value=""
)
with gr.Column(scale=2):
output_vis = gr.HighlightedText(
label="Denoising Process Visualization",
combine_adjacent=True, show_legend=False, interactive=False
)
# REMOVED: Separate response text display
with gr.Accordion("Generation Settings", open=False):
# [Settings sliders remain the same]
with gr.Row():
gen_length = gr.Slider(minimum=16, maximum=512, value=128, step=8, label="Max New Tokens")
steps = gr.Slider(minimum=8, maximum=512, value=128, step=8, label="Diffusion Steps")
with gr.Row():
temperature = gr.Slider(minimum=0.0, maximum=1.0, value=0.4, step=0.05, label="Temperature (0 = greedy)")
alg_temp = gr.Slider(minimum=0.0, maximum=1.0, value=0.1, step=0.05, label="Remasking Temp (Confidence Algs)")
with gr.Row():
top_p = gr.Slider(minimum=0.0, maximum=1.0, value=0.95, step=0.05, label="Top-P (0 disables)")
top_k = gr.Slider(minimum=0, maximum=200, value=0, step=5, label="Top-K (0 disables)")
with gr.Row():
remasking_strategy = gr.Radio(choices=['origin', 'maskgit_plus', 'topk_margin', 'entropy'], value='entropy', label="Remasking Strategy (Algorithm)")
with gr.Row():
visualization_delay = gr.Slider(minimum=0.0, maximum=0.5, value=0.03, step=0.01, label="Visualization Delay (seconds)")
clear_btn = gr.Button("Clear Conversation")
# --- Event Handlers ---
# MODIFIED: add_user_message uses dictionary format
def add_user_message(message: str, history: List[Dict[str, str]]):
"""Adds user message in dictionary format, clears input."""
if not message.strip():
    gr.Warning("Please enter a message.")
    return history, ""  # History unchanged; the blank input is simply cleared
# Append user message as a dictionary
history.append({"role": "user", "content": message})
# Return updated history, clear input box
return history, ""
def clear_all():
"""Clears chatbot, visualization, and input."""
return [], [], "" # Chatbot, Vis, Input
# --- Connect UI elements ---
# Define the inputs for the generation function
# MODIFIED: Input is chatbot_ui (provides List[Dict])
generation_inputs = [
chatbot_ui, # Get history directly from chatbot component
gen_length, steps, constraints_input,
temperature, top_p, top_k, remasking_strategy, alg_temp,
visualization_delay
]
# Define the outputs for the generation function
# MODIFIED: Output history (List[Dict]) to chatbot_ui, vis_data to output_vis
generation_outputs = [chatbot_ui, output_vis]
# Handle Textbox Submission (Enter key)
submit_listener = user_input.submit(
fn=add_user_message, # Use modified function
inputs=[user_input, chatbot_ui], # Pass chatbot state
outputs=[chatbot_ui, user_input], # Update chatbot state, clear input
queue=False # User message add should be quick
).then(
fn=generate_dream_response,
inputs=generation_inputs,
outputs=generation_outputs, # Stream history to chatbot, vis to output_vis
show_progress="hidden"
)
# Handle Send Button Click
click_listener = send_btn.click(
fn=add_user_message, # Use modified function
inputs=[user_input, chatbot_ui], # Pass chatbot state
outputs=[chatbot_ui, user_input], # Update chatbot state, clear input
queue=False # User message add should be quick
).then(
fn=generate_dream_response,
inputs=generation_inputs,
outputs=generation_outputs, # Stream history to chatbot, vis to output_vis
show_progress="hidden"
)
# Clear Button Action
clear_btn.click(
clear_all, # Use modified clear function
inputs=[],
outputs=[chatbot_ui, output_vis, user_input], # Clear chatbot, vis, input
queue=False
)
return demo
# --- Launch ---
if __name__ == "__main__":
demo = create_chatbot_demo()
demo.queue().launch(debug=True, share=False)