# dream_app.py

import torch
import numpy as np
import gradio as gr
import spaces  # Needed for the @spaces.GPU decorator on ZeroGPU Spaces
import torch.nn.functional as F
from transformers import AutoTokenizer, AutoModel, AutoConfig
import time
import re
from typing import List, Dict, Tuple, Optional
import torch.distributions as dists  # Used by sample_tokens

# --- START: Copied helper functions from generation_utils.py ---
# These are needed because we reimplement the sampling loop locally.


def top_p_logits(logits, top_p=None):
    """Applies top-p (nucleus) filtering to logits."""
    if top_p is None or top_p >= 1.0:
        return logits
    sorted_logits, sorted_indices = torch.sort(logits, descending=True)
    cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
    sorted_indices_to_remove = cumulative_probs > top_p
    # Shift the indices to the right to keep the first token above the threshold
    sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
    sorted_indices_to_remove[..., 0] = 0
    mask = torch.zeros_like(logits, dtype=torch.bool, device=logits.device)
    mask = mask.scatter_(-1, sorted_indices, sorted_indices_to_remove)
    logits = logits.masked_fill(mask, torch.finfo(logits.dtype).min)
    return logits
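
# Illustrative sketch (hypothetical numbers, not executed): for probabilities
# [0.5, 0.3, 0.15, 0.05] and top_p=0.8, the cumulative sums are
# [0.5, 0.8, 0.95, 1.0]; after the right-shift the first three tokens survive
# and the last is masked down to the dtype minimum:
#   demo = torch.log(torch.tensor([[0.5, 0.3, 0.15, 0.05]]))
#   top_p_logits(demo.clone(), top_p=0.8)  # last entry becomes ~dtype min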


def top_k_logits(logits, top_k=None):
    """Applies top-k filtering to logits."""
    if top_k is None or top_k <= 0:
        return logits
    top_k = min(top_k, logits.size(-1))  # Safety check
    # Remove all tokens with a logit below the k-th largest logit
    indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
    logits = logits.masked_fill(indices_to_remove, torch.finfo(logits.dtype).min)
    return logits
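
# Illustrative sketch (hypothetical numbers): with logits [4.0, 2.0, 1.0, 0.5]
# and top_k=2, the k-th largest logit is 2.0, so the last two entries are
# masked to the dtype minimum and only the top two remain sampleable:
#   demo = torch.tensor([[4.0, 2.0, 1.0, 0.5]])
#   top_k_logits(demo.clone(), top_k=2)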


def sample_tokens(logits, temperature=0.0, top_p=None, top_k=None, margin_confidence=False, neg_entropy=False):
    """Samples tokens from logits and returns a per-token confidence score."""
    if temperature > 0:
        logits = logits / temperature
    if top_p is not None and top_p < 1.0:  # Apply top-p if valid
        logits = top_p_logits(logits, top_p)
    if top_k is not None and top_k > 0:  # Apply top-k if valid
        logits = top_k_logits(logits, top_k)
    # Replace fully-masked (dtype-min) logits with a large negative number so
    # softmax stays finite even if filtering removed every candidate.
    logits = torch.where(logits == torch.finfo(logits.dtype).min, torch.full_like(logits, -1e9), logits)
    probs = torch.softmax(logits, dim=-1)

    if temperature > 0:
        try:
            # Check for NaNs or Infs in probs before sampling
            if torch.isnan(probs).any() or torch.isinf(probs).any():
                print("Warning: NaN or Inf detected in probabilities before sampling. Attempting to recover.")
                probs = torch.nan_to_num(probs, nan=0.0, posinf=0.0, neginf=0.0)
                if probs.sum() == 0:  # All probabilities collapsed to zero
                    print("Warning: All probabilities became zero. Sampling uniformly.")
                    probs = torch.ones_like(probs) / probs.shape[-1]
                else:
                    probs = probs / probs.sum(dim=-1, keepdim=True)  # Re-normalize
            x0 = dists.Categorical(probs=probs).sample()
            confidence = torch.gather(probs, -1, x0.unsqueeze(-1)).squeeze(-1)
        except Exception as e:  # Catch broader exceptions during sampling
            print(f"Warning: Error during Categorical sampling: {e}. Falling back to argmax.")
            confidence, x0 = probs.max(dim=-1)
    else:
        confidence, x0 = probs.max(dim=-1)

    if margin_confidence:
        sorted_probs, _ = torch.sort(probs, dim=-1, descending=True)
        top1_probs = sorted_probs[..., 0]
        # Handle vocabularies with only a single candidate token
        top2_probs = sorted_probs[..., 1] if sorted_probs.shape[-1] > 1 else top1_probs
        confidence = top1_probs - top2_probs

    if neg_entropy:
        epsilon = 1e-10
        log_probs = torch.log(probs + epsilon)
        confidence = torch.sum(probs * log_probs, dim=-1)  # Negative entropy (always <= 0)

    return confidence, x0
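
# Usage sketch (hypothetical shapes): given `mask_logits` of shape [M, V],
#   conf, x0 = sample_tokens(mask_logits, temperature=0.4, top_p=0.95)
# returns `x0` ([M]) with one sampled token id per masked position, and
# `conf` ([M]) holding that token's probability (or the top-1/top-2 margin,
# or negative entropy, when the corresponding flag is set).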

# --- END: Copied helper functions ---

# Use AutoModel for the base model loading, relying on trust_remote_code=True
# for the custom DreamModel class and generation mixin.
model_path = "Dream-org/Dream-v0-Instruct-7B"

# Load model configuration to get special token IDs
config = AutoConfig.from_pretrained(model_path, trust_remote_code=True)

# Determine device
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print(f"Using device: {device}")

# Load model and tokenizer
print("Loading tokenizer...")
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
print("Loading model...")
model = AutoModel.from_pretrained(
    model_path,
    torch_dtype=torch.bfloat16 if device == 'cuda' else torch.float32,  # bfloat16 only on CUDA
    trust_remote_code=True,
    # attn_implementation="flash_attention_2"  # Optional: speed up if FA2 is available
)
model = model.to(device).eval()
print("Model loaded.")

# Constants from Dream's config/tokenizer. The same IDs are also exposed on
# `config` (config.mask_token_id etc.) and should be consistent.
MASK_TOKEN = tokenizer.mask_token
MASK_ID = tokenizer.mask_token_id
PAD_ID = tokenizer.pad_token_id
EOS_ID = tokenizer.eos_token_id

# Ensure mask_token_id is correctly identified
if MASK_ID is None:
    print("Warning: mask_token_id not set on the tokenizer. Falling back to config...")
    MASK_ID = getattr(config, "mask_token_id", None)
    if MASK_ID is None and MASK_TOKEN:
        MASK_ID = tokenizer.convert_tokens_to_ids(MASK_TOKEN)
    if MASK_ID is None:
        raise ValueError("Cannot determine MASK_ID. Check the model's tokenizer configuration.")
    print(f"Resolved MASK_ID: {MASK_ID}")

# EOS_ID and PAD_ID need no extra handling here; Dream uses the same ID for both.
SPECIAL_TOKEN_IDS = {PAD_ID, EOS_ID, MASK_ID}

# Also track the chat-template delimiters so they can be hidden/handled specially
try:
    IM_START_ID = tokenizer.convert_tokens_to_ids("<|im_start|>")
    IM_END_ID = tokenizer.convert_tokens_to_ids("<|im_end|>")
    SPECIAL_TOKEN_IDS.add(IM_START_ID)
    SPECIAL_TOKEN_IDS.add(IM_END_ID)
except KeyError:
    print("Warning: <|im_start|> or <|im_end|> not found in tokenizer vocab.")
    IM_START_ID = None
    IM_END_ID = None
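
# Note (illustrative; exact IDs are tokenizer-dependent): SPECIAL_TOKEN_IDS
# ends up as a small set such as {PAD_ID/EOS_ID, MASK_ID, IM_START_ID,
# IM_END_ID}; the visualization below compares PAD_ID/EOS_ID directly to
# decide which settled tokens to hide.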

# --- Helper Functions ---


def parse_constraints(constraints_text: str) -> Dict[int, List[int]]:
    """
    Parse constraints in the format 'position:word, position:word, ...'.
    Returns a dictionary mapping the starting position (0-indexed from the
    start of the *generated* sequence) to the token IDs of the constraint word.
    """
    constraints = {}
    if not constraints_text:
        return constraints
    parts = constraints_text.split(',')
    for part in parts:
        part = part.strip()
        if ':' not in part:
            continue
        pos_str, word = part.split(':', 1)
        try:
            # Position is relative to the start of the *generation*
            pos = int(pos_str.strip())
            word = word.strip()
            # Tokenizers usually encode " word" differently from "word". For
            # positions past the start of the generation, prefix a space so the
            # constraint tokenizes as it would mid-sentence; at position 0 (or
            # if the user already included a space) tokenize it as given.
            if pos > 0 and not word.startswith(" "):
                token_ids = tokenizer.encode(" " + word, add_special_tokens=False)
            else:
                token_ids = tokenizer.encode(word, add_special_tokens=False)
            if token_ids and pos >= 0:
                constraints[pos] = token_ids
            elif not token_ids:
                print(f"Warning: Could not tokenize constraint word '{word}'")
        except ValueError:
            print(f"Warning: Invalid position '{pos_str}' in constraint part '{part}'")
            continue  # Ignore malformed constraint parts
        except Exception as e:
            print(f"Warning: Error processing constraint '{part}': {e}")
            continue
    print(f"Parsed constraints: {constraints}")  # Debugging
    return constraints
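
# Example (token IDs depend on the tokenizer, shown symbolically):
#   parse_constraints("0:Once, 5:upon")
#   -> {0: tokenizer.encode("Once", add_special_tokens=False),
#       5: tokenizer.encode(" upon", add_special_tokens=False)}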


def format_chat_history(history: List[List[Optional[str]]]) -> List[Dict[str, str]]:
    """
    Format chat history for the Dream model's chat template.

    Args:
        history: List of [user_message, assistant_message] pairs.
                 The last assistant_message may be None.

    Returns:
        A list of message dictionaries for tokenizer.apply_chat_template.
    """
    messages = []
    for user_msg, assistant_msg in history:
        if user_msg:  # Defensive check
            messages.append({"role": "user", "content": user_msg})
        # The assistant message is absent for the last turn before generation
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    return messages
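
# Example:
#   format_chat_history([["Hi", "Hello!"], ["How are you?", None]])
#   -> [{"role": "user", "content": "Hi"},
#       {"role": "assistant", "content": "Hello!"},
#       {"role": "user", "content": "How are you?"}]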


def apply_constraints_to_state(
    x: torch.Tensor,
    prompt_length: int,
    total_length: int,
    parsed_constraints: Dict[int, List[int]],
    current_step: Optional[int] = None  # For logging/debugging
) -> torch.Tensor:
    """Applies constraints directly to the state tensor `x`."""
    modified_x = x.clone()  # Work on a copy so the caller's tensor is untouched
    for rel_pos, word_token_ids in parsed_constraints.items():
        abs_start_pos = prompt_length + rel_pos
        abs_end_pos = abs_start_pos + len(word_token_ids)
        # Ensure the constraint fits within the total sequence length
        if abs_start_pos < total_length and abs_end_pos <= total_length:
            try:
                constraint_tensor = torch.tensor(word_token_ids, dtype=torch.long, device=modified_x.device)
                # Force the constraint tokens onto the sequence
                modified_x[0, abs_start_pos:abs_end_pos] = constraint_tensor
            except IndexError:
                print(f"Warning (Step {current_step}): Constraint at {rel_pos} ('{tokenizer.decode(word_token_ids)}') goes out of bounds.")
            except Exception as e:
                print(f"Warning (Step {current_step}): Failed to apply constraint at {rel_pos}: {e}")
    return modified_x
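
# Sketch (hypothetical numbers): with prompt_length=10 and a parsed constraint
# {2: [1234, 5678]}, absolute positions 12-13 of `x` are overwritten with those
# token IDs on every step, so sampling can never erase a constrained word.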


# --- Core Generation Logic with Live Visualization ---

@spaces.GPU       # Hugging Face Spaces decorator: requests a GPU (ZeroGPU)
@torch.no_grad()  # Ensure no gradients are computed during generation
def generate_dream_response(
    history: List[List[Optional[str]]],
    gen_length: int,
    steps: int,
    constraints_text: str,
    temperature: float,
    top_p: Optional[float],
    top_k: Optional[int],
    alg: str,
    alg_temp: Optional[float],
    visualization_delay: float
) -> List[Tuple[str, str]]:
""" | |
Generates text using the Dream model step-by-step and yields visualization states live. | |
Args: | |
history: Chat history. | |
gen_length: Max new tokens to generate. | |
steps: Number of diffusion steps. | |
constraints_text: User-provided constraints string. | |
temperature: Sampling temperature. | |
top_p: Top-p sampling nucleus. Clamp to < 1.0 or None. | |
top_k: Top-k sampling. Clamp to > 0 or None. | |
alg: Remasking algorithm ('origin', 'maskgit_plus', 'topk_margin', 'entropy'). | |
alg_temp: Temperature for confidence-based algorithms. | |
visualization_delay: Delay between visualization steps. | |
Yields: | |
Tuple[List[List[Optional[str]]], List[Tuple[str, Optional[str]]], str]: | |
- Updated history (may be intermediate until final response) | |
- Visualization data for HighlightedText for the current step | |
- Intermediate or Final response text (yielded repeatedly) | |
""" | |
    if not history or not history[-1][0]:
        yield history, [("No input message found.", "red")], ""
        return

    # --- 1. Preparation ---
    last_user_message = history[-1][0]
    messages_for_template = format_chat_history(history)  # Includes the latest user message

    # Parse constraints relative to the *generated* sequence
    parsed_constraints = parse_constraints(constraints_text)  # Dict[rel_pos, List[token_id]]

    # Prepare inputs using the chat template
    try:
        inputs = tokenizer.apply_chat_template(
            messages_for_template,
            return_tensors="pt",
            return_dict=True,
            add_generation_prompt=True  # Important for instruct models
        )
        input_ids = inputs.input_ids.to(device)
        prompt_attention_mask = inputs.attention_mask.to(device)  # Mask for the prompt part
        prompt_length = input_ids.shape[1]
    except Exception as e:
        print(f"Error applying chat template: {e}")
        yield history, [("Error preparing input.", "red")], ""
        return

    # --- Config parameters for the loop ---
    eps = 1e-3  # Default from DreamGenerationConfig; make configurable if needed

    # Clamp top_p and top_k to values the filtering functions accept
    # (<= 0 or >= 1 disables top-p; 0 disables top-k, matching the UI labels)
    top_p_val = top_p if top_p is not None and 0.0 < top_p < 1.0 else None
    top_k_val = top_k if top_k is not None and top_k > 0 else None
    alg_temp_val = alg_temp if alg in ['maskgit_plus', 'topk_margin', 'entropy'] else None

    # --- 2. Initialize Generation State ---
    total_length = prompt_length + gen_length

    # Initial state: prompt + MASK tokens
    initial_generation_part = torch.full((1, gen_length), MASK_ID, dtype=torch.long, device=device)
    x = torch.cat((input_ids, initial_generation_part), dim=1)

    # Full attention mask over the prompt plus the generated part
    generation_attention_mask = torch.ones((1, gen_length), dtype=torch.long, device=device)
    full_attention_mask = torch.cat((prompt_attention_mask, generation_attention_mask), dim=1)
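
    # Shape sketch (hypothetical sizes): with prompt_length=10 and gen_length=4,
    # `x` is [1, 14] (ten prompt tokens followed by four MASK_IDs) and
    # `full_attention_mask` is [1, 14] of ones when the prompt has no padding.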

    # The original `diffusion_generate` prepares a broadcastable [B, 1, N, N]
    # mask when padding is present and otherwise uses full attention. Mask
    # tokens do not depend on future masks, so full attention is appropriate
    # here; the padding branch is replicated for completeness.
    if torch.any(full_attention_mask == 0):  # Handle padding if present (unlikely with the chat template)
        tok_idx = full_attention_mask.long().cumsum(-1) - 1
        tok_idx.masked_fill_(full_attention_mask == 0, 1)  # The original code uses index 1 for padding positions
        attention_mask_for_model = torch.logical_and(
            full_attention_mask.unsqueeze(1).unsqueeze(-2),
            full_attention_mask.unsqueeze(1).unsqueeze(-1),
        )  # Shape [B, 1, N, N]
    else:
        tok_idx = None
        attention_mask_for_model = None  # None implies full attention in the model

    # Timesteps for diffusion
    timesteps = torch.linspace(1, eps, steps + 1, device=device)
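
    # Schedule sketch (hypothetical steps=4, eps=1e-3): timesteps is roughly
    # [1.0, 0.75, 0.50, 0.25, 0.001]; each iteration moves from t to the next
    # smaller s, revealing a fraction (1 - s/t) of the remaining masked tokens.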

    # Apply initial constraints (before the first step)
    x = apply_constraints_to_state(x, prompt_length, total_length, parsed_constraints, current_step=-1)

    # --- 3. Visualization Setup ---
    previous_tokens_vis = None  # Previous step's tokens, used for coloring
    final_response_text = ""    # Stores the final decoded text
    history_copy = [list(item) for item in history]  # Mutable copy

    # --- 4. Initial Yield (Masked State) ---
    initial_generated_tokens = x[0, prompt_length:].cpu()
    vis_data_initial = []
    for tok_id in initial_generated_tokens.tolist():
        display_token = MASK_TOKEN
        color = "#444444"  # Dark gray for masks
        vis_data_initial.append((display_token, color))
    previous_tokens_vis = initial_generated_tokens
    yield history_copy, vis_data_initial, ""  # Yield the fully masked initial state
    time.sleep(visualization_delay)

    # --- 5. Step-by-Step Diffusion Loop ---
    try:
        start_time = time.time()
        for i in range(steps):
            # --- Model Forward Pass ---
            mask_index = (x == MASK_ID)  # Find masks in the *current* state x
            if not mask_index.any():  # Stop early if no masks are left
                print(f"No mask tokens left at step {i}. Stopping early.")
                break

            # The model forward expects `attention_mask` of shape [B, N] (or
            # broadcastable); pass the [B, N] mask and let the model handle it.
            outputs = model(
                input_ids=x,
                attention_mask=full_attention_mask,  # [B, N] mask
                position_ids=None,  # Let the model compute default positions
                use_cache=False,    # No cache needed for diffusion steps
                return_dict=True
            )
            logits = outputs.logits

            # Shift logits as in the original generation_utils.py: a standard LM
            # head emits logits[t] that predict token[t+1]; the shift aligns
            # logits[t] with position t so each mask reads its own prediction.
            logits = torch.cat([logits[:, :1], logits[:, :-1]], dim=1)

            # Select logits at masked positions: mask_index is [B, N] and logits
            # is [B, N, V], so indexing yields [num_masked_tokens, V].
            mask_logits = logits[mask_index]
            if mask_logits.numel() == 0:
                print(f"No masked tokens found for logit selection at step {i}. Stopping.")
                break

            # --- Sampling / Remasking Logic ---
            t = timesteps[i]
            s = timesteps[i + 1]
            x_new_masked_part = torch.full_like(x[mask_index], MASK_ID, device=device, dtype=torch.long)

            if alg == 'origin':
                # Original diffusion logic: each mask is revealed independently
                # with probability (1 - s/t); the last step reveals everything.
                p_transfer = (1.0 - s / t) if i < steps - 1 else 1.0
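                # Worked numbers (hypothetical): at t=0.75, s=0.50 the reveal
                # probability is 1 - 0.50/0.75 = 1/3, so on average a third of
                # the remaining masks get sampled on this step.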
                # Sample only for the tokens to be revealed in this step
                num_masked = mask_logits.shape[0]
                transfer_indices_relative = torch.rand(num_masked, device=device) < p_transfer
                logits_to_sample = mask_logits[transfer_indices_relative]
                if logits_to_sample.numel() > 0:
                    _, sampled_tokens = sample_tokens(logits_to_sample, temperature=temperature, top_p=top_p_val, top_k=top_k_val)
                    # Place sampled tokens into the correct positions within the masked part
                    x_new_masked_part[transfer_indices_relative] = sampled_tokens
            else:
                # Confidence-based algorithms (maskgit_plus, topk_margin, entropy)
                use_margin = (alg == 'topk_margin')
                use_entropy = (alg == 'entropy')
                confidence, x0_candidates = sample_tokens(
                    mask_logits,
                    temperature=temperature,
                    top_p=top_p_val,
                    top_k=top_k_val,
                    margin_confidence=use_margin,
                    neg_entropy=use_entropy
                )

                num_mask_token = mask_logits.shape[0]
                # Number of tokens to reveal this step, derived from the schedule
                target_num_revealed_float = num_mask_token * (1.0 - s / t)
                number_transfer_tokens = int(target_num_revealed_float) if i < steps - 1 else num_mask_token
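                # Worked numbers (hypothetical): with 30 masks left, t=0.50 and
                # s=0.25, the target is 30 * (1 - 0.25/0.50) = 15 tokens; the 15
                # highest-confidence candidates (or a tempered random subset)
                # are revealed below.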
                if number_transfer_tokens > 0:
                    if alg_temp_val is None or alg_temp_val <= 0:
                        # Deterministic: reveal the most confident tokens. For
                        # 'entropy', confidence is negative entropy, so higher
                        # (closer to 0) still means more certain; top-k on the
                        # raw confidence picks the least uncertain positions.
                        _, transfer_indices_relative = torch.topk(confidence, k=min(number_transfer_tokens, num_mask_token))
                    else:  # Stochastic: sample positions from a confidence-tempered distribution
                        conf_probs = confidence / alg_temp_val
                        # Guard against inf/-inf before softmax
                        conf_probs = torch.nan_to_num(conf_probs, nan=0.0, posinf=1e9, neginf=-1e9)
                        conf_probs = F.softmax(conf_probs, dim=-1)
                        # Ensure the probabilities sum to 1
                        if not torch.allclose(conf_probs.sum(), torch.tensor(1.0, device=device), atol=1e-4):
                            print(f"Warning step {i}: Confidence probabilities do not sum to 1 after softmax ({conf_probs.sum()}). Re-normalizing.")
                            conf_probs = conf_probs / conf_probs.sum(dim=-1, keepdim=True)
                        # Ensure num_samples is valid
                        num_samples = min(number_transfer_tokens, num_mask_token)
                        if conf_probs.numel() > 0 and num_samples > 0:
                            try:
                                transfer_indices_relative = torch.multinomial(conf_probs, num_samples=num_samples, replacement=False)
                            except RuntimeError as e:
                                print(f"Warning step {i}: Multinomial sampling failed ('{e}'). Falling back to top-k.")
                                _, transfer_indices_relative = torch.topk(confidence, k=num_samples)
                        else:
                            transfer_indices_relative = torch.tensor([], dtype=torch.long, device=device)

                # Place the selected candidate tokens into the masked-part update
                if transfer_indices_relative.numel() > 0:
                    x_new_masked_part[transfer_indices_relative] = x0_candidates[transfer_indices_relative].clone()

            # Update the global state `x` only at the masked positions
            x[mask_index] = x_new_masked_part

            # --- Apply Constraints ---
            # Constraints are re-applied *after* sampling so they always win
            x = apply_constraints_to_state(x, prompt_length, total_length, parsed_constraints, current_step=i)

            # --- Yield Visualization ---
            current_generated_tokens = x[0, prompt_length:].cpu()
            vis_data = []
            for j in range(gen_length):
                current_tok_id = current_generated_tokens[j].item()
                previous_tok_id = previous_tokens_vis[j].item() if previous_tokens_vis is not None else MASK_ID
                try:
                    decoded_token = tokenizer.decode([current_tok_id], skip_special_tokens=False)
                    display_token = MASK_TOKEN if current_tok_id == MASK_ID else decoded_token
                except Exception:
                    display_token = f"[ID:{current_tok_id}]"  # Fallback

                color = None
                token_to_display = display_token
                if current_tok_id == MASK_ID:
                    color = "#444444"  # Dark gray for masks
                elif previous_tok_id == MASK_ID:  # Token was just revealed
                    color = "#66CC66"  # Light green
                else:  # Token was revealed in an earlier step
                    color = "#6699CC"  # Light blue

                # Hide special tokens (PAD/EOS) once they have settled (LLaDA effect).
                # PAD_ID and EOS_ID may be None, so check before comparing.
                should_hide = (PAD_ID is not None and current_tok_id == PAD_ID) or \
                              (EOS_ID is not None and current_tok_id == EOS_ID)
                if should_hide and previous_tok_id == current_tok_id:
                    token_to_display = ""  # Hide by making the string empty
                    color = None

                # Hidden tokens are simply omitted from the visualization
                if token_to_display:
                    vis_data.append((token_to_display, color))

            # Update the previous state for the next iteration
            previous_tokens_vis = current_generated_tokens

            # Decode the (possibly partial) intermediate response, skipping specials for readability
            intermediate_response_tokens = x[0, prompt_length:]
            intermediate_response_text = tokenizer.decode(
                intermediate_response_tokens,
                skip_special_tokens=True,
                clean_up_tokenization_spaces=True
            ).strip()

            # Yield the current history, this step's vis data, and the intermediate
            # text; the final text overwrites the intermediate text in the UI.
            yield history_copy, vis_data, intermediate_response_text
            time.sleep(visualization_delay)

        end_time = time.time()
        print(f"Dream generation finished in {end_time - start_time:.2f} seconds.")

        # --- 6. Final Processing & Yield ---
        final_sequence = x[0]
        response_tokens = final_sequence[prompt_length:]

        # Decode the final response text
        final_response_text = tokenizer.decode(
            response_tokens,
            skip_special_tokens=True,  # Skip EOS, PAD, MASK etc. in the final text
            clean_up_tokenization_spaces=True
        ).strip()

        # Update history with the final response *before* the last yield
        history_copy[-1][1] = final_response_text

        # Format vis_data one last time from the final `x` (it may match the
        # last state yielded inside the loop)
        final_generated_tokens = x[0, prompt_length:].cpu()
        vis_data_final = []
        for j in range(gen_length):
            current_tok_id = final_generated_tokens[j].item()
            previous_tok_id = previous_tokens_vis[j].item() if previous_tokens_vis is not None else MASK_ID
            try:
                decoded_token = tokenizer.decode([current_tok_id], skip_special_tokens=False)
                display_token = MASK_TOKEN if current_tok_id == MASK_ID else decoded_token
            except Exception:
                display_token = f"[ID:{current_tok_id}]"  # Fallback
            color = None
            token_to_display = display_token
            if current_tok_id == MASK_ID:
                color = "#444444"
            elif previous_tok_id == MASK_ID:
                color = "#66CC66"
            else:
                color = "#6699CC"
            should_hide = (PAD_ID is not None and current_tok_id == PAD_ID) or \
                          (EOS_ID is not None and current_tok_id == EOS_ID)
            if should_hide and previous_tok_id == current_tok_id:
                token_to_display = ""
                color = None
            if token_to_display:
                vis_data_final.append((token_to_display, color))

        # Yield the final history, final visualization, and final text
        yield history_copy, vis_data_final, final_response_text
        print("Visualization streaming complete.")

    except Exception as e:
        print(f"Error during generation or processing: {e}")
        import traceback
        traceback.print_exc()
        # Leave the assistant slot in history as None and surface the error in the UI
        yield history_copy, [("Error during generation.", "red")], ""
        return


# --- Gradio UI (outputs must match the yield structure above) ---
css = '''
.category-legend{display:none}
button{min-height: 60px}
'''


def create_chatbot_demo():
    with gr.Blocks(css=css, theme=gr.themes.Soft()) as demo:
        gr.Markdown("# Dream 7B - Diffusion Language Model Demo")
        gr.Markdown(
            "[[Model Card](https://huggingface.co/Dream-org/Dream-v0-Instruct-7B)] "
            "[[Blog](https://hkunlp.github.io/blog/2025/dream/)]"
        )

        # STATE MANAGEMENT
        # History is kept in a gr.State list so the generator can yield UI updates
        _chat_history_store = gr.State([])  # Hidden state storing the actual history list

        # UI COMPONENTS
        with gr.Row():
            with gr.Column(scale=3):
                chatbot_ui = gr.Chatbot(
                    label="Conversation",
                    height=500,
                    show_copy_button=True,
                    bubble_full_width=False,
                )
                # Message input
                with gr.Group():
                    with gr.Row():
                        user_input = gr.Textbox(
                            label="Your Message",
                            placeholder="Type your message here...",
                            scale=7,
                            autofocus=True,
                            show_label=False,
                            container=False  # Remove container for tighter packing
                        )
                        send_btn = gr.Button("Send", scale=1, variant="primary")
                constraints_input = gr.Textbox(
                    label="Word Constraints (Optional)",
                    info="Place words at specific positions (0-indexed from the start of generation). Format: 'pos:word, pos:word,...'. Example: '0:Once, 5:upon, 10:time'",
                    placeholder="0:Hello, 10:world",
                    value=""
                )
            with gr.Column(scale=2):
                output_vis = gr.HighlightedText(
                    label="Denoising Process Visualization",
                    combine_adjacent=False,
                    show_legend=True,  # The legend itself is hidden via the CSS above
                    interactive=False
                )
                # Show the final/intermediate response text clearly
                response_text_display = gr.Textbox(
                    label="Generated Response",
                    interactive=False,
                    lines=5  # Show a few lines
                )

        # Advanced generation settings
        with gr.Accordion("Generation Settings", open=False):
            with gr.Row():
                gen_length = gr.Slider(
                    minimum=16, maximum=512, value=128, step=8,
                    label="Max New Tokens"
                )
                steps = gr.Slider(
                    minimum=8, maximum=512, value=128, step=8,
                    label="Diffusion Steps"
                )
            with gr.Row():
                temperature = gr.Slider(
                    minimum=0.0, maximum=1.0, value=0.4, step=0.05,
                    label="Temperature (0 = greedy)"
                )
                alg_temp = gr.Slider(
                    minimum=0.0, maximum=1.0, value=0.1, step=0.05,
                    label="Remasking Temp (Confidence Algs)"
                )
            with gr.Row():
                top_p = gr.Slider(
                    minimum=0.0, maximum=1.0, value=0.95, step=0.05,
                    label="Top-P (<=0 or >=1 disables)"
                )
                top_k = gr.Slider(
                    minimum=0, maximum=200, value=0, step=5,
                    label="Top-K (0 disables)"
                )
            with gr.Row():
                remasking_strategy = gr.Radio(
                    choices=['origin', 'maskgit_plus', 'topk_margin', 'entropy'],
                    value='entropy',
                    label="Remasking Strategy (Algorithm)"
                )
            with gr.Row():
                visualization_delay = gr.Slider(
                    minimum=0.0, maximum=0.5, value=0.03, step=0.01,
                    label="Visualization Delay (seconds)"
                )

        # Clear button
        clear_btn = gr.Button("Clear Conversation")

        # --- Event Handlers ---
        def add_user_message_to_history(message: str, history_store: List[List[Optional[str]]]):
            """Adds the user message, clears the input, and prepares for the bot response."""
            if not message.strip():
                gr.Warning("Please enter a message.")
                # Return unchanged history, empty vis, empty response text
                return history_store, history_store, "", [], ""
            # Add the user message with a placeholder for the bot response
            history_store.append([message, None])
            # Return: history store, history for the chatbot UI, cleared input, empty vis, empty response
            return history_store, history_store, "", [], ""

        def clear_conversation():
            """Clears the chat history, visualization, and response text."""
            return [], [], "", [], ""  # History store, chatbot UI, input, vis, response text

        # --- Connect UI elements ---
        # Inputs for the generation function, defined once
        generation_inputs = [
            _chat_history_store, gen_length, steps, constraints_input,
            temperature, top_p, top_k, remasking_strategy, alg_temp,
            visualization_delay
        ]
        # The generator yields (history_copy, vis_data, intermediate_response_text),
        # mapped to (chatbot_ui, output_vis, response_text_display)
        generation_outputs = [chatbot_ui, output_vis, response_text_display]

        # Handle Textbox submission (Enter key)
        submit_listener = user_input.submit(
            fn=add_user_message_to_history,
            inputs=[user_input, _chat_history_store],
            outputs=[_chat_history_store, chatbot_ui, user_input, output_vis, response_text_display]  # Step 1: add user msg & clear outputs
        )
        # Chain the bot response generation after the user message is added
        submit_listener.then(
            fn=generate_dream_response,
            inputs=generation_inputs,
            outputs=generation_outputs,  # Step 2: generate response and stream vis/text
            show_progress="hidden"  # Hide the default progress bar; we have live vis
        )

        # Handle Send button click
        click_listener = send_btn.click(
            fn=add_user_message_to_history,
            inputs=[user_input, _chat_history_store],
            outputs=[_chat_history_store, chatbot_ui, user_input, output_vis, response_text_display]  # Step 1: add user msg & clear outputs
        )
        # Chain the bot response generation after the user message is added
        click_listener.then(
            fn=generate_dream_response,
            inputs=generation_inputs,
            outputs=generation_outputs,  # Step 2: generate response and stream vis/text
            show_progress="hidden"
        )

        # Clear button action
        clear_btn.click(
            clear_conversation,
            inputs=[],
            outputs=[_chat_history_store, chatbot_ui, user_input, output_vis, response_text_display]
        )

    return demo


# --- Launch ---
if __name__ == "__main__":
    demo = create_chatbot_demo()
    # Use a queue to handle multiple users and streaming
    demo.queue().launch(debug=True, share=False)  # Set share=True for a public link