# dream_app.py import torch import numpy as np import gradio as gr import spaces # Ensure spaces is installed if needed for GPU decorator import torch.nn.functional as F from transformers import AutoTokenizer, AutoModel, AutoConfig import time import re from typing import List, Dict, Tuple, Optional import torch.distributions as dists # Added import # --- START: Copied Helper functions from generation_utils.py --- # [Keep the copied functions: top_p_logits, top_k_logits, sample_tokens] def top_p_logits(logits, top_p=None): """ Applies top-p filtering to logits. """ if top_p is None or top_p >= 1.0: return logits sorted_logits, sorted_indices = torch.sort(logits, descending=True) cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1) sorted_indices_to_remove = cumulative_probs > top_p sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone() sorted_indices_to_remove[..., 0] = 0 mask = torch.zeros_like(logits, dtype=torch.bool, device=logits.device) mask = mask.scatter_(-1, sorted_indices, sorted_indices_to_remove) logits = logits.masked_fill(mask, torch.finfo(logits.dtype).min) return logits def top_k_logits(logits, top_k=None): """ Applies top-k filtering to logits. """ if top_k is None or top_k <= 0: return logits top_k = min(top_k, logits.size(-1)) indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None] logits = logits.masked_fill(indices_to_remove, torch.finfo(logits.dtype).min) return logits def sample_tokens(logits, temperature=0.0, top_p=None, top_k=None, margin_confidence=False, neg_entropy=False): """ Samples tokens based on logits and calculates confidence. """ if temperature > 0: safe_temp = max(temperature, 1e-6) logits = logits / safe_temp if top_p is not None and 0.0 < top_p < 1.0: logits = top_p_logits(logits, top_p) if top_k is not None and top_k > 0: logits = top_k_logits(logits, top_k) is_all_neg_inf = torch.all(logits == torch.finfo(logits.dtype).min, dim=-1, keepdim=True) if torch.any(is_all_neg_inf): uniform_logits = torch.zeros_like(logits) logits = torch.where(is_all_neg_inf, uniform_logits, logits) probs = torch.softmax(logits, dim=-1) probs = torch.clamp(probs, min=0.0) probs = probs / probs.sum(dim=-1, keepdim=True) probs = torch.nan_to_num(probs, nan=0.0) if temperature > 0: try: x0 = dists.Categorical(probs=probs).sample() confidence = torch.gather(probs, -1, x0.unsqueeze(-1)).squeeze(-1) except Exception as e: print(f"Warning: Error during Categorical sampling: {e}. Falling back to argmax.") confidence, x0 = probs.max(dim=-1) else: confidence, x0 = probs.max(dim=-1) if margin_confidence: sorted_probs, _ = torch.sort(probs, dim=-1, descending=True) top1_probs = sorted_probs[..., 0] top2_probs = sorted_probs[..., 1] if sorted_probs.shape[-1] > 1 else top1_probs confidence = top1_probs - top2_probs if neg_entropy: epsilon = 1e-10 log_probs = torch.log(probs + epsilon) confidence = torch.sum(probs * log_probs, dim=-1) confidence = torch.nan_to_num(confidence, nan=0.0) return confidence, x0 # --- END: Copied Helper functions --- # [Keep model loading, constants] config = AutoConfig.from_pretrained("Dream-org/Dream-v0-Instruct-7B", trust_remote_code=True) model_path = "Dream-org/Dream-v0-Instruct-7B" device = 'cuda' if torch.cuda.is_available() else 'cpu' print(f"Using device: {device}") print("Loading tokenizer...") tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True) print("Loading model...") model = AutoModel.from_pretrained( model_path, torch_dtype=torch.bfloat16 if device == 'cuda' else torch.float32, trust_remote_code=True, attn_implementation="sdpa" ) model = model.to(device).eval() print("Model loaded.") MASK_TOKEN = tokenizer.mask_token MASK_ID = tokenizer.mask_token_id PAD_ID = tokenizer.pad_token_id EOS_ID = tokenizer.eos_token_id if MASK_ID is None: raise ValueError("Cannot determine MASK_ID.") SPECIAL_TOKEN_IDS = {PAD_ID, EOS_ID, MASK_ID} try: IM_START_ID = tokenizer.convert_tokens_to_ids("<|im_start|>") IM_END_ID = tokenizer.convert_tokens_to_ids("<|im_end|>") SPECIAL_TOKEN_IDS.add(IM_START_ID) SPECIAL_TOKEN_IDS.add(IM_END_ID) except KeyError: IM_START_ID, IM_END_ID = None, None # --- Helper Functions --- def parse_constraints(constraints_text: str) -> Dict[int, List[int]]: """ Parses word constraints. """ constraints = {} if not constraints_text: return constraints parts = constraints_text.split(',') for part in parts: part = part.strip() if ':' not in part: continue pos_str, word = part.split(':', 1) try: pos = int(pos_str.strip()) word = word.strip() token_ids = [] if word: text_to_encode = (" " + word) if (pos > 0 and not word.startswith(" ")) else word token_ids = tokenizer.encode(text_to_encode, add_special_tokens=False) if token_ids and pos >= 0: constraints[pos] = token_ids elif not token_ids and word: print(f"Warning: Could not tokenize constraint word '{word}'") except ValueError: print(f"Warning: Invalid position '{pos_str}' in constraint part '{part}'") except Exception as e: print(f"Warning: Error processing constraint '{part}': {e}") return constraints def format_chat_history(history: List[List[Optional[str]]]) -> List[Dict[str, str]]: """ Formats chat history [[user, bot], [user, bot]] into [{'role': 'user', 'content': ...}, ...] for the tokenizer's chat template. """ messages = [] # Ensure history is not empty and is properly structured if not history: return messages for turn in history: if not isinstance(turn, (list, tuple)) or len(turn) != 2: print(f"Warning: Skipping malformed history turn: {turn}") continue user_msg, assistant_msg = turn if user_msg is not None: # Check if user message exists # Ensure content is a string user_content = str(user_msg) if user_msg is not None else "" messages.append({"role": "user", "content": user_content}) # Add assistant message only if it exists and is not None if assistant_msg is not None: assistant_content = str(assistant_msg) if assistant_msg is not None else "" messages.append({"role": "assistant", "content": assistant_content}) # print(f"Formatted messages for template: {messages}") # Debug return messages def apply_constraints_to_state( x: torch.Tensor, prompt_length: int, total_length: int, parsed_constraints: Dict[int, List[int]], current_step: Optional[int] = None ) -> torch.Tensor: """ Applies constraints to the state tensor `x`. """ modified_x = x.clone() for rel_pos, word_token_ids in parsed_constraints.items(): abs_start_pos = prompt_length + rel_pos abs_end_pos = abs_start_pos + len(word_token_ids) if abs_start_pos < total_length and abs_end_pos <= total_length: try: constraint_tensor = torch.tensor(word_token_ids, dtype=torch.long, device=modified_x.device) modified_x[0, abs_start_pos:abs_end_pos] = constraint_tensor except IndexError: print(f"Warning (Step {current_step}): Constraint OOB: {rel_pos}") except Exception as e: print(f"Warning (Step {current_step}): Constraint failed {rel_pos}: {e}") return modified_x # --- Core Generation Logic with Live Visualization --- @spaces.GPU @torch.no_grad() def generate_dream_response( history: List[List[Optional[str]]], # IMPORTANT: This is the *full* history from the state gen_length: int, steps: int, constraints_text: str, temperature: float, top_p: Optional[float], top_k: Optional[int], alg: str, alg_temp: Optional[float], visualization_delay: float ): # No return type annotation for generators in older Python? Or use -> Iterator[Tuple[...]] """ Generates text step-by-step and yields visualization states live. """ # Ensure history is valid before proceeding if not history or not history[-1] or history[-1][0] is None: # Yield the current (potentially empty) history back yield history, [("No valid input message found.", "red")], "" return # --- 1. Preparation --- # Use the *entire* history received from the state for context messages_for_template = format_chat_history(history) parsed_constraints = parse_constraints(constraints_text) try: inputs = tokenizer.apply_chat_template( messages_for_template, return_tensors="pt", return_dict=True, add_generation_prompt=True # This adds the assistant prompt turn ) input_ids = inputs.input_ids.to(device) prompt_attention_mask = inputs.attention_mask.to(device) if 'attention_mask' in inputs else torch.ones_like(input_ids) prompt_length = input_ids.shape[1] # print(f"Prompt length for model: {prompt_length}") # Debug # print(f"Input IDs to model (first 50): {input_ids[0, :50].tolist()}") # Debug except Exception as e: print(f"Error applying chat template: {e}") # Yield the current history back with an error message yield history, [("Error preparing input.", "red")], "" return eps = 1e-3 top_p_val = top_p if top_p is not None and 0.0 < top_p < 1.0 else None top_k_val = top_k if top_k is not None and top_k > 0 else None alg_temp_val = alg_temp if alg in ['maskgit_plus', 'topk_margin', 'entropy'] and alg_temp is not None and alg_temp > 0 else None # --- 2. Initialize Generation State --- total_length = prompt_length + gen_length initial_generation_part = torch.full((1, gen_length), MASK_ID, dtype=torch.long, device=device) x = torch.cat((input_ids, initial_generation_part), dim=1) # --- Prepare Attention Mask --- generation_attention_mask = torch.ones((1, gen_length), dtype=torch.long, device=device) full_attention_mask_long = torch.cat((prompt_attention_mask, generation_attention_mask), dim=1) attention_mask_for_model = full_attention_mask_long.to(model.dtype) large_neg_val = torch.finfo(model.dtype).min attention_mask_for_model = (1.0 - attention_mask_for_model) * large_neg_val attention_mask_for_model = attention_mask_for_model.unsqueeze(1).unsqueeze(2) # Shape [B, 1, 1, N] timesteps = torch.linspace(1, eps, steps + 1, device=device) x = apply_constraints_to_state(x, prompt_length, total_length, parsed_constraints, current_step=-1) # --- 3. Visualization & State Setup --- previous_tokens_vis = None # Use the passed-in history directly. We will modify the *last* item's assistant response. # No need for history_copy if we are careful. Let's try modifying `history` directly. # IMPORTANT: Gradio state needs the component to receive the *entire object* back if it's mutated. # So yielding the modified `history` list itself should work. history_for_yield = history # Reference the original list # --- 4. Initial Yield (Masked State) --- initial_generated_tokens = x[0, prompt_length:].cpu() vis_data_initial = [] for tok_id in initial_generated_tokens.tolist(): vis_data_initial.append((MASK_TOKEN, "#444444")) previous_tokens_vis = initial_generated_tokens # Yield the *current* history (with None for last bot msg) yield history_for_yield, vis_data_initial, "" time.sleep(visualization_delay) # --- 5. Step-by-Step Diffusion Loop --- try: start_time = time.time() current_response_text = "" # Store intermediate text for i in range(steps): mask_index = (x == MASK_ID) if not mask_index.any(): print(f"No mask tokens left at step {i}. Stopping early.") break outputs = model( input_ids=x, attention_mask=attention_mask_for_model, position_ids=None, use_cache=False, return_dict=True ) logits = outputs.logits logits = torch.cat([logits[:,:1], logits[:, :-1]], dim=1) mask_logits = logits[mask_index] if mask_logits.numel() == 0: print(f"No masked tokens found for logit selection at step {i}. Stopping.") break t = timesteps[i]; s = timesteps[i + 1] x_new_masked_part = torch.full_like(x[mask_index], MASK_ID, device=device, dtype=torch.long) # [Sampling logic remains the same as previous working version] if alg == 'origin': p_transfer = (1.0 - s / t) if i < steps - 1 else 1.0 num_masked = mask_logits.shape[0] transfer_indices_relative = torch.rand(num_masked, device=device) < p_transfer logits_to_sample = mask_logits[transfer_indices_relative] if logits_to_sample.numel() > 0: _, sampled_tokens = sample_tokens(logits_to_sample, temperature=temperature, top_p=top_p_val, top_k=top_k_val) x_new_masked_part[transfer_indices_relative] = sampled_tokens else: # Confidence-based use_margin = (alg == 'topk_margin'); use_entropy = (alg == 'entropy') confidence, x0_candidates = sample_tokens( mask_logits, temperature=temperature, top_p=top_p_val, top_k=top_k_val, margin_confidence=use_margin, neg_entropy=use_entropy ) num_mask_token = mask_logits.shape[0] target_num_revealed_float = num_mask_token * (1.0 - s / t) number_transfer_tokens = int(target_num_revealed_float) if i < steps - 1 else num_mask_token if number_transfer_tokens > 0: num_samples = min(number_transfer_tokens, num_mask_token) if num_samples > 0: transfer_indices_relative = torch.tensor([], dtype=torch.long, device=device) # Init empty if alg_temp_val is None or alg_temp_val <= 0: # Top-k sort_metric = confidence if alg != 'entropy' else -confidence k_topk = min(num_samples, sort_metric.numel()) if k_topk > 0: _, transfer_indices_relative = torch.topk(sort_metric, k=k_topk) else: # Sampled if confidence.numel() > 0: conf_probs = confidence / alg_temp_val conf_probs = torch.nan_to_num(conf_probs, nan=0.0, posinf=1e9, neginf=-1e9) conf_probs = torch.clamp(conf_probs - conf_probs.max(), min=-30) conf_probs = F.softmax(conf_probs, dim=-1) conf_probs = torch.clamp(conf_probs, min=0.0) conf_probs = torch.nan_to_num(conf_probs, nan=0.0) prob_sum = conf_probs.sum() target_sum_tensor = torch.tensor(1.0, device=device, dtype=prob_sum.dtype) if not torch.isclose(prob_sum, target_sum_tensor, atol=1e-4) and prob_sum > 0: safe_prob_sum = torch.max(prob_sum, torch.tensor(1e-12, device=device, dtype=prob_sum.dtype)) conf_probs = conf_probs / safe_prob_sum final_prob_sum_check = conf_probs.sum() if conf_probs.numel() > 0 and num_samples > 0 and torch.all(conf_probs >= 0) and torch.isclose(final_prob_sum_check, target_sum_tensor, atol=1e-4): try: transfer_indices_relative = torch.multinomial(conf_probs, num_samples=num_samples, replacement=False) except RuntimeError as e: print(f"W{i}: Multinomial failed ('{e}'). Fallback.") # Fallback handled below if transfer_indices_relative.numel() == 0: # Fallback if sampling failed or wasn't possible sort_metric = confidence if alg != 'entropy' else -confidence k_fallback = min(num_samples, sort_metric.numel()) if k_fallback > 0: _, transfer_indices_relative = torch.topk(sort_metric, k=k_fallback) # Apply transfer if transfer_indices_relative.numel() > 0: valid_indices = transfer_indices_relative < x0_candidates.shape[0] valid_transfer_indices = transfer_indices_relative[valid_indices] if valid_transfer_indices.numel() > 0 and valid_transfer_indices.max() < x_new_masked_part.shape[0]: x_new_masked_part[valid_transfer_indices] = x0_candidates[valid_transfer_indices].clone() x[mask_index] = x_new_masked_part x = apply_constraints_to_state(x, prompt_length, total_length, parsed_constraints, current_step=i) # --- Yield Visualization --- current_generated_tokens = x[0, prompt_length:].cpu() vis_data = [] # [Visualization formatting logic remains the same] for j in range(gen_length): current_tok_id = current_generated_tokens[j].item() previous_tok_id = previous_tokens_vis[j].item() if previous_tokens_vis is not None and j < len(previous_tokens_vis) else MASK_ID try: decoded_token = tokenizer.decode([current_tok_id], skip_special_tokens=False, clean_up_tokenization_spaces=False) display_token = MASK_TOKEN if current_tok_id == MASK_ID else decoded_token except Exception: display_token = f"[ID:{current_tok_id}]" color = None; token_to_display = display_token if current_tok_id == MASK_ID: color = "#444444" elif previous_tok_id == MASK_ID: color = "#66CC66" else: color = "#6699CC" should_hide = (PAD_ID is not None and current_tok_id == PAD_ID) or (EOS_ID is not None and current_tok_id == EOS_ID) if should_hide and previous_tok_id == current_tok_id: token_to_display = ""; color = None if token_to_display: vis_data.append((token_to_display, color)) # --- previous_tokens_vis = current_generated_tokens # --- Update intermediate response text --- intermediate_response_tokens = x[0, prompt_length:] current_response_text = tokenizer.decode( intermediate_response_tokens, skip_special_tokens=True, clean_up_tokenization_spaces=True ).strip() # --- Update history for yield --- # Update the placeholder in the *last turn* of the history list if history_for_yield and history_for_yield[-1]: history_for_yield[-1][1] = current_response_text + "..." # Indicate streaming # --- Yield current state --- yield history_for_yield, vis_data, current_response_text time.sleep(visualization_delay) # --- End loop iteration --- end_time = time.time() print(f"Dream generation finished in {end_time - start_time:.2f} seconds.") # --- 6. Final Processing & Yield --- final_sequence = x[0] response_tokens = final_sequence[prompt_length:] final_response_text = tokenizer.decode( response_tokens, skip_special_tokens=True, clean_up_tokenization_spaces=True ).strip() # Update the history definitively with the final text if history_for_yield and history_for_yield[-1]: history_for_yield[-1][1] = final_response_text # Format final visualization final_generated_tokens = x[0, prompt_length:].cpu() vis_data_final = [] # [Final visualization formatting logic remains the same] for j in range(gen_length): current_tok_id = final_generated_tokens[j].item() previous_tok_id = previous_tokens_vis[j].item() if previous_tokens_vis is not None and j < len(previous_tokens_vis) else MASK_ID try: decoded_token = tokenizer.decode([current_tok_id], skip_special_tokens=False, clean_up_tokenization_spaces=False) display_token = MASK_TOKEN if current_tok_id == MASK_ID else decoded_token except Exception: display_token = f"[ID:{current_tok_id}]" color = None; token_to_display = display_token if current_tok_id == MASK_ID: color = "#444444" elif previous_tok_id == MASK_ID: color = "#66CC66" else: color = "#6699CC" should_hide = (PAD_ID is not None and current_tok_id == PAD_ID) or (EOS_ID is not None and current_tok_id == EOS_ID) if should_hide and previous_tok_id == current_tok_id: token_to_display = ""; color = None if token_to_display: vis_data_final.append((token_to_display, color)) # --- # Yield the final state yield history_for_yield, vis_data_final, final_response_text print("Visualization streaming complete.") except Exception as e: print(f"Error during generation or processing: {e}") import traceback traceback.print_exc() # Ensure the history state reflects the error somehow? Or just yield error vis. # Yield the history *as it was* when the error occurred. if history_for_yield and history_for_yield[-1]: history_for_yield[-1][1] = f"" # Put error in bot response yield history_for_yield, [("Error during generation.", "red")], "" return # --- Gradio UI --- css = ''' .category-legend{display:none} button{min-height: 60px} ''' def create_chatbot_demo(): with gr.Blocks(css=css, theme=gr.themes.Soft()) as demo: gr.Markdown("# Dream 7B - Diffusion Language Model Demo") gr.Markdown( "[[Model Card](https://huggingface.co/Dream-org/Dream-v0-Instruct-7B)] " "[[Blog](https://hkunlp.github.io/blog/2025/dream/)]" ) # Use a single state variable for the history list chat_history_state = gr.State([]) with gr.Row(): with gr.Column(scale=3): chatbot_ui = gr.Chatbot( label="Conversation", height=500, show_copy_button=True, bubble_full_width=False, # value=[] # Initial value set by state binding later ) with gr.Group(): with gr.Row(): user_input = gr.Textbox( label="Your Message", placeholder="Type your message here...", scale=7, autofocus=True, show_label=False, container=False ) send_btn = gr.Button("Send", scale=1, variant="primary") constraints_input = gr.Textbox( label="Word Constraints (Optional)", info="Format: 'pos:word, pos:word,...'. Example: '0:Once, 5:upon, 10:time'", placeholder="0:Hello, 10:world", value="" ) with gr.Column(scale=2): output_vis = gr.HighlightedText( label="Denoising Process Visualization", combine_adjacent=True, show_legend=False, interactive=False ) response_text_display = gr.Textbox( label="Generated Response (Live)", interactive=False, lines=5 ) with gr.Accordion("Generation Settings", open=False): # [Settings sliders remain the same] with gr.Row(): gen_length = gr.Slider(minimum=16, maximum=512, value=128, step=8, label="Max New Tokens") steps = gr.Slider(minimum=8, maximum=512, value=128, step=8, label="Diffusion Steps") with gr.Row(): temperature = gr.Slider(minimum=0.0, maximum=1.0, value=0.4, step=0.05, label="Temperature (0 = greedy)") alg_temp = gr.Slider(minimum=0.0, maximum=1.0, value=0.1, step=0.05, label="Remasking Temp (Confidence Algs)") with gr.Row(): top_p = gr.Slider(minimum=0.0, maximum=1.0, value=0.95, step=0.05, label="Top-P (0 disables)") top_k = gr.Slider(minimum=0, maximum=200, value=0, step=5, label="Top-K (0 disables)") with gr.Row(): remasking_strategy = gr.Radio(choices=['origin', 'maskgit_plus', 'topk_margin', 'entropy'], value='entropy', label="Remasking Strategy (Algorithm)") with gr.Row(): visualization_delay = gr.Slider(minimum=0.0, maximum=0.5, value=0.03, step=0.01, label="Visualization Delay (seconds)") clear_btn = gr.Button("Clear Conversation") # --- Event Handler Functions --- def add_user_message(message: str, history: List[List[Optional[str]]]): """ Adds the user message to the history state and prepares the UI for the bot's response (clearing previous outputs). """ if not message.strip(): gr.Warning("Please enter a message.") # Return unchanged history and empty outputs return history, history, "", [], "" # Append new turn with user message and None placeholder for bot response history.append([message, None]) # Return updated history (for state), history (for immediate UI update), # empty input, empty vis, empty response text. return history, history, "", [], "" def clear_all(): """Clears state and all relevant UI components.""" return [], [], "", [], "" # state, chatbot, input, vis, response text # --- Connect UI elements --- # Define inputs/outputs for the generator generation_inputs = [ chat_history_state, gen_length, steps, constraints_input, temperature, top_p, top_k, remasking_strategy, alg_temp, visualization_delay ] # Generator yields: history_list, vis_data, response_text generation_outputs = [chatbot_ui, output_vis, response_text_display] # Chain the actions: Submit/Click -> add_user_message -> generate_dream_response # 1. User submits message (Enter or Button) user_interaction = [user_input, chat_history_state] outputs_after_user_add = [ chat_history_state, # Update the state chatbot_ui, # Update chatbot UI immediately user_input, # Clear user input box output_vis, # Clear visualization response_text_display # Clear response text box ] submit_listener = user_input.submit( fn=add_user_message, inputs=user_interaction, outputs=outputs_after_user_add ).then( # 2. Trigger generation AFTER user message is added and UI cleared fn=generate_dream_response, inputs=generation_inputs, # Pass the updated state and parameters outputs=generation_outputs, # Stream updates to chatbot, vis, text show_progress="hidden" ) click_listener = send_btn.click( fn=add_user_message, inputs=user_interaction, outputs=outputs_after_user_add ).then( # 2. Trigger generation AFTER user message is added and UI cleared fn=generate_dream_response, inputs=generation_inputs, outputs=generation_outputs, show_progress="hidden" ) # 3. Clear Button clear_btn.click( clear_all, inputs=[], outputs=[ chat_history_state, chatbot_ui, user_input, output_vis, response_text_display ] ) return demo # --- Launch --- if __name__ == "__main__": demo = create_chatbot_demo() demo.queue().launch(debug=True, share=False)