import numpy as np
import matplotlib.pyplot as plt
import gradio as gr

description = """## Token Probability Distribution Explorer

This interactive tool lets you visualize how different parameters affect the probability distribution of tokens.

- **Temperature**: Controls the randomness of predictions. Higher values (e.g., 2.0) make the distribution more uniform, while lower values (e.g., 0.1) make it peakier.
- **Top-k**: Limits the number of most likely tokens to consider. For example, `top_k=5` means only the top 5 tokens are considered, and others are set to zero probability.
- **Top-p (nucleus sampling)**: Keeps the smallest set of most likely tokens whose cumulative probability reaches a threshold. For instance, `top_p=0.9` means only the most likely tokens that together account for the top 90% of the probability mass are considered; the rest are set to zero probability.

Adjust the sliders to see how each parameter influences the token probabilities. All tokens will always have some non-zero probability in the initial distribution.
To learn more about LLM generation, check out the early release of [Hands-On Generative AI with Transformers and Diffusion Models](https://learning.oreilly.com/library/view/hands-on-generative-ai/9781098149239/).
"""

def get_initial_distribution(seed=42):
    np.random.seed(seed)  # For reproducibility
    token_probs = np.random.rand(10)
    token_probs /= np.sum(token_probs)  # Normalize to sum to 1
    return token_probs

def adjust_distribution(temperature, top_k, top_p, initial_probs):
    # Apply temperature scaling: divide the log-probs by T, then softmax.
    # Clamp T away from zero (the slider's minimum) to avoid division by zero,
    # and subtract the max logit before exponentiating for numerical stability.
    temperature = max(temperature, 1e-6)
    logits = np.log(initial_probs) / temperature
    token_probs = np.exp(logits - np.max(logits))
    token_probs /= np.sum(token_probs)
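    # Worked example: for probs [0.7, 0.3], temperature=0.5 squares them to
    # [0.49, 0.09], which renormalizes to ~[0.84, 0.16] (peakier), while
    # temperature=2.0 takes square roots, giving ~[0.60, 0.40] (flatter).
    # Normalized p**(1/T) is the same as softmax(log(p) / T).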

    # Apply Top-K filtering
    if top_k > 0:
        top_k_indices = np.argsort(token_probs)[-top_k:]
        top_k_probs = np.zeros_like(token_probs)
        top_k_probs[top_k_indices] = token_probs[top_k_indices]
        top_k_probs /= np.sum(top_k_probs) # Normalize after filtering
        token_probs = top_k_probs
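    # Worked example: for sorted probs [0.5, 0.3, 0.1, 0.06, 0.04] and top_k=2,
    # only [0.5, 0.3] survive, renormalizing to [0.625, 0.375].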

    # Apply top_p (nucleus) filtering
    if top_p < 1.0:
        # Sort probabilities in descending order and compute cumulative sum
        sorted_indices = np.argsort(token_probs)[::-1]
        cumulative_probs = np.cumsum(token_probs[sorted_indices])

        # Find the cutoff index for nucleus sampling
        cutoff_index = np.searchsorted(cumulative_probs, top_p) + 1

        # Get the indices that meet the threshold
        top_p_indices = sorted_indices[:cutoff_index]
        top_p_probs = np.zeros_like(token_probs)
        top_p_probs[top_p_indices] = token_probs[top_p_indices]
        top_p_probs /= np.sum(top_p_probs) # Normalize after filtering
        token_probs = top_p_probs
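    # Worked example: for sorted probs [0.5, 0.3, 0.1, 0.06, 0.04] and top_p=0.7,
    # the cumulative sums are [0.5, 0.8, 0.9, 0.96, 1.0]; searchsorted returns 1
    # (the first sum >= 0.7), so the first two tokens are kept (the token that
    # crosses the threshold is included) and they renormalize to [0.625, 0.375].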

    # Plot the adjusted distribution using the object-oriented Matplotlib API
    # and return the figure for Gradio to render.
    fig, ax = plt.subplots(figsize=(10, 6))
    ax.bar(range(len(token_probs)), token_probs,
           tick_label=[f'Token {i}' for i in range(len(token_probs))])
    ax.set_xlabel('Tokens')
    ax.set_ylabel('Probability')
    ax.set_title('Token Probability Distribution')
    ax.set_ylim(0, 1)
    ax.grid(True)
    fig.tight_layout()

    return fig

initial_probs = get_initial_distribution()

def update_plot(temperature, top_k, top_p):
    return adjust_distribution(temperature, top_k, top_p, initial_probs)

interface = gr.Interface(
    fn=update_plot,
    inputs=[
        gr.Slider(0, 5.0, step=0.1, value=1.0, label="Temperature"),
        gr.Slider(0, 10, step=1, value=5, label="Top-k"),
        gr.Slider(0.0, 1.0, step=0.01, value=0.9, label="Top-p"),
    ],
    outputs=gr.Plot(label="Token Probability Distribution"),
    live=True,
    title="Explore generation parameters of LLMs",
    description=description,
)

interface.launch()