Update app.py
app.py
CHANGED
@@ -14,40 +14,59 @@ Adjust the sliders to see how each parameter influences the token probabilities.
 To learn more about LLM generation, check out the early release of [Hands-On Generative AI with Transformers and Diffusion Models](https://learning.oreilly.com/library/view/hands-on-generative-ai/9781098149239/).
 """
 
-def get_initial_distribution(seed=42):
+def get_initial_distribution(num_tokens=10, min_prob=1e-3, seed=42):
     np.random.seed(seed) # For reproducibility
+
+    # Ensure each token has at least `min_prob`
+    baseline_probs = np.full(num_tokens, min_prob)
+    remaining_prob = 1.0 - num_tokens * min_prob
+
+    # Distribute the remaining probability randomly
+    if remaining_prob > 0:
+        random_probs = np.random.rand(num_tokens)
+        random_probs /= np.sum(random_probs) # Normalize to sum to 1
+        token_probs = baseline_probs + remaining_prob * random_probs
+    else:
+        # If min_prob is too high, adjust probabilities to sum to 1
+        token_probs = baseline_probs
+        token_probs /= np.sum(token_probs)
+
     return token_probs
 
 def adjust_distribution(temperature, top_k, top_p, initial_probs):
+    if temperature == 0:
+        # Greedy sampling: pick the token with the highest probability
+        max_index = np.argmax(initial_probs)
+        token_probs = np.zeros_like(initial_probs)
+        token_probs[max_index] = 1.0
+    else:
+        # Apply temperature scaling
+        token_probs = np.exp(np.log(initial_probs) / temperature)
+        token_probs /= np.sum(token_probs)
+
+    # Apply Top-K filtering
+    if top_k > 0:
+        top_k_indices = np.argsort(token_probs)[-top_k:]
+        top_k_probs = np.zeros_like(token_probs)
+        top_k_probs[top_k_indices] = token_probs[top_k_indices]
+        top_k_probs /= np.sum(top_k_probs) # Normalize after filtering
+        token_probs = top_k_probs
+
+    # Apply top_p (nucleus) filtering
+    if top_p < 1.0:
+        # Sort probabilities in descending order and compute cumulative sum
+        sorted_indices = np.argsort(token_probs)[::-1]
+        cumulative_probs = np.cumsum(token_probs[sorted_indices])
+
+        # Find the cutoff index for nucleus sampling
+        cutoff_index = np.searchsorted(cumulative_probs, top_p) + 1
+
+        # Get the indices that meet the threshold
+        top_p_indices = sorted_indices[:cutoff_index]
+        top_p_probs = np.zeros_like(token_probs)
+        top_p_probs[top_p_indices] = token_probs[top_p_indices]
+        top_p_probs /= np.sum(top_p_probs) # Normalize after filtering
+        token_probs = top_p_probs
 
     # Plotting the probabilities
     plt.figure(figsize=(10, 6))
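
Below the diff, a few standalone sketches of the math it introduces. First, the floor-plus-remainder construction in `get_initial_distribution`: every token is guaranteed at least `min_prob`, and the leftover mass is spread randomly. This is a minimal re-derivation using NumPy only, not a call into app.py:

```python
import numpy as np

num_tokens, min_prob = 10, 1e-3
np.random.seed(42)  # Same default seed as the app

baseline = np.full(num_tokens, min_prob)   # Floor of min_prob per token
remaining = 1.0 - num_tokens * min_prob    # Mass left to distribute

random_part = np.random.rand(num_tokens)
random_part /= random_part.sum()           # Random weights summing to 1
token_probs = baseline + remaining * random_part

# Invariants the construction guarantees
assert token_probs.min() >= min_prob
assert np.isclose(token_probs.sum(), 1.0)
```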
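Second, the effect of the temperature transform `exp(log(p) / T)` used in `adjust_distribution`; the probabilities below are made-up values for illustration:

```python
import numpy as np

probs = np.array([0.5, 0.3, 0.15, 0.05])  # Toy next-token distribution

for temperature in (0.5, 1.0, 2.0):
    scaled = np.exp(np.log(probs) / temperature)  # Equivalent to probs ** (1/T)
    scaled /= scaled.sum()                        # Renormalize to a distribution
    print(temperature, np.round(scaled, 3))

# T < 1 sharpens the distribution toward the most likely token,
# T > 1 flattens it toward uniform, and T = 1 leaves it unchanged.
```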
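Finally, the `np.searchsorted` cutoff for nucleus (top-p) sampling: it keeps the smallest prefix of the descending-sorted distribution whose cumulative mass reaches `top_p`. Again with toy numbers:

```python
import numpy as np

sorted_probs = np.array([0.4, 0.3, 0.15, 0.1, 0.05])  # Already sorted descending
cumulative = np.cumsum(sorted_probs)                   # [0.4, 0.7, 0.85, 0.95, 1.0]

top_p = 0.8
# First index where the cumulative sum reaches top_p; +1 includes that token,
# so the kept mass is at least top_p
cutoff = np.searchsorted(cumulative, top_p) + 1        # -> 3

nucleus = sorted_probs[:cutoff]
nucleus = nucleus / nucleus.sum()                      # Renormalize kept tokens
print(cutoff, np.round(nucleus, 3))                    # 3 [0.471 0.353 0.176]
```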