diff --git a/.cache/pip/http-v2/0/1/f/2/0/01f2082df50502ba9492d64e69db99d1fdb5730707a16c6264b355b8.body b/.cache/pip/http-v2/0/1/f/2/0/01f2082df50502ba9492d64e69db99d1fdb5730707a16c6264b355b8.body
new file mode 100644
index 0000000000000000000000000000000000000000..574d5f1d8164850bd105b6002a0254619993d6d6
--- /dev/null
+++ b/.cache/pip/http-v2/0/1/f/2/0/01f2082df50502ba9492d64e69db99d1fdb5730707a16c6264b355b8.body
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5d4511c52caacf3c4981d1ae2df85908bd31853f33d30b345c8b6830763f769c
+size 1080866
diff --git a/.cache/pip/http-v2/0/2/d/4/2/02d4221e858694abc22129c65515f1df2c4c326330eb1a34ceb0b382.body b/.cache/pip/http-v2/0/2/d/4/2/02d4221e858694abc22129c65515f1df2c4c326330eb1a34ceb0b382.body
new file mode 100644
index 0000000000000000000000000000000000000000..d2c2687e59bb5a5cd0a9b799c755a8fb93f14985
--- /dev/null
+++ b/.cache/pip/http-v2/0/2/d/4/2/02d4221e858694abc22129c65515f1df2c4c326330eb1a34ceb0b382.body
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5bfae9500ad8e7d2937ebccb4906f3bc464d1bf66eedd0e4adabd520811c7b52
+size 2631958
diff --git a/.cache/pip/http-v2/4/0/2/3/b/4023be7b5b37a7a4144c804ce69828082d4fb2a124d9d8aabc855da8.body b/.cache/pip/http-v2/4/0/2/3/b/4023be7b5b37a7a4144c804ce69828082d4fb2a124d9d8aabc855da8.body
new file mode 100644
index 0000000000000000000000000000000000000000..6c384893196c7354f08b74a259f3932116b62b35
--- /dev/null
+++ b/.cache/pip/http-v2/4/0/2/3/b/4023be7b5b37a7a4144c804ce69828082d4fb2a124d9d8aabc855da8.body
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fd4c97d69242efd604c1a2077c8b56341e236cfaca78c40f59dcef9b95464fdc
+size 9663908
diff --git a/.cache/pip/http-v2/4/f/d/2/5/4fd254dbd56deb4021e55d22c4b489f6c776c69c316eb7345bc91691.body b/.cache/pip/http-v2/4/f/d/2/5/4fd254dbd56deb4021e55d22c4b489f6c776c69c316eb7345bc91691.body
new file mode 100644
index 0000000000000000000000000000000000000000..a3a864dac79e15240b2e8446f7a6e15581f14ff6
--- /dev/null
+++ b/.cache/pip/http-v2/4/f/d/2/5/4fd254dbd56deb4021e55d22c4b489f6c776c69c316eb7345bc91691.body
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f99e4769b4750076cd4235c044b61232110733322384a94a63791d2e7beacc66
+size 9995162
diff --git a/.cache/pip/http-v2/9/6/e/8/3/96e83221dd149da9a3d38feebc955beb2034effd910108971c5b167b.body b/.cache/pip/http-v2/9/6/e/8/3/96e83221dd149da9a3d38feebc955beb2034effd910108971c5b167b.body
new file mode 100644
index 0000000000000000000000000000000000000000..809851a68089eca1ad505f8fff9ac99e361005cb
--- /dev/null
+++ b/.cache/pip/http-v2/9/6/e/8/3/96e83221dd149da9a3d38feebc955beb2034effd910108971c5b167b.body
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8e32dced201274bf96899e6491d9ba3e9a5f6b336708656466ad0522d8528f69
+size 41178528
diff --git a/.cache/pip/http-v2/9/e/8/c/8/9e8c8c0496d6d3384d616902379ed05e07b6b1dba9673d70b5fef231.body b/.cache/pip/http-v2/9/e/8/c/8/9e8c8c0496d6d3384d616902379ed05e07b6b1dba9673d70b5fef231.body
new file mode 100644
index 0000000000000000000000000000000000000000..cecf04ce3251ff92603f49d20845c42a0cdb54dc
--- /dev/null
+++ b/.cache/pip/http-v2/9/e/8/c/8/9e8c8c0496d6d3384d616902379ed05e07b6b1dba9673d70b5fef231.body
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3d2665c5df629eb2f981dab244c01bfa6cdc185f4ffa026639286c4d56fafb54
+size 1221827
diff --git a/.cache/pip/http-v2/a/e/7/a/2/ae7a241673cf118ca18eca030dc29d2715b1980127dd0e2949514433.body b/.cache/pip/http-v2/a/e/7/a/2/ae7a241673cf118ca18eca030dc29d2715b1980127dd0e2949514433.body
new file mode 100644
index 0000000000000000000000000000000000000000..b95ce08764d41e928dda6dd272c20aa83481fad9
--- /dev/null
+++ b/.cache/pip/http-v2/a/e/7/a/2/ae7a241673cf118ca18eca030dc29d2715b1980127dd0e2949514433.body
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9dca7c3956b03b7663fac4d150f5e6d4f6f38b2462c1e9afd83bcf7019f17913
+size 1080679
diff --git a/.cache/pip/http-v2/d/3/3/a/b/d33abf9ad709d023fff05902f39da682c1afb233bcd9f2c479487586.body b/.cache/pip/http-v2/d/3/3/a/b/d33abf9ad709d023fff05902f39da682c1afb233bcd9f2c479487586.body
new file mode 100644
index 0000000000000000000000000000000000000000..398188ce7fe86aa92a9865022a569c1abc93d346
--- /dev/null
+++ b/.cache/pip/http-v2/d/3/3/a/b/d33abf9ad709d023fff05902f39da682c1afb233bcd9f2c479487586.body
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ba0d021a166865d2265246961bec0152ff124de910c5cc39f1156ce3fa7c69dc
+size 2110226
diff --git a/.cache/pip/http-v2/d/b/1/f/6/db1f6b45c0850c8e2ce7d8b47148edeca6e8115413af41f4ecc8ce32.body b/.cache/pip/http-v2/d/b/1/f/6/db1f6b45c0850c8e2ce7d8b47148edeca6e8115413af41f4ecc8ce32.body
new file mode 100644
index 0000000000000000000000000000000000000000..a6a547da64cdbf6a2c5732ad60e114f89c88c353
--- /dev/null
+++ b/.cache/pip/http-v2/d/b/1/f/6/db1f6b45c0850c8e2ce7d8b47148edeca6e8115413af41f4ecc8ce32.body
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f7ae2de664e0b158d1607699a16a488de3d008ba99b3a7aa5de1cbc13574d047
+size 39855626
diff --git a/.cache/pip/http-v2/f/5/2/7/6/f52769e4b4d00542e1e056baf2db3e5ad8f277bff67f2636cace711d.body b/.cache/pip/http-v2/f/5/2/7/6/f52769e4b4d00542e1e056baf2db3e5ad8f277bff67f2636cace711d.body
new file mode 100644
index 0000000000000000000000000000000000000000..e6b4d2f5cd2e6c767f701d7ad8581a386ecbeaae
--- /dev/null
+++ b/.cache/pip/http-v2/f/5/2/7/6/f52769e4b4d00542e1e056baf2db3e5ad8f277bff67f2636cace711d.body
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:86976a1c5b25ae3f8ccae3a5306e443569ee3c3faf444dfd0f41cda24667ad57
+size 13064210
diff --git a/.cache/pip/wheels/7e/e3/c3/89c7a2f3c4adc07cd1c675f8bb7b9ad4d18f64a72bccdfe826/flash_attn-2.6.3-cp310-cp310-linux_x86_64.whl b/.cache/pip/wheels/7e/e3/c3/89c7a2f3c4adc07cd1c675f8bb7b9ad4d18f64a72bccdfe826/flash_attn-2.6.3-cp310-cp310-linux_x86_64.whl
new file mode 100644
index 0000000000000000000000000000000000000000..541955b7e3916f08038390349744150d2fc22100
--- /dev/null
+++ b/.cache/pip/wheels/7e/e3/c3/89c7a2f3c4adc07cd1c675f8bb7b9ad4d18f64a72bccdfe826/flash_attn-2.6.3-cp310-cp310-linux_x86_64.whl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8991eedb5038a1ee6fc9904f99c12b40213d66753ed91e261a43d085f5aeab8f
+size 187219571
diff --git a/.gitattributes b/.gitattributes
index a6344aac8c09253b3b630fb776ae94478aa0275b..816a3d6e16466f62a44f22d7d68d45afd8b32ea2 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -33,3 +33,19 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
+.cache/pip/http-v2/0/2/d/4/2/02d4221e858694abc22129c65515f1df2c4c326330eb1a34ceb0b382.body filter=lfs diff=lfs merge=lfs -text
+.cache/pip/http-v2/0/1/f/2/0/01f2082df50502ba9492d64e69db99d1fdb5730707a16c6264b355b8.body filter=lfs diff=lfs merge=lfs -text
+.cache/pip/http-v2/4/f/d/2/5/4fd254dbd56deb4021e55d22c4b489f6c776c69c316eb7345bc91691.body filter=lfs diff=lfs merge=lfs -text
+.cache/pip/http-v2/4/0/2/3/b/4023be7b5b37a7a4144c804ce69828082d4fb2a124d9d8aabc855da8.body filter=lfs diff=lfs merge=lfs -text
+.cache/pip/http-v2/a/e/7/a/2/ae7a241673cf118ca18eca030dc29d2715b1980127dd0e2949514433.body filter=lfs diff=lfs merge=lfs -text
+.cache/pip/http-v2/d/3/3/a/b/d33abf9ad709d023fff05902f39da682c1afb233bcd9f2c479487586.body filter=lfs diff=lfs merge=lfs -text
+.cache/pip/http-v2/d/b/1/f/6/db1f6b45c0850c8e2ce7d8b47148edeca6e8115413af41f4ecc8ce32.body filter=lfs diff=lfs merge=lfs -text
+.cache/pip/http-v2/9/6/e/8/3/96e83221dd149da9a3d38feebc955beb2034effd910108971c5b167b.body filter=lfs diff=lfs merge=lfs -text
+.cache/pip/http-v2/9/e/8/c/8/9e8c8c0496d6d3384d616902379ed05e07b6b1dba9673d70b5fef231.body filter=lfs diff=lfs merge=lfs -text
+.cache/pip/http-v2/f/5/2/7/6/f52769e4b4d00542e1e056baf2db3e5ad8f277bff67f2636cace711d.body filter=lfs diff=lfs merge=lfs -text
+.cache/pip/wheels/7e/e3/c3/89c7a2f3c4adc07cd1c675f8bb7b9ad4d18f64a72bccdfe826/flash_attn-2.6.3-cp310-cp310-linux_x86_64.whl filter=lfs diff=lfs merge=lfs -text
+.local/share/jupyter/nbextensions/go_to_current_running_cell/auto_focus.gif filter=lfs diff=lfs merge=lfs -text
+.local/share/jupyter/nbextensions/nbTranslate/demo1.gif filter=lfs diff=lfs merge=lfs -text
+.local/share/jupyter/nbextensions/nbTranslate/demo2.gif filter=lfs diff=lfs merge=lfs -text
+.local/share/jupyter/nbextensions/scratchpad/demo.gif filter=lfs diff=lfs merge=lfs -text
+.local/share/jupyter/nbextensions/toc2/demo.gif filter=lfs diff=lfs merge=lfs -text
diff --git a/.ipynb_checkpoints/model-checkpoint.py b/.ipynb_checkpoints/model-checkpoint.py
new file mode 100644
index 0000000000000000000000000000000000000000..1380c4f4cca1a18ca21d9d12509cfed04c63cc4f
--- /dev/null
+++ b/.ipynb_checkpoints/model-checkpoint.py
@@ -0,0 +1,390 @@
+# gpt2-model-positional-encodings.py
+
+import math
+import inspect
+from dataclasses import dataclass
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+# Import necessary modules for different positional encodings
+import numpy as np
+import scipy.special
+import scipy.signal
+
+from packaging import version
+
+# Check if scaled_dot_product_attention is available and supports flash attention
+use_flash_attn = 'scaled_dot_product_attention' in dir(F) and version.parse(torch.__version__) >= version.parse('2.0.0')
+if use_flash_attn:
+ print("Flash Attention v2 is available and will be used where possible.")
+else:
+ print("Flash Attention v2 is not available. Using standard attention.")
+
+class LayerNorm(nn.Module):
+ """LayerNorm with optional bias."""
+ def __init__(self, ndim, bias):
+ super().__init__()
+ self.weight = nn.Parameter(torch.ones(ndim))
+ self.bias = nn.Parameter(torch.zeros(ndim)) if bias else None
+ def forward(self, input):
+ return F.layer_norm(input, self.weight.shape, self.weight, self.bias, 1e-5)
+
+def get_positional_encoding(position, d_model, method, max_len=5000):
+ """
+ Generate positional encodings based on the specified method.
+ """
+ if method == 'default':
+ return None # Handled by nn.Embedding in the model
+ elif method == 'learned':
+ return None # Handled by nn.Embedding in the model
+ elif method == 'sinusoidal':
+ pe = torch.zeros(max_len, d_model)
+ position_enc = position.unsqueeze(1)
+ div_term = torch.exp(torch.arange(0, d_model, 2) * -(math.log(10000.0) / d_model))
+ pe[:, 0::2] = torch.sin(position_enc * div_term)
+ pe[:, 1::2] = torch.cos(position_enc * div_term)
+ return pe
+ elif method == 'exponential':
+ pe = torch.exp(-position.float() / max_len).unsqueeze(1).repeat(1, d_model)
+ return pe
+ elif method == 'polynomial_legendre':
+ pe = torch.zeros(max_len, d_model)
+ x = (position / max_len * 2) - 1 # Scale positions to [-1,1]
+ for i in range(d_model):
+ pe[:, i] = scipy.special.eval_legendre(i, x)
+ return pe
+ elif method == 'polynomial_chebyshev':
+ pe = torch.zeros(max_len, d_model)
+ x = (position / max_len * 2) - 1 # Scale positions to [-1,1]
+ for i in range(d_model):
+ pe[:, i] = scipy.special.eval_chebyt(i, x)
+ return pe
+ elif method == 'gaussian':
+ pe = torch.zeros(max_len, d_model)
+ positions = position.float()
+ means = torch.linspace(0, max_len, d_model)
+ std = max_len / d_model
+ for i in range(d_model):
+ pe[:, i] = torch.exp(- ((positions - means[i]) **2) / (2 * std **2))
+ return pe
+ elif method == 'random_fourier':
+ B = torch.randn(d_model, 1)
+ x = position.float() / max_len
+ x = x @ B.T * 2 * math.pi
+ pe = torch.cat([torch.sin(x), torch.cos(x)], dim=1)
+ return pe[:, :d_model]
+ elif method == 'wavelet':
+ pe = torch.zeros(max_len, d_model)
+ scales = torch.arange(1, d_model+1)
+ x = position.float()
+ for i in range(d_model):
+ wavelet = scipy.signal.ricker(points=max_len, a=scales[i])
+ pe[:, i] = torch.from_numpy(wavelet[position])
+ return pe
+ elif method == 'bessel':
+ pe = torch.zeros(max_len, d_model)
+ x = position.float()
+ for i in range(d_model):
+ pe[:, i] = scipy.special.jv(i, x)
+ return pe
+ elif method == 'alternative':
+ pe = torch.zeros(max_len, d_model)
+ position_enc = position.float()
+ div_term = torch.exp(torch.arange(0, d_model, 2) * -(math.log(10000.0) / d_model))
+ pe[:, 0::2] = torch.tan(position_enc * div_term)
+ pe[:, 1::2] = torch.sin(position_enc * div_term + math.pi / 4)
+ return pe
+ elif method == 'none':
+ return torch.zeros(max_len, d_model)
+ else:
+ raise ValueError(f"Unknown positional encoding method: {method}")
+
+class CausalSelfAttention(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ assert config.n_embd % config.n_head == 0
+ self.n_head = config.n_head
+ self.n_embd = config.n_embd
+ self.dropout = config.dropout
+ self.head_dim = self.n_embd // self.n_head
+
+ self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd, bias=config.bias)
+ self.c_proj = nn.Linear(config.n_embd, config.n_embd, bias=config.bias)
+ self.resid_dropout = nn.Dropout(config.dropout)
+
+ # Implement attention-level positional encodings
+ if config.attention_type == 'rope':
+ self.rotary_dim = self.n_embd // self.n_head
+ if self.rotary_dim % 2 != 0:
+ self.rotary_dim -= self.rotary_dim % 2 # Ensure even dimension
+ inv_freq = 1.0 / (10000 ** (torch.arange(0, self.rotary_dim, 2).float() / self.rotary_dim))
+ self.register_buffer('inv_freq', inv_freq)
+ elif config.attention_type == 'alibi':
+ slopes = self.get_alibi_slopes(self.n_head)
+ self.register_buffer('alibi_slopes', slopes)
+ elif config.attention_type == 'relative':
+ num_rel_dis = 2 * config.block_size - 1
+ self.relative_positions = nn.Embedding(num_rel_dis, self.n_head)
+ # else: default attention (nothing extra to define)
+
+ def get_alibi_slopes(self, n_heads):
+ def get_slopes(n):
+ import math
+ def get_slopes_power_of_2(n):
+ start = 2 ** (-2 ** -(math.log2(n) - 3))
+ ratio = start
+ return [start * (ratio ** i) for i in range(n)]
+ if math.log2(n).is_integer():
+ return torch.Tensor(get_slopes_power_of_2(n))
+ else:
+ closest_power_of_2 = 2 ** math.floor(math.log2(n))
+ slopes = get_slopes_power_of_2(closest_power_of_2)
+ extra_slopes = get_slopes(2 * closest_power_of_2)[0::2][:n - closest_power_of_2]
+ return torch.Tensor(slopes + extra_slopes)
+ slopes = get_slopes(n_heads)
+ return slopes.view(n_heads, 1, 1)
+
+ def apply_rope(self, x):
+ # x: (B, n_head, T, head_dim)
+ seq_len = x.size(-2)
+ device = x.device
+ t = torch.arange(seq_len, device=device, dtype=self.inv_freq.dtype)
+ freqs = torch.einsum('i , j -> i j', t, self.inv_freq)
+ emb = torch.cat((freqs.sin(), freqs.cos()), dim=-1) # (T, rotary_dim)
+ emb = emb[None, None, :, :] # (1, 1, T, rotary_dim)
+ x1 = x[..., :self.rotary_dim]
+ x2 = x[..., self.rotary_dim:]
+ x1_rot = x1 * emb + torch.flip(x1, dims=[-1]) * torch.flip(emb, dims=[-1])
+ x = torch.cat((x1_rot, x2), dim=-1)
+ return x
+
+ def forward(self, x, layer_past=None):
+ B, T, C = x.size()
+ qkv = self.c_attn(x).view(B, T, 3, self.n_head, self.head_dim)
+ qkv = qkv.permute(2, 0, 3, 1, 4) # (3, B, n_head, T, head_dim)
+ q, k, v = qkv[0], qkv[1], qkv[2] # Each is (B, n_head, T, head_dim)
+
+ if self.config.attention_type == 'rope':
+ q = self.apply_rope(q)
+ k = self.apply_rope(k)
+
+ # Decide whether to use Flash Attention based on training/evaluation mode and tracking flags
+ if use_flash_attn and self.config.attention_type in ['default', 'rope'] and not (self.config.track_attention_patterns and not self.training):
+ # Use PyTorch's scaled_dot_product_attention which leverages Flash Attention 2
+ y = F.scaled_dot_product_attention(
+ q, k, v, attn_mask=None,
+ dropout_p=self.dropout if self.training else 0.0,
+ is_causal=True
+ )
+ else:
+ # Standard attention mechanism
+ attn_scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.head_dim)
+
+ if self.config.attention_type == 'alibi':
+ position_ids = torch.arange(T, device=x.device).unsqueeze(0).unsqueeze(0)
+ alibi = self.alibi_slopes.to(x.device) * position_ids # (n_head, 1, T)
+ attn_scores = attn_scores + alibi
+
+ elif self.config.attention_type == 'relative':
+ positions = torch.arange(-T+1, T, device=x.device)
+ rel_pos = self.relative_positions(positions + T -1)
+ attn_scores = attn_scores + rel_pos
+
+ # Apply causal mask
+ causal_mask = torch.tril(torch.ones(T, T, device=x.device)).view(1, 1, T, T)
+ attn_scores = attn_scores.masked_fill(causal_mask == 0, float('-inf'))
+
+ attn_weights = F.softmax(attn_scores, dim=-1)
+ attn_weights = F.dropout(attn_weights, p=self.dropout, training=self.training)
+
+ # Collect attention patterns if required
+ if self.config.track_attention_patterns and not self.training:
+ self.attn_weights = attn_weights.detach().cpu()
+ y = torch.matmul(attn_weights, v)
+
+ y = y.transpose(1, 2).contiguous().view(B, T, C)
+ y = self.resid_dropout(self.c_proj(y))
+ return y
+
+class MLP(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.c_fc = nn.Linear(config.n_embd, 4 * config.n_embd, bias=config.bias)
+ self.gelu = nn.GELU()
+ self.c_proj = nn.Linear(4 * config.n_embd, config.n_embd, bias=config.bias)
+ self.dropout = nn.Dropout(config.dropout)
+ def forward(self, x):
+ x = self.c_fc(x)
+ x = self.gelu(x)
+ x = self.c_proj(x)
+ x = self.dropout(x)
+ return x
+
+class Block(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.ln_1 = LayerNorm(config.n_embd, bias=config.bias)
+ self.attn = CausalSelfAttention(config)
+ self.ln_2 = LayerNorm(config.n_embd, bias=config.bias)
+ self.mlp = MLP(config)
+ def forward(self, x):
+ x = x + self.attn(self.ln_1(x))
+ x = x + self.mlp(self.ln_2(x))
+ return x
+
+@dataclass
+class GPTConfig:
+ block_size: int = 1024
+ vocab_size: int = 50304
+ n_layer: int = 12
+ n_head: int = 12
+ n_embd: int = 768
+ dropout: float = 0.0
+ bias: bool = True
+ embedding_type: str = 'default' # Default uses learned positional embeddings
+ attention_type: str = 'default' # Default attention without any modifications
+ track_activations: bool = False
+ track_attention_patterns: bool = False
+
+class GPT(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ assert config.vocab_size is not None
+ assert config.block_size is not None
+ self.config = config
+
+ self.transformer = nn.ModuleDict()
+ self.transformer['wte'] = nn.Embedding(config.vocab_size, config.n_embd)
+
+ if config.embedding_type in ['learned', 'default']:
+ self.transformer['wpe'] = nn.Embedding(config.block_size, config.n_embd)
+ self.pos_emb = None
+ elif config.embedding_type == 'none':
+ self.transformer['wpe'] = None
+ self.pos_emb = None
+ else:
+ self.transformer['wpe'] = None
+ position = torch.arange(0, config.block_size)
+ pe = get_positional_encoding(position, config.n_embd, config.embedding_type, config.block_size)
+ self.register_buffer('pos_emb', pe)
+
+ self.transformer['drop'] = nn.Dropout(config.dropout)
+ self.transformer['h'] = nn.ModuleList([Block(config) for _ in range(config.n_layer)])
+ self.transformer['ln_f'] = LayerNorm(config.n_embd, bias=config.bias)
+ self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
+ self.transformer['wte'].weight = self.lm_head.weight # Weight tying
+
+ self.apply(self._init_weights)
+ for pn, p in self.named_parameters():
+ if pn.endswith('c_proj.weight'):
+ nn.init.normal_(p, mean=0.0, std=0.02 / math.sqrt(2 * config.n_layer))
+
+ # Initialize activations and attention patterns
+ self.activations = []
+ self.attention_patterns = []
+
+ print("Number of parameters: {:.2f}M".format(self.get_num_params() / 1e6))
+
+ def get_num_params(self, non_embedding=True):
+ n_params = sum(p.numel() for p in self.parameters())
+ if non_embedding and self.transformer['wpe'] is not None:
+ n_params -= self.transformer['wpe'].weight.numel()
+ return n_params
+
+ def _init_weights(self, module):
+ if isinstance(module, nn.Linear):
+ nn.init.normal_(module.weight, mean=0.0, std=0.02)
+ if module.bias is not None:
+ nn.init.zeros_(module.bias)
+ elif isinstance(module, nn.Embedding):
+ nn.init.normal_(module.weight, mean=0.0, std=0.02)
+
+ def forward(self, idx, targets=None):
+ device = idx.device
+ b, t = idx.size()
+ assert t <= self.config.block_size, f"Cannot forward sequence of length {t}, block size is only {self.config.block_size}"
+ pos = torch.arange(0, t, dtype=torch.long, device=device) # shape (t)
+
+ tok_emb = self.transformer['wte'](idx) # token embeddings
+
+ if self.config.embedding_type in ['learned', 'default']:
+ pos_emb = self.transformer['wpe'](pos)
+ x = tok_emb + pos_emb
+ elif self.config.embedding_type == 'none':
+ x = tok_emb
+ else:
+ pos_emb = self.pos_emb[:t, :].to(device)
+ x = tok_emb + pos_emb.unsqueeze(0)
+
+ x = self.transformer['drop'](x)
+
+ # Reset activations and attention patterns if tracking
+ if self.config.track_activations and not self.training:
+ self.activations = []
+ if self.config.track_attention_patterns and not self.training:
+ self.attention_patterns = []
+
+ for block in self.transformer['h']:
+ x = block(x)
+ if self.config.track_activations and not self.training:
+ self.activations.append(x.detach().cpu())
+ if self.config.track_attention_patterns and not self.training:
+ if hasattr(block.attn, 'attn_weights'):
+ self.attention_patterns.append(block.attn.attn_weights)
+ x = self.transformer['ln_f'](x)
+ logits = self.lm_head(x)
+
+ if targets is not None:
+ loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1), ignore_index=-1)
+ else:
+ loss = None
+
+ return logits, loss
+
+ def configure_optimizers(self, weight_decay, learning_rate, betas, device_type):
+ # Start with all candidate parameters
+ param_dict = {pn: p for pn, p in self.named_parameters() if p.requires_grad}
+ decay_params = [p for n, p in param_dict.items() if p.dim() >= 2]
+ nodecay_params = [p for n, p in param_dict.items() if p.dim() < 2]
+ optim_groups = [
+ {'params': decay_params, 'weight_decay': weight_decay},
+ {'params': nodecay_params, 'weight_decay': 0.0},
+ ]
+ fused_available = 'fused' in inspect.signature(torch.optim.AdamW).parameters
+ use_fused = fused_available and device_type == 'cuda'
+ extra_args = dict(fused=True) if use_fused else dict()
+ optimizer = torch.optim.AdamW(optim_groups, lr=learning_rate, betas=betas, **extra_args)
+ print(f"Using fused AdamW: {use_fused}")
+
+ return optimizer
+
+ def estimate_mfu(self, fwdbwd_per_iter, dt):
+ """Estimate model flops utilization (MFU)"""
+ N = self.get_num_params()
+ cfg = self.config
+ L, H, Q, T = cfg.n_layer, cfg.n_head, cfg.n_embd // cfg.n_head, cfg.block_size
+ flops_per_token = 6 * N + 12 * L * H * Q * T
+ flops_per_fwdbwd = flops_per_token * T
+ flops_per_iter = flops_per_fwdbwd * fwdbwd_per_iter
+ flops_achieved = flops_per_iter * (1.0 / dt)
+ flops_promised = 312e12 # A100 GPU bfloat16 peak flops is 312 TFLOPS
+ mfu = flops_achieved / flops_promised
+ return mfu
+
+ @torch.no_grad()
+ def generate(self, idx, max_new_tokens, temperature=1.0, top_k=None):
+ """Generate sequences of tokens from the model"""
+ for _ in range(max_new_tokens):
+ idx_cond = idx if idx.size(1) <= self.config.block_size else idx[:, -self.config.block_size:]
+ logits, _ = self(idx_cond)
+ logits = logits[:, -1, :] / temperature
+ if top_k is not None:
+ v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
+ logits[logits < v[:, [-1]]] = -float('Inf')
+ probs = F.softmax(logits, dim=-1)
+ idx_next = torch.multinomial(probs, num_samples=1)
+ idx = torch.cat((idx, idx_next), dim=1)
+ return idx
diff --git a/.ipynb_checkpoints/train-checkpoint.py b/.ipynb_checkpoints/train-checkpoint.py
new file mode 100644
index 0000000000000000000000000000000000000000..1ada39b1bac0ea353ddead0e07ad1d75f7a96a92
--- /dev/null
+++ b/.ipynb_checkpoints/train-checkpoint.py
@@ -0,0 +1,545 @@
+# torchrun --standalone --nproc_per_node=2 train.py --batch_size=96
+
+# train.py
+import os
+import time
+import math
+from contextlib import nullcontext
+import json
+
+import numpy as np
+import torch
+from torch.nn.parallel import DistributedDataParallel as DDP
+from torch.distributed import init_process_group, destroy_process_group
+import pandas as pd
+
+import tiktoken
+from model import GPTConfig, GPT
+
+# Import wandb and tqdm
+import wandb
+from tqdm.auto import tqdm
+
+# -----------------------------------------------------------------------------
+# Default configuration with added positional encoding options
+# I/O
+out_dir = 'out'
+eval_interval = 100 # Evaluate every 100 iterations
+log_interval = 1 # Log every iteration
+eval_iters = 100
+eval_only = False
+always_save_checkpoint = True
+init_from = 'scratch' # 'scratch' | 'resume' | 'checkpoint'
+checkpoint_path = '' # Path to a specific checkpoint to load
+# wandb logging
+wandb_log = True
+wandb_project = 'gpt2_positional_encodings_100B'
+wandb_run_name = 'experiment'
+# data
+dataset = 'fineweb'
+gradient_accumulation_steps = 40
+batch_size = 12
+block_size = 512
+# model
+n_layer = 4
+n_head = 4
+n_embd = 256
+dropout = 0.0
+bias = False
+# adamw optimizer
+learning_rate = 6e-4
+max_iters = 10000
+weight_decay = 1e-1
+beta1 = 0.9
+beta2 = 0.95
+grad_clip = 1.0
+# learning rate decay settings
+decay_lr = True
+warmup_iters = 100
+lr_decay_iters = 10000
+min_lr = 6e-5
+# DDP settings
+backend = 'nccl'
+# system
+device = 'cuda'
+dtype = 'bfloat16' if torch.cuda.is_available() and torch.cuda.is_bf16_supported() else 'float16'
+compile = True
+# Positional Encodings
+embedding_types = ['wavelet']
+attention_types = ['default']
+# Data collection options
+collect_attention_patterns = False # Set to True to collect attention patterns
+collect_activations = False # Set to True to collect activations
+# Evaluation datasets
+eval_datasets = ['wikitext-103-v1', 'ptb', 'lambada'] # WikiText-103 and Penn Treebank
+seed = 1337
+# -----------------------------------------------------------------------------
+config_keys = [k for k, v in globals().items() if not k.startswith('_') and isinstance(v, (int, float, bool, str, list, tuple))]
+exec(open('configurator.py').read())
+config = {k: globals()[k] for k in config_keys}
+# -----------------------------------------------------------------------------
+
+def is_compatible(embedding_type, attention_type):
+ # Incompatible combinations can be specified here
+ incompatible_combinations = [
+ # If specific combinations are incompatible
+ ]
+
+ # If embedding_type or attention_type is 'none', some attention methods may not function properly
+ if embedding_type == 'none' and attention_type in ['relative', 'rope']:
+ return False
+
+ # 'rope' attention requires even dimension per head
+ if attention_type == 'rope' and ((n_embd // n_head) % 2 != 0):
+ return False
+
+ return (embedding_type, attention_type) not in incompatible_combinations
+
+def main():
+ # Initialize DDP if needed
+ global gradient_accumulation_steps
+ ddp = int(os.environ.get('RANK', -1)) != -1
+ if ddp:
+ init_process_group(backend=backend)
+ ddp_rank = int(os.environ['RANK'])
+ ddp_local_rank = int(os.environ['LOCAL_RANK'])
+ ddp_world_size = int(os.environ['WORLD_SIZE'])
+ device_local = f'cuda:{ddp_local_rank}'
+ torch.cuda.set_device(device_local)
+ master_process = ddp_rank == 0
+ seed_offset = ddp_rank
+ assert gradient_accumulation_steps % ddp_world_size == 0
+ gradient_accumulation_steps //= ddp_world_size
+ else:
+ master_process = True
+ seed_offset = 0
+ ddp_world_size = 1
+ device_local = device # Use the default device
+
+ tokens_per_iter = gradient_accumulation_steps * ddp_world_size * batch_size * block_size
+ if master_process:
+ print(f"Tokens per iteration will be: {tokens_per_iter:,}")
+
+ if master_process:
+ os.makedirs(out_dir, exist_ok=True)
+
+ # Set random seed
+ global seed
+ seed += seed_offset
+ torch.manual_seed(seed)
+ np.random.seed(seed)
+ torch.backends.cuda.matmul.allow_tf32 = True
+ torch.backends.cudnn.allow_tf32 = True
+ device_type = 'cuda' if 'cuda' in device_local else 'cpu'
+ ptdtype = {'float32': torch.float32, 'bfloat16': torch.bfloat16, 'float16': torch.float16}[dtype]
+ ctx = nullcontext() if device_type == 'cpu' else torch.amp.autocast(device_type=device_type, dtype=ptdtype)
+
+ # Load tokenizer using tiktoken
+ tokenizer = tiktoken.get_encoding("gpt2")
+
+ # Prepare evaluation datasets
+ eval_data = {}
+ for eval_dataset in eval_datasets:
+ eval_data_path = os.path.join('data', eval_dataset)
+ if not os.path.exists(eval_data_path):
+ raise FileNotFoundError(f"Dataset {eval_dataset} not found. Please run prepare_evaluation_data.py first.")
+
+ if eval_dataset in ['wikitext-2-v1', 'wikitext-103-v1']:
+ train_file = [f for f in os.listdir(eval_data_path) if f.startswith('train')][0]
+ val_file = [f for f in os.listdir(eval_data_path) if f.startswith('validation')][0]
+
+ train_df = pd.read_parquet(os.path.join(eval_data_path, train_file))
+ val_df = pd.read_parquet(os.path.join(eval_data_path, val_file))
+
+ train_text = '\n'.join(train_df['text'])
+ val_text = '\n'.join(val_df['text'])
+
+ elif eval_dataset == 'ptb':
+ with open(os.path.join(eval_data_path, 'train.txt'), 'r') as f:
+ train_text = f.read()
+ with open(os.path.join(eval_data_path, 'valid.txt'), 'r') as f:
+ val_text = f.read()
+
+ elif eval_dataset == 'lambada':
+ with open(os.path.join(eval_data_path, 'lambada_test.jsonl'), 'r') as f:
+ data = [json.loads(line) for line in f]
+ test_text = '\n'.join([item['text'] for item in data])
+ train_text = test_text[:len(test_text)//2] # Use first half as pseudo-train
+ val_text = test_text[len(test_text)//2:] # Use second half as pseudo-val
+
+ else:
+ raise ValueError(f"Unknown dataset: {eval_dataset}")
+
+ # Tokenize
+ train_ids = tokenizer.encode_ordinary(train_text)
+ val_ids = tokenizer.encode_ordinary(val_text)
+
+ # Convert to numpy arrays
+ train_ids = np.array(train_ids, dtype=np.uint16)
+ val_ids = np.array(val_ids, dtype=np.uint16)
+
+ eval_data[eval_dataset] = {'train': train_ids, 'val': val_ids}
+
+ # Data loading
+ data_dir = os.path.join('data', dataset)
+ # Update the get_batch function to handle evaluation datasets
+ def get_batch(split, dataset='main'):
+ if dataset == 'main':
+ if split == 'train':
+ data = np.memmap(os.path.join(data_dir, 'train.bin'), dtype=np.uint16, mode='r')
+ else:
+ data = np.memmap(os.path.join(data_dir, 'val.bin'), dtype=np.uint16, mode='r')
+ else:
+ data = eval_data[dataset][split]
+
+ ix = torch.randint(len(data) - block_size, (batch_size,))
+ x = torch.stack([torch.from_numpy((data[i:i+block_size]).astype(np.int64)) for i in ix])
+ y = torch.stack([torch.from_numpy((data[i+1:i+1+block_size]).astype(np.int64)) for i in ix])
+ if device_type == 'cuda':
+ x, y = x.pin_memory().to(device_local, non_blocking=True), y.pin_memory().to(device_local, non_blocking=True)
+ else:
+ x, y = x.to(device_local), y.to(device_local)
+ return x, y
+
+ # Attempt to derive vocab_size from the dataset
+ meta_path = os.path.join(data_dir, 'meta.json')
+ meta_vocab_size = None
+ if os.path.exists(meta_path):
+ with open(meta_path, 'r') as f:
+ meta = json.load(f)
+ meta_vocab_size = meta['vocab_size']
+ if master_process:
+ print(f"Found vocab_size = {meta_vocab_size} (inside {meta_path})")
+
# Helps estimate loss and collect attention patterns and activations.
# NOTE(review): leading whitespace was mangled in this copy of the file;
# the nesting below is reconstructed — verify against the original source.
@torch.no_grad()
def estimate_loss(model, collect_attention_patterns=False, collect_activations=False, save_dir=None, max_batches_to_save=None):
    """Average the loss over eval_iters batches for each split of the main
    dataset and of every extra dataset in `eval_datasets`; optionally dump
    per-layer activations / attention maps for up to `max_batches_to_save`
    batches into `save_dir`.

    Returns a dict:
        {'train': float, 'val': float,
         <eval_dataset>: {'train': float, 'val': float}, ...}

    Raises ValueError if collection is requested without a save_dir.
    """
    out = {}
    model.eval()
    # Access the underlying model if wrapped with DDP
    raw_model = model.module if hasattr(model, 'module') else model

    # Set tracking flags on the underlying model so its forward pass records
    # activations / attention maps (flags are reset before returning).
    raw_model.config.track_attention_patterns = collect_attention_patterns
    raw_model.config.track_activations = collect_activations

    if collect_attention_patterns or collect_activations:
        if save_dir is None:
            raise ValueError("save_dir must be specified when collecting attention patterns or activations.")
        if master_process:
            os.makedirs(save_dir, exist_ok=True)

    # Main dataset: plain train/val loss (plus optional dumps).
    for split in ['train', 'val']:
        losses = torch.zeros(eval_iters)
        save_count = 0  # Counter for saved batches
        for k in range(eval_iters):
            X, Y = get_batch(split)
            with ctx:
                logits, loss = model(X, Y)
            losses[k] = loss.item()
            # Collect and save attention patterns and activations.
            # NOTE(review): `max_batches_to_save or eval_iters` treats an
            # explicit 0 as "save up to eval_iters" — confirm intended.
            if (collect_attention_patterns or collect_activations) and save_count < (max_batches_to_save or eval_iters):
                if collect_attention_patterns or collect_activations:  # redundant re-check, kept as-is
                    if master_process:
                        batch_dir = os.path.join(save_dir, f"{split}_batch_{k}")
                        os.makedirs(batch_dir, exist_ok=True)
                        # Save activations (one file per layer)
                        if collect_activations and hasattr(raw_model, 'activations'):
                            for idx, activation in enumerate(raw_model.activations):
                                activation_path = os.path.join(batch_dir, f"activation_layer_{idx}.pt")
                                torch.save(activation, activation_path)
                        # Save attention patterns (one file per layer)
                        if collect_attention_patterns and hasattr(raw_model, 'attention_patterns'):
                            for idx, attention in enumerate(raw_model.attention_patterns):
                                attention_path = os.path.join(batch_dir, f"attention_layer_{idx}.pt")
                                torch.save(attention, attention_path)
                    # Clear activations and attention patterns from the model
                    # so the buffers do not grow across batches.
                    raw_model.activations = []
                    raw_model.attention_patterns = []
                save_count += 1
        out[split] = losses.mean().item()

    # Evaluate on additional datasets (same procedure; nested result dict).
    for eval_dataset in eval_datasets:
        split_losses = {}
        for split in ['train', 'val']:
            losses = torch.zeros(eval_iters)
            save_count = 0  # Counter for saved batches
            for k in range(eval_iters):
                X, Y = get_batch(split, dataset=eval_dataset)
                with ctx:
                    logits, loss = model(X, Y)
                losses[k] = loss.item()
                # Collect and save attention patterns and activations
                if (collect_attention_patterns or collect_activations) and save_count < (max_batches_to_save or eval_iters):
                    if collect_attention_patterns or collect_activations:  # redundant re-check, kept as-is
                        if master_process:
                            batch_dir = os.path.join(save_dir, f"{eval_dataset}_{split}_batch_{k}")
                            os.makedirs(batch_dir, exist_ok=True)
                            # Save activations
                            if collect_activations and hasattr(raw_model, 'activations'):
                                for idx, activation in enumerate(raw_model.activations):
                                    activation_path = os.path.join(batch_dir, f"activation_layer_{idx}.pt")
                                    torch.save(activation, activation_path)
                            # Save attention patterns
                            if collect_attention_patterns and hasattr(raw_model, 'attention_patterns'):
                                for idx, attention in enumerate(raw_model.attention_patterns):
                                    attention_path = os.path.join(batch_dir, f"attention_layer_{idx}.pt")
                                    torch.save(attention, attention_path)
                        # Clear activations and attention patterns from the model
                        raw_model.activations = []
                        raw_model.attention_patterns = []
                    save_count += 1
            split_losses[split] = losses.mean().item()
        out[eval_dataset] = split_losses
    model.train()
    # Reset tracking flags so normal training does not record anything
    raw_model.config.track_attention_patterns = False
    raw_model.config.track_activations = False
    return out
+
# Learning rate decay scheduler (linear warmup + cosine decay to min_lr)
def get_lr(it):
    """Return the learning rate for iteration `it`.

    Linear warmup over the first `warmup_iters` steps, cosine decay down to
    `min_lr` by `lr_decay_iters`, then constant `min_lr` afterwards.
    """
    # Warmup phase: scale linearly from 0 up to learning_rate.
    if it < warmup_iters:
        return learning_rate * it / warmup_iters
    # Past the decay horizon: hold at the floor.
    if it > lr_decay_iters:
        return min_lr
    # In between: cosine interpolation from learning_rate down to min_lr.
    progress = (it - warmup_iters) / (lr_decay_iters - warmup_iters)
    cosine_weight = 0.5 * (1.0 + math.cos(math.pi * progress))
    return min_lr + cosine_weight * (learning_rate - min_lr)
+
# Training loop over positional encoding combinations.
# NOTE(review): leading whitespace was mangled in this copy of the file;
# the nesting below is reconstructed — verify against the original source.
for embedding_type in embedding_types:
    for attention_type in attention_types:
        # Skip pairs the model cannot combine (per is_compatible).
        if not is_compatible(embedding_type, attention_type):
            if master_process:
                print(f"Skipping incompatible combination: Embedding={embedding_type}, Attention={attention_type}")
            continue

        # Configure model arguments
        model_args = dict(
            n_layer=n_layer,
            n_head=n_head,
            n_embd=n_embd,
            block_size=block_size,
            bias=bias,
            vocab_size=None,  # filled in below (meta.json value or GPT-2 default)
            dropout=dropout,
            embedding_type=embedding_type,
            attention_type=attention_type,
            track_activations=False,
            track_attention_patterns=False,
        )

        # Initialize or resume model
        iter_num = 0
        best_val_loss = 1e9  # initialize best val loss to a high value
        checkpoint = None
        run_id = None  # wandb run id; restored from checkpoint when resuming

        if init_from == 'scratch':
            if master_process:
                print(f"\nInitializing new model with embedding_type={embedding_type}, attention_type={attention_type}")
            if meta_vocab_size is None:
                if master_process:
                    print("Defaulting to vocab_size of GPT-2 to 50257")
            model_args['vocab_size'] = meta_vocab_size if meta_vocab_size is not None else 50257
            gptconf = GPTConfig(**model_args)
            model = GPT(gptconf)
        elif init_from == 'resume':
            # Resume from the latest checkpoint for this combination
            ckpt_path = os.path.join(out_dir, f"ckpt_{embedding_type}_{attention_type}.pt")
            if not os.path.exists(ckpt_path):
                raise FileNotFoundError(f"Checkpoint not found at {ckpt_path}")
            if master_process:
                print(f"\nResuming training from checkpoint {ckpt_path}")
            checkpoint = torch.load(ckpt_path, map_location=device_local)
            gptconf = GPTConfig(**checkpoint['model_args'])
            model = GPT(gptconf)
            model.load_state_dict(checkpoint['model'])
            iter_num = checkpoint['iter_num']
            best_val_loss = checkpoint['best_val_loss']
            seed = checkpoint.get('seed', seed)
            run_id = checkpoint.get('wandb_run_id', None)
        elif init_from == 'checkpoint':
            # Resume from a specific user-supplied checkpoint path
            if not checkpoint_path or not os.path.exists(checkpoint_path):
                raise FileNotFoundError(f"Checkpoint not found at {checkpoint_path}")
            if master_process:
                print(f"\nLoading model from checkpoint {checkpoint_path}")
            checkpoint = torch.load(checkpoint_path, map_location=device_local)
            gptconf = GPTConfig(**checkpoint['model_args'])
            model = GPT(gptconf)
            model.load_state_dict(checkpoint['model'])
            iter_num = checkpoint['iter_num']
            best_val_loss = checkpoint['best_val_loss']
            seed = checkpoint.get('seed', seed)
            run_id = checkpoint.get('wandb_run_id', None)
        else:
            raise ValueError(f"Unknown init_from '{init_from}'")

        # Set random seed (offset so DDP ranks draw different samples)
        seed += seed_offset
        torch.manual_seed(seed)
        np.random.seed(seed)

        model.to(device_local)
        # GradScaler is a no-op unless training in float16
        scaler = torch.cuda.amp.GradScaler(enabled=(dtype == 'float16'))
        optimizer = model.configure_optimizers(weight_decay, learning_rate, (beta1, beta2), device_type)

        # Load optimizer state if resuming
        if checkpoint is not None:
            optimizer.load_state_dict(checkpoint['optimizer'])

        if compile:
            if master_process:
                print("Compiling the model... (takes a ~minute)")
            unoptimized_model = model  # keep a handle on the eager-mode model
            model = torch.compile(model)

        if ddp:
            model = DDP(model, device_ids=[ddp_local_rank])

        # Logging with WandB
        if wandb_log and master_process:
            run_name = f"{embedding_type}_{attention_type}_{wandb_run_name}"
            # resume='allow' + a stored id lets an interrupted run continue
            wandb.init(project=wandb_project, name=run_name, config=config, resume='allow', id=run_id)
            # Save the run ID for resuming later
            run_id = wandb.run.id
        else:
            # NOTE(review): this clobbers a run_id restored from a checkpoint
            # when wandb is off / on non-master ranks — confirm intended.
            run_id = None

        # Training loop
        X, Y = get_batch('train')  # prefetch the very first batch
        t0 = time.time()
        local_iter_num = 0  # iterations in this process (MFU warmup gate)
        raw_model = model.module if hasattr(model, 'module') else model  # unwrap DDP
        running_mfu = -1.0
        progress_bar = tqdm(total=max_iters, initial=iter_num, desc=f"Training {embedding_type} + {attention_type}", disable=not master_process)
        progress_bar_update_freq = 1  # Update progress bar every iteration

        while True:
            # Determine learning rate for this iteration
            lr = get_lr(iter_num) if decay_lr else learning_rate
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr

            # Evaluate and checkpoint
            if iter_num % eval_interval == 0 and iter_num > 0:
                # Define save_dir for collected data
                eval_data_dir = os.path.join('data', 'eval_data', f"{embedding_type}_{attention_type}", f"step_{iter_num}")
                # Set a limit on the number of batches to save during evaluation
                max_batches_to_save = 10  # Adjust this number as needed to control storage usage
                losses = estimate_loss(model,
                                       collect_attention_patterns=collect_attention_patterns,
                                       collect_activations=collect_activations,
                                       save_dir=eval_data_dir,
                                       max_batches_to_save=max_batches_to_save)
                if master_process:
                    print(f"\nStep {iter_num}:")
                    print(f"Train loss: {losses['train']:.4f}, Val loss: {losses['val']:.4f}")
                    for eval_dataset in eval_datasets:
                        print(f"{eval_dataset} - Train loss: {losses[eval_dataset]['train']:.4f}, Val loss: {losses[eval_dataset]['val']:.4f}")
                    # Log to wandb
                    if wandb_log:
                        wandb_metrics = {
                            "iter": iter_num,
                            "train/loss": losses['train'],
                            "val/loss": losses['val'],
                            "lr": lr,
                            "mfu": running_mfu * 100,
                        }
                        for eval_dataset in eval_datasets:
                            wandb_metrics[f"{eval_dataset}/train_loss"] = losses[eval_dataset]['train']
                            wandb_metrics[f"{eval_dataset}/val_loss"] = losses[eval_dataset]['val']
                        wandb.log(wandb_metrics, step=iter_num)
                # NOTE(review): with always_save_checkpoint=True this also
                # overwrites best_val_loss with a *worse* value — confirm.
                if losses['val'] < best_val_loss or always_save_checkpoint:
                    best_val_loss = losses['val']
                    if iter_num > 0:
                        checkpoint = {
                            'model': raw_model.state_dict(),
                            'optimizer': optimizer.state_dict(),
                            'model_args': model_args,
                            'iter_num': iter_num,
                            'best_val_loss': best_val_loss,
                            'config': config,
                            'seed': seed,
                            'wandb_run_id': run_id
                        }
                        ckpt_path = os.path.join(out_dir, f"ckpt_{embedding_type}_{attention_type}.pt")
                        if master_process:
                            print(f"Saving checkpoint to {ckpt_path}")
                        torch.save(checkpoint, ckpt_path)
                # Update progress bar postfix
                if master_process:
                    postfix_dict = {
                        'train_loss': f"{losses['train']:.4f}",
                        'val_loss': f"{losses['val']:.4f}"
                    }
                    for eval_dataset in eval_datasets:
                        postfix_dict[f"{eval_dataset}_val_loss"] = f"{losses[eval_dataset]['val']:.4f}"
                    progress_bar.set_postfix(postfix_dict)

            # NOTE(review): evaluation above requires iter_num > 0, so
            # eval_only exits before ever evaluating — confirm intended.
            if eval_only:
                break

            # Forward backward update, with gradient accumulation
            for micro_step in range(gradient_accumulation_steps):
                if ddp:
                    # only sync gradients on the last micro step
                    model.require_backward_grad_sync = (micro_step == gradient_accumulation_steps - 1)
                with ctx:
                    logits, loss = model(X, Y)
                    loss = loss / gradient_accumulation_steps  # scale for accumulation
                X, Y = get_batch('train')  # prefetch next batch during backward
                scaler.scale(loss).backward()
            if grad_clip != 0.0:
                scaler.unscale_(optimizer)
                torch.nn.utils.clip_grad_norm_(model.parameters(), grad_clip)
            scaler.step(optimizer)
            scaler.update()
            optimizer.zero_grad(set_to_none=True)

            # Timing and logging
            t1 = time.time()
            dt = t1 - t0
            t0 = t1
            if iter_num % log_interval == 0:
                # undo the accumulation scaling for reporting
                lossf = loss.item() * gradient_accumulation_steps
                if local_iter_num >= 5:  # let training settle before measuring MFU
                    mfu = raw_model.estimate_mfu(batch_size * gradient_accumulation_steps, dt)
                    running_mfu = mfu if running_mfu == -1.0 else 0.9 * running_mfu + 0.1 * mfu
                if master_process:
                    progress_bar.set_postfix({
                        'loss': f"{lossf:.4f}",
                        'lr': f"{lr:.2e}",
                        'mfu': f"{running_mfu*100:.2f}%",
                        'time_per_iter_ms': f"{dt * 1000:.2f}ms",
                    })
                    if wandb_log:
                        wandb.log({
                            "iter": iter_num,
                            "train/loss": lossf,
                            "lr": lr,
                            "mfu": running_mfu * 100,
                            "time_per_iter_ms": dt * 1000,
                        }, step=iter_num)
            iter_num += 1
            local_iter_num += 1
            if master_process:
                progress_bar.update(progress_bar_update_freq)
            # Termination conditions
            if iter_num > max_iters:
                break

        if master_process:
            progress_bar.close()
        if wandb_log and master_process:
            wandb.finish()

# Destroy the process group after all models have been trained
if ddp:
    destroy_process_group()
+
# Standard script entry point: run the training driver only when executed
# directly, not when imported.
if __name__ == '__main__':
    main()
\ No newline at end of file
diff --git a/.launchpadlib/api.launchpad.net/cache/api.launchpad.net,devel,~deadsnakes,+archive,ubuntu,ppa,ws.op=getSigningKeyData-application,json,c76e9ed0b661c7fa5da42e8fb2da319e b/.launchpadlib/api.launchpad.net/cache/api.launchpad.net,devel,~deadsnakes,+archive,ubuntu,ppa,ws.op=getSigningKeyData-application,json,c76e9ed0b661c7fa5da42e8fb2da319e
new file mode 100644
index 0000000000000000000000000000000000000000..23822351289a692e295aa0aa0b409249a892c630
--- /dev/null
+++ b/.launchpadlib/api.launchpad.net/cache/api.launchpad.net,devel,~deadsnakes,+archive,ubuntu,ppa,ws.op=getSigningKeyData-application,json,c76e9ed0b661c7fa5da42e8fb2da319e
@@ -0,0 +1,22 @@
+status: 200
+date: Mon, 05 Feb 2024 23:25:35 GMT
+server: gunicorn
+x-powered-by: Zope (www.zope.org), Python (www.python.org)
+content-security-policy: frame-ancestors 'self';
+content-type: application/json
+strict-transport-security: max-age=15552000
+vary: Accept,Accept-Encoding
+x-content-type-options: nosniff
+x-frame-options: SAMEORIGIN
+x-launchpad-revision: 9643586c585856148a18782148972ae9c1179d06
+x-lazr-notifications: []
+x-xss-protection: 1; mode=block
+x-vcs-revision: 9643586c585856148a18782148972ae9c1179d06
+x-request-id: 452e0c68-aa99-4bb4-abc3-237c7bb39fae
+content-length: 1641
+-content-encoding: gzip
+content-location: https://api.launchpad.net/devel/~deadsnakes/+archive/ubuntu/ppa?ws.op=getSigningKeyData
+-varied-accept: application/json
+-varied-accept-encoding: gzip, deflate
+
+"-----BEGIN PGP PUBLIC KEY BLOCK-----\n\nmQINBFl8fYEBEADQmGZ6pDrwY9iH9DVlwNwTOvOZ7q7lHXPl/TLfMs1tckMc/D9a\nhsdBN9VWtMmo+RySvhkIe8X15r65TFs2HE8ft6j2e/4K472pObM1hB+ajiU/wYX2\nSyq7DBlNm6YMP5/SyQzRxqis4Ja1uUjW4Q5/Csdf5In8uMzXj5D1P7qOiP2aNa0E\nr3w6PXWRTuTihWZOsHv8npyVYDBRR6gEZbd3r86snI/7o8Bfmad3KjbxL7aOdNMw\nAqQFaNKl7Y+UJpv1CNFIf+twcOoC0se1SrsVJlAH9HNHM7XGQsPUwpNvQlcmvr+t\n1vVS2m72lk3gyShDuJpi1TifGw+DoTqu54U0k+0sZm4pnQVeiizNkefU2UqOoGlt\n4oiG9nIhSX04xRlGes3Ya0OjNI5b1xbcYoR+r0c3odI+UCw3VSZtKDX/xlH1o/82\nb8ouXeE7LA1i4DvGNj4VSvoxv4ggIznxMf+PkWXWKwRGsbAAXF52rr4FUaeaKoIU\nDkJqHXAxrB3PQslZ+ZgBEukkQZF76NkqRqP1E7FXzZZMo2eEL7vtnhSzUlanOf42\nECBoWHVoZQaRFMNbGpqlg9aWedHGyetMStS3nH1sqanr+i4I8VR/UH+ilarPTW3T\nE0apWlsH8+N3IKbRx2wgrRZNoQEuyVtvyewDFYShJB3Zxt7VCy67vKAl1QARAQAB\ntBxMYXVuY2hwYWQgUFBBIGZvciBkZWFkc25ha2VziQI4BBMBAgAiBQJZfH2BAhsD\nBgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgAAKCRC6aTI2anVXdvwhD/4oI3yckeKn\n9aJNNTJsyw4ydMkIAOdG+jbZsYv/rN73UVQF1RA8HC71SDmbd0Nu80koBOX+USuL\nvvhoMIsARlD5dLx5f/zaQcYWJm/BtsMF/eZ4s1xsenwW6PpXd8FpaTn1qtg/8+O9\n99R4uSetAhhyf1vSRb/8U0sgSQd38mpZZFq352UuVisXnmCThj621loQubYJ3lwU\nLSLs8wmgo4XIYH7UgdavV9dfplPh0M19RHQL3wTyQP2KRNRq1rG7/n1XzUwDyqY6\neMVhdVhvnxAGztvdFCySVzBRr/rCw6quhcYQwBqdqaXhz63np+4mlUNfd8Eu+Vas\nb/tbteF/pDu0yeFMpK4X09Cwn2kYYCpq4XujijW+iRWb4MO3G8LLi8oBAHP/k0CM\n/QvSRbbG8JDQkQDH37Efm8iE/EttJTixjKAIfyugmvEHfcrnxaMoBioa6h6McQrM\nvI8bJirxorJzOVF4kY7xXvMYwjzaDC8G0fTA8SzQRaShksR3USXZjz8vS6tZ+YNa\nmRHPoZ3Ua0bz4t2aCcu/fknVGsXcNBazNIK9WF2665Ut/b7lDbojXsUZ3PpuqOoe\nGQL9LRj7nmCI6ugoKkNp8ZXcGJ8BGw37Wep2ztyzDohXp6f/4mGgy2KYV9R4S8D5\nyBDUU6BS7Su5nhQMStfdfr4FffLmnvFC9w==\n=7hFk\n-----END PGP PUBLIC KEY BLOCK-----\n"
\ No newline at end of file
diff --git a/.launchpadlib/api.launchpad.net/cache/api.launchpad.net,devel,~deadsnakes,name=%22ppa%22&ws.op=getPPAByName-application,json,bca461ac71b1143128b6fbebfcd56851 b/.launchpadlib/api.launchpad.net/cache/api.launchpad.net,devel,~deadsnakes,name=%22ppa%22&ws.op=getPPAByName-application,json,bca461ac71b1143128b6fbebfcd56851
new file mode 100644
index 0000000000000000000000000000000000000000..ae96b02e3589d1f1f387539332b4a11f0334fe00
--- /dev/null
+++ b/.launchpadlib/api.launchpad.net/cache/api.launchpad.net,devel,~deadsnakes,name=%22ppa%22&ws.op=getPPAByName-application,json,bca461ac71b1143128b6fbebfcd56851
@@ -0,0 +1,22 @@
+status: 200
+date: Mon, 05 Feb 2024 23:25:35 GMT
+server: gunicorn
+x-powered-by: Zope (www.zope.org), Python (www.python.org)
+content-security-policy: frame-ancestors 'self';
+content-type: application/json
+strict-transport-security: max-age=15552000
+vary: Accept,Accept-Encoding
+x-content-type-options: nosniff
+x-frame-options: SAMEORIGIN
+x-launchpad-revision: 9643586c585856148a18782148972ae9c1179d06
+x-lazr-notifications: []
+x-xss-protection: 1; mode=block
+x-vcs-revision: 9643586c585856148a18782148972ae9c1179d06
+x-request-id: ee5c3fda-04a1-41df-8644-2766d8c98b27
+content-length: 4377
+-content-encoding: gzip
+content-location: https://api.launchpad.net/devel/~deadsnakes?name=%22ppa%22&ws.op=getPPAByName
+-varied-accept: application/json
+-varied-accept-encoding: gzip, deflate
+
+{"self_link": "https://api.launchpad.net/devel/~deadsnakes/+archive/ubuntu/ppa", "web_link": "https://launchpad.net/~deadsnakes/+archive/ubuntu/ppa", "resource_type_link": "https://api.launchpad.net/devel/#archive", "owner_link": "https://api.launchpad.net/devel/~deadsnakes", "name": "ppa", "displayname": "New Python Versions", "reference": "~deadsnakes/ubuntu/ppa", "distribution_link": "https://api.launchpad.net/devel/ubuntu", "private": false, "suppress_subscription_notifications": false, "dependencies_collection_link": "https://api.launchpad.net/devel/~deadsnakes/+archive/ubuntu/ppa/dependencies", "description": "This PPA contains more recent Python versions packaged for Ubuntu.\n\nDisclaimer: there's no guarantee of timely updates in case of security problems or other issues. If you want to use them in a security-or-otherwise-critical environment (say, on a production server), you do so at your own risk.\n\nUpdate Note\n===========\nPlease use this repository instead of ppa:fkrull/deadsnakes.\n\nReporting Issues\n================\n\nIssues can be reported in the master issue tracker at:\nhttps://github.com/deadsnakes/issues/issues\n\nSupported Ubuntu and Python Versions\n====================================\n\n- Ubuntu 20.04 (focal) Python3.5 - Python3.7, Python3.9 - Python3.13\n- Ubuntu 22.04 (jammy) Python3.7 - Python3.9, Python3.11 - Python3.13\n- Note: Python2.7 (all), Python 3.8 (focal), Python 3.10 (jammy) are not provided by deadsnakes as upstream ubuntu provides those packages.\n\nWhy some packages aren't built:\n- Note: for focal, older python versions require libssl\u003c1.1 so they are not currently built\n- Note: for jammy, older python versions requre libssl\u003c3 so they are not currently built\n- If you need these, reach out to asottile to set up a private ppa\n\nThe packages may also work on other versions of Ubuntu or Debian, but that is not tested or supported.\n\nPackages\n========\n\nThe packages provided here are loosely based on the 
debian upstream packages with some modifications to make them more usable as non-default pythons and on ubuntu. As such, the packages follow debian's patterns and often do not include a full python distribution with just `apt install python#.#`. Here is a list of packages that may be useful along with the default install:\n\n- `python#.#-dev`: includes development headers for building C extensions\n- `python#.#-venv`: provides the standard library `venv` module\n- `python#.#-distutils`: provides the standard library `distutils` module\n- `python#.#-lib2to3`: provides the `2to3-#.#` utility as well as the standard library `lib2to3` module\n- `python#.#-gdbm`: provides the standard library `dbm.gnu` module\n- `python#.#-tk`: provides the standard library `tkinter` module\n\nThird-Party Python Modules\n==========================\n\nPython modules in the official Ubuntu repositories are packaged to work with the Python interpreters from the official repositories. Accordingly, they generally won't work with the Python interpreters from this PPA. As an exception, pure-Python modules for Python 3 will work, but any compiled extension modules won't.\n\nTo install 3rd-party Python modules, you should use the common Python packaging tools. 
For an introduction into the Python packaging ecosystem and its tools, refer to the Python Packaging User Guide:\nhttps://packaging.python.org/installing/\n\nSources\n=======\nThe package sources are available at:\nhttps://github.com/deadsnakes/\n\nNightly Builds\n==============\n\nFor nightly builds, see ppa:deadsnakes/nightly https://launchpad.net/~deadsnakes/+archive/ubuntu/nightly", "signing_key_fingerprint": "F23C5A6CF475977595C89F51BA6932366A755776", "require_virtualized": true, "build_debug_symbols": false, "publish_debug_symbols": false, "permit_obsolete_series_uploads": false, "authorized_size": 10240, "status": "Active", "external_dependencies": null, "processors_collection_link": "https://api.launchpad.net/devel/~deadsnakes/+archive/ubuntu/ppa/processors", "enabled_restricted_processors_collection_link": "https://api.launchpad.net/devel/~deadsnakes/+archive/ubuntu/ppa/enabled_restricted_processors", "publishing_method": "Local", "repository_format": "Debian", "publish": true, "relative_build_score": 0, "http_etag": "\"e23cc285682ec6a9eb87828016a9b36731e6dc4d-841a2599806ee01fd2d7dc9450f94c9cd2dba95c\""}
\ No newline at end of file
diff --git a/.local/share/Trash/info/train_005.bin.trashinfo b/.local/share/Trash/info/train_005.bin.trashinfo
new file mode 100644
index 0000000000000000000000000000000000000000..b806a7deffe6759af2e504545f75a6cbf32016c6
--- /dev/null
+++ b/.local/share/Trash/info/train_005.bin.trashinfo
@@ -0,0 +1,3 @@
+[Trash Info]
+Path=/root/data/fineweb/train_005.bin
+DeletionDate=2024-09-26T05:50:34
diff --git a/.local/share/jupyter/nbextensions/exercise2/main.js b/.local/share/jupyter/nbextensions/exercise2/main.js
new file mode 100644
index 0000000000000000000000000000000000000000..f83609104d7ac7ccbadd5be78bf98324f494cdb4
--- /dev/null
+++ b/.local/share/jupyter/nbextensions/exercise2/main.js
@@ -0,0 +1,169 @@
+// Copyright (c) IPython-Contrib Team.
+// Distributed under the terms of the Modified BSD License.
+
+// Hide or display solutions in a notebook
+
+// dec 6, 2017 @jcb91: use bootstrap 'hidden' class to play nicely with collapsible_headings
+// december 30, 2015: update to notebook 4.1.x
+// updated on december 22, 2015 to allow consecutive exercises
+// exercise2: built by @jfbercher from an earlier work by @junasch (october 2015) - see readme.md
+
define([
    'base/js/namespace',
    'jquery',
    'require',
    'base/js/events',
], function(IPython, $, requirejs, events) {
    "use strict";

    // Default configuration; overridden by the notebook's server-side
    // config inside load_ipython_extension().
    var cfg = {
        add_button: true,
        use_hotkey: true,
        hotkey: 'Alt-D',
    };
+
+ /**
+ * handle click event
+ *
+ * @method click_solution_lock
+ * @param evt {Event} jquery event
+ */
+ function click_solution_lock(evt) {
+ var cell = IPython.notebook.get_selected_cell();
+ var is_locked = cell.metadata.solution2 === 'hidden';
+ cell.metadata.solution2 = is_locked ? 'shown' : 'hidden';
+ element_set_locked(cell, !is_locked);
+ cell = IPython.notebook.get_next_cell(cell);
+ while (cell !== null && cell.metadata.solution2 !== undefined && !cell.metadata.solution2_first) {
+ cell.element.toggleClass('hidden', !is_locked);
+ cell.metadata.solution2 = is_locked ? 'shown' : 'hidden';
+ cell = IPython.notebook.get_next_cell(cell);
+ }
+ }
+
+ /**
+ * Create or Remove an exercise in selected cells
+ *
+ * @method create_remove_exercise
+ *
+ */
+ function create_remove_exercise() {
+ var lcells = IPython.notebook.get_selected_cells();
+ // It is possible that no cell is selected
+ if (lcells.length < 1) {
+ alert("Exercise extension: \nPlease select some cells...");
+ return;
+ }
+
+ var cell = lcells[0];
+ if (cell.metadata.solution2_first) {
+ remove_element(cell);
+ delete cell.metadata.solution2_first;
+ while (cell !== null && cell.metadata.solution2 !== undefined && !cell.metadata.solution2_first) {
+ delete cell.metadata.solution2;
+ cell.element.removeClass('hidden');
+ cell = IPython.notebook.get_next_cell(cell);
+ }
+ }
+ else {
+ cell.metadata.solution2_first = true;
+ cell.metadata.solution2 = 'hidden';
+ add_element(cell);
+ for (var k = 1; k < lcells.length; k++) {
+ cell = lcells[k];
+ cell.element.addClass('hidden');
+ cell.metadata.solution2 = 'hidden';
+ }
+ }
+ }
+
    /**
     * Add a lock control to the given cell
     */
    var cbx = 0;  // counter used to give each checkbox a unique id
    function add_element(cell) {
        // Reuse an existing widget if this cell already has one.
        var ctrl = cell.element.find('.exercise');
        if (ctrl.length > 0) return ctrl;
        var locked = cell.metadata.solution2 === 'hidden';
        cell.element.css('flex-wrap', 'wrap');
        cbx += 1;
        // NOTE(review): the markup strings below appear corrupted/stripped
        // in this copy (the HTML for the checkbox widget is missing) —
        // restore them from the upstream exercise2/main.js before shipping.
        ctrl = $([
            '
',
            ' ',
            '
',
            ' ',
            ' ',
            '
',
            '
'
        ].join('\n'))
        .appendTo(cell.element);
        // Clicking the checkbox toggles the following solution cells.
        ctrl.find('input')
            .on('click', click_solution_lock);
        element_set_locked(cell, locked);
        return ctrl;
    }
+
+ function remove_element(cell) {
+ cell.element.find('.exercise').remove();
+ }
+
+ function element_set_locked(cell, locked) {
+ return cell.element.find('.exercise')
+ .prop('checked', !locked);
+ }
+
+ function refresh_exercises() {
+ var in_exercise = false;
+ IPython.notebook.get_cells().forEach(function(cell) {
+ if (in_exercise && cell.metadata.solution2 !== undefined && !cell.metadata.solution2_first) {
+ cell.element.toggleClass('hidden', cell.metadata.solution2 === 'hidden');
+ } else {
+ in_exercise = false;
+ }
+ if (!in_exercise && cell.metadata.solution2 !== undefined) {
+ in_exercise = true;
+ add_element(cell);
+ }
+ });
+ }
+
    // Extension entry point: inject CSS, restore exercise state, register
    // the toolbar action and the optional keyboard shortcut.
    function load_ipython_extension() {
        // add css
        // NOTE(review): the link-element markup is missing in this copy —
        // upstream builds $('<link rel="stylesheet" type="text/css" />');
        // restore before shipping.
        $('')
            .attr('href', requirejs.toUrl('./main.css'))
            .appendTo('head');

        // Hide/display existing solutions at startup
        events.on('notebook_loaded.Notebook', refresh_exercises);
        if (IPython.notebook._fully_loaded) refresh_exercises();

        var action_name = IPython.keyboard_manager.actions.register({
            help : 'Exercise2: Create/Remove exercise',
            help_index: 'ht',
            icon : 'fa-toggle-on',
            handler : create_remove_exercise,
        }, 'create_remove_exercise', 'exercise2');

        // Apply server-side config (button / hotkey) once it has loaded.
        return IPython.notebook.config.loaded.then(function() {
            $.extend(true, cfg, IPython.notebook.config.data.exercise2);

            if (cfg.add_button) {
                IPython.toolbar.add_buttons_group([action_name]);
            }
            if (cfg.use_hotkey && cfg.hotkey) {
                var cmd_shrts = {};
                cmd_shrts[cfg.hotkey] = action_name;
                IPython.keyboard_manager.command_shortcuts.add_shortcuts(cmd_shrts);
            }
        }).catch(function(err) {
            console.warn('[exercise2] error:', err);
        });
    }
+
    // Public API expected by the notebook's extension loader.
    return {
        load_ipython_extension: load_ipython_extension,
    };
});
diff --git a/.local/share/jupyter/nbextensions/freeze/config.yaml b/.local/share/jupyter/nbextensions/freeze/config.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..035ed83519b4ce112d40b066983116c0e22c995a
--- /dev/null
+++ b/.local/share/jupyter/nbextensions/freeze/config.yaml
@@ -0,0 +1,20 @@
+Type: IPython Notebook Extension
+Name: Freeze
+Description: Freeze cells (forbid editing and executing) or make them read-only
+Link: readme.md
+Icon: icon.png
+Main: main.js
+Compatibility: 4.x, 5.x
+Parameters:
+- name: Freeze.readonly_color
+ description: |
+ Color to use for read-only cell
+ default: '#fffef0'
+ input_type: color
+
+- name: Freeze.frozen_color
+ description: |
+ Color to use for frozen cell
+ default: '#f0feff'
+ input_type: color
+
diff --git a/.local/share/jupyter/nbextensions/freeze/icon.png b/.local/share/jupyter/nbextensions/freeze/icon.png
new file mode 100644
index 0000000000000000000000000000000000000000..d5f88f6a4d8938c72da3ecbded5419b517ad4807
Binary files /dev/null and b/.local/share/jupyter/nbextensions/freeze/icon.png differ
diff --git a/.local/share/jupyter/nbextensions/gist_it/icon.png b/.local/share/jupyter/nbextensions/gist_it/icon.png
new file mode 100644
index 0000000000000000000000000000000000000000..09e37656024aa727c20465d2a11685a2511ccfd4
Binary files /dev/null and b/.local/share/jupyter/nbextensions/gist_it/icon.png differ
diff --git a/.local/share/jupyter/nbextensions/go_to_current_running_cell/auto_focus.gif b/.local/share/jupyter/nbextensions/go_to_current_running_cell/auto_focus.gif
new file mode 100644
index 0000000000000000000000000000000000000000..da3a6160ab503dbb89f1fd7afc568cc92f391628
--- /dev/null
+++ b/.local/share/jupyter/nbextensions/go_to_current_running_cell/auto_focus.gif
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3dc033a545fe3eccdeee6e66932f1a46de4d0cafe084d471165e750ede1dcc4f
+size 1749888
diff --git a/.local/share/jupyter/nbextensions/go_to_current_running_cell/eye.png b/.local/share/jupyter/nbextensions/go_to_current_running_cell/eye.png
new file mode 100644
index 0000000000000000000000000000000000000000..2624611c8d9b99a8a0e7fe3d6b3717ae5f957d6a
Binary files /dev/null and b/.local/share/jupyter/nbextensions/go_to_current_running_cell/eye.png differ
diff --git a/.local/share/jupyter/nbextensions/go_to_current_running_cell/main.js b/.local/share/jupyter/nbextensions/go_to_current_running_cell/main.js
new file mode 100644
index 0000000000000000000000000000000000000000..91f46fd1474bdd771423e6e34b486a2e78604662
--- /dev/null
+++ b/.local/share/jupyter/nbextensions/go_to_current_running_cell/main.js
@@ -0,0 +1,126 @@
+// Copyright (c) Jupyter-Contrib Team.
+// Distributed under the terms of the Modified BSD License.
+
+// This extension allows you to jump to the currently running cell.
+// You can also activate this functionality automatically,
+// i.e., your view always scrolls to the current cell.
+
+//
+// Keyboard shortcuts: Alt-I and Alt-down (works with single cells also -- this is useful!)
+// The extension is simple, create function and then register the action and shortkey separately,
+// so that user can update the shortkey according to their need.
+
+
+
define([
    'base/js/namespace',
    'jquery',
    'require',
    'base/js/events'
], function (Jupyter, $, requirejs, events) {
    "use strict";

    var action_follow_cell_on; // set on registration
    var action_follow_cell_off; // set on registration
    var action_go_to_runing_cell; // set on registration
    // Defaults; may be overridden by server config in update_params().
    var params = {
        is_follow_cell: false,
        go_to_running_cell_shortcut: 'Alt-I',
        follow_cell_on_shortcut: "Alt-;",
        follow_cell_off_shortcut: "Alt-'",
        button_icon: 'fa-anchor'
    };
+
+ function scrollIntoRunningCell(event, data) {
+ $('.running')[0].scrollIntoView({ behavior: 'smooth', inline: 'center' });
+ }
+
+ // update params with any specified in the server's config file
+ var update_params = function () {
+ var config = Jupyter.notebook.config;
+ for (var key in params) {
+ if (config.data.hasOwnProperty(key))
+ params[key] = config.data[key];
+ }
+ };
+
+ // Go to Running cell shortcut
+ function go_to_running_cell(event) {
+
+ // Find running cell and click the first one
+ if ($('.running').length > 0) {
+ $('.running')[0].scrollIntoView();
+ }
+ return false;
+ }
+
+ function follow_running_cell_on(event) {
+ Jupyter.notebook.events.on('finished_execute.CodeCell', scrollIntoRunningCell);
+ return false;
+ }
+
+ function follow_running_cell_off(event) {
+ Jupyter.notebook.events.off('finished_execute.CodeCell', scrollIntoRunningCell);
+ return false;
+ }
+
+ // Register actions to collapse and uncollapse the selected heading cell
+
+ function register_new_actions() {
+ action_go_to_runing_cell = Jupyter.keyboard_manager.actions.register({
+ handler: go_to_running_cell,
+ help: "Go to first executing cell",
+ help_index: 'aa',
+ icon: params.button_icon
+ }, 'Go to first running cell', 'Go To Running Cell'
+ )
+ action_follow_cell_on = Jupyter.keyboard_manager.actions.register({
+ handler: follow_running_cell_on,
+ help: "Follow running cell on",
+ help_index: 'aa'
+ }, 'Follow running cell on', 'Go To Running Cell'
+ )
+ action_follow_cell_off = Jupyter.keyboard_manager.actions.register({
+ handler: follow_running_cell_off,
+ help: "Follow running cell off",
+ help_index: 'aa'
+ }, 'Follow running cell off', 'Go To Running Cell'
+ );
+
+ if (params.is_follow_cell) {
+ Jupyter.notebook.events.on('finished_execute.CodeCell', scrollIntoRunningCell);
+ }
+ }
+
+ // Register keyboard shortcuts according to parameters
+ function register_keyboard_shortcuts() {
+
+ var shortcut, edit_shortcuts = Jupyter.keyboard_manager.command_shortcuts;
+ shortcut = params.go_to_running_cell_shortcut;
+ if (shortcut) {
+ edit_shortcuts.add_shortcut(shortcut, action_go_to_runing_cell);
+ }
+
+ shortcut = params.follow_cell_on_shortcut;
+ if (shortcut) {
+ edit_shortcuts.add_shortcut(shortcut, action_follow_cell_on);
+ }
+
+ shortcut = params.follow_cell_off_shortcut;
+ if (shortcut) {
+ edit_shortcuts.add_shortcut(shortcut, action_follow_cell_off);
+ }
+ }
+
+ function load_ipython_extension() {
+ update_params();
+ register_new_actions();
+ register_keyboard_shortcuts();
+ Jupyter.toolbar.add_buttons_group([action_go_to_runing_cell])
+ }
+
    // Public API expected by the notebook's extension loader.
    return {
        load_ipython_extension: load_ipython_extension,
    };

});
diff --git a/.local/share/jupyter/nbextensions/help_panel/readme.md b/.local/share/jupyter/nbextensions/help_panel/readme.md
new file mode 100644
index 0000000000000000000000000000000000000000..90c6934df72573d0226f7e1de09536af792ff33e
--- /dev/null
+++ b/.local/share/jupyter/nbextensions/help_panel/readme.md
@@ -0,0 +1,15 @@
+Help Panel
+===========
+
+Installing the extension adds a new button to the toolbar:
+
+
+
+On clicking the button, the notebook width is reduced and a side panel is displayed showing help.
+The contents of the help panel are exactly the same as when going to `Keyboard Shortcuts` in the `Help` menu.
+
+
+
+You can drag the sidebar divider to resize it, or click the expand icon at the top left of the bar to get the help panel to expand to fill the screen:
+
+
diff --git a/.local/share/jupyter/nbextensions/hide_input/readme.md b/.local/share/jupyter/nbextensions/hide_input/readme.md
new file mode 100644
index 0000000000000000000000000000000000000000..83a224fd4299750ff70343512ff5992e7f0fba6e
--- /dev/null
+++ b/.local/share/jupyter/nbextensions/hide_input/readme.md
@@ -0,0 +1,51 @@
+Hide Input
+==========
+
+This extension allows hiding of an individual codecell in a notebook. This can
+be achieved by clicking on the toolbar button:
+
+
+
+
+Internals
+---------
+
+The codecell hiding state is stored in the metadata `cell.metadata.hide_input`.
+If it is set to `true`, the codecell will be hidden on reload.
+
+
+Exporting with nbconvert
+------------------------
+
+See also the general docs for exporting using nbconvert at
+[jupyter-contrib-nbextensions.readthedocs.io](https://jupyter-contrib-nbextensions.readthedocs.io/en/latest/).
+
+To export a notebook with hidden cell inputs using nbconvert, you need to use a
+custom template.
+The required template is supplied as part of
+`jupyter_contrib_nbextensions.nbconvert_support`, or you can roll your own
+using the provided ones as examples. Again, see the docs linked above for more
+information.
+
+The `nbextensions.tpl` template is provided in the
+`jupyter_contrib_nbextensions.nbconvert_support` templates directory (see the
+docs mentioned above for how to find it)
+
+To use, add the template to your `nbconvert` call:
+
+ jupyter nbconvert --template=nbextensions --to=html my_notebook.ipynb
+
+The nbextensions template will respect the `cell.metadata.hide_input` flag, and
+filter the cell's output prompt (the bit that looks like `Out[27]:`).
+The filter is only used for html output, not for PDF or LaTeX output.
+
+If you want to _keep_ the cell output prompt, you will have to remove the lines
+
+ {% block output_group -%}
+ {%- if cell.metadata.hide_output or nb.metadata.hide_input -%}
+ {%- else -%}
+ {{ super() }}
+ {%- endif -%}
+ {% endblock output_group %}
+
+in the `nbextensions.tpl` file.
diff --git a/.local/share/jupyter/nbextensions/highlight_selected_word/configurator.yaml b/.local/share/jupyter/nbextensions/highlight_selected_word/configurator.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d215ac18c11c6a58638345018f9993b0108398de
--- /dev/null
+++ b/.local/share/jupyter/nbextensions/highlight_selected_word/configurator.yaml
@@ -0,0 +1,131 @@
+Type: Jupyter Notebook Extension
+Compatibility: 4.x, 5.x
+Name: Highlight selected word
+Main: main.js
+Description: Enables the CodeMirror addon "Match Highlighter"
+Link: README.md
+Parameters:
+
+- name: highlight_selected_word.enable_on_load
+ input_type: checkbox
+ default: true
+ description: |
+ Enable highlighting on loading the notebook interface.
+ The highlighting can also be toggled from the view menu
+
+- name: highlight_selected_word.highlight_across_all_cells
+ input_type: checkbox
+ default: true
+ description: |
+ Highlight matches across all cells. If false, only matches within the
+ currently selected cell will be highlighted.
+
+- name: highlight_selected_word.code_cells_only
+ input_type: checkbox
+ default: false
+ description: |
+ Only apply highlights to editors for Code cells, not, for example, Markdown
+ or Raw cells
+
+- name: highlight_selected_word.highlight_color
+ input_type: color
+ default: '#90EE90'
+ description: Color used to highlight matching words in the focussed cell
+
+- name: highlight_selected_word.highlight_color_blurred
+ input_type: color
+ default: '#BBFFBB'
+ description: Color used to highlight matching words in blurred (non-active) cells
+
+- name: highlight_selected_word.outlines_only
+ input_type: checkbox
+ default: false
+ description: |
+ Highlight words using just an outline, rather than the background color
+
+- name: highlight_selected_word.outline_width
+ input_type: number
+ default: 1
+ min: 0.5
+ step: 0.5
+ description: |
+ Width, in pixels, of the outline used to highlight words when the
+ outline-only setting is selected.
+
+- name: highlight_selected_word.delay
+ input_type: number
+ default: 100
+ min: 0
+ step: 1
+ description: 'Wait time, in milliseconds, before highlighting the matches'
+
+- name: highlight_selected_word.words_only
+ input_type: checkbox
+ default: false
+ description: Only highlight matches if the selected text is a whole word
+
+- name: highlight_selected_word.highlight_only_whole_words
+ input_type: checkbox
+ default: true
+ description: |
+ Only highlight matches when they are surrounded by non-word characters, as
+ determined by the token below (if set), or the default regex '[\w$]'.
+
+- name: highlight_selected_word.show_token
+ input_type: text
+ default: '[\w$]' # single-quote strings in yaml are like python raw strings
+ description: |
+ Token (regex) to identify word characters, used to determine what to
+ highlight when nothing is selected. If blank, nothing is highlighted when
+ nothing is selected.
+
+- name: highlight_selected_word.min_chars
+ input_type: number
+ default: 2
+ min: 0
+ step: 1
+ description: |
+ Minimum number of characters that must be selected for the highlighting
+ to occur (assuming no token is set for use when nothing is selected)
+
+- name: highlight_selected_word.trim
+ input_type: checkbox
+ default: true
+ description: |
+ Trim whitespace from selection text before checking for minimum length
+
+- name: highlight_selected_word.use_toggle_hotkey
+ input_type: checkbox
+ default: false
+ description: |
+ Bind the highlight_selected_word:toggle action to a hotkey
+
+- name: highlight_selected_word.toggle_hotkey
+ input_type: hotkey
+ default: 'alt-h'
+ description: |
+ Hotkey to bind to the highlight_selected_word:toggle action (if selected
+ for use, above)
+
+- name: highlight_selected_word.only_cells_in_scroll
+ input_type: checkbox
+ default: true
+ description: |
+ Only apply highlights to editors which are visible in the scrolled view.
+ This may offer performance benefits for larger notebooks
+
+- name: highlight_selected_word.scroll_min_delay
+ input_type: number
+ default: 100
+ min: 0
+ step: 10
+ description: |
+ Minimum delay in ms between updating highlights on scrolling the notebook
+ (used only if limiting highlights to those in scrolled view, see above).
+ If set to zero, no update is done on scroll.
+
+- name: highlight_selected_word.hide_selections_in_unfocussed
+ input_type: checkbox
+ default: false
+ description: |
+ Hide any text selection in non-focussed cells (can be confused with match highlights).
diff --git a/.local/share/jupyter/nbextensions/init_cell/main.js b/.local/share/jupyter/nbextensions/init_cell/main.js
new file mode 100644
index 0000000000000000000000000000000000000000..a971bd8a80c16d8354e7b4efb4c54ef9a209db44
--- /dev/null
+++ b/.local/share/jupyter/nbextensions/init_cell/main.js
@@ -0,0 +1,157 @@
+define([
+ 'jquery',
+ 'base/js/dialog',
+ 'base/js/events',
+ 'base/js/namespace',
+ 'notebook/js/celltoolbar',
+ 'notebook/js/codecell',
+], function (
+ $,
+ dialog,
+ events,
+ Jupyter,
+ celltoolbar,
+ codecell
+) {
+ "use strict";
+
+ var CellToolbar = celltoolbar.CellToolbar;
+
+ var mod_name = 'init_cell';
+ var log_prefix = '[' + mod_name + ']';
+ var options = { // updated from server's config & nb metadata
+ run_on_kernel_ready: true,
+ };
+
+ var toolbar_preset_name = 'Initialization Cell';
+ var init_cell_ui_callback = CellToolbar.utils.checkbox_ui_generator(
+ toolbar_preset_name,
+ function setter (cell, value) {
+ if (value) {
+ cell.metadata.init_cell = true;
+ }
+ else {
+ delete cell.metadata.init_cell;
+ }
+ },
+ function getter (cell) {
+ // if init_cell is undefined, it'll be interpreted as false anyway
+ return cell.metadata.init_cell;
+ }
+ );
+
+ function count_init_cells () {
+ console.log(log_prefix, 'counting initialization cells');
+ var num = 0;
+ var cells = Jupyter.notebook.get_cells();
+ for (var ii = 0; ii < cells.length; ii++) {
+ var cell = cells[ii];
+ if ((cell instanceof codecell.CodeCell) && cell.metadata.init_cell === true ) {
+ num++;
+ }
+ }
+ console.log(log_prefix, 'found ' + num + ' initialization cell' + (num !== 1 ? 's' : ''));
+ return num
+ }
+
+ function run_init_cells () {
+ console.log(log_prefix, 'running all initialization cells');
+ var num = 0;
+ var cells = Jupyter.notebook.get_cells();
+ for (var ii = 0; ii < cells.length; ii++) {
+ var cell = cells[ii];
+ if ((cell instanceof codecell.CodeCell) && cell.metadata.init_cell === true ) {
+ cell.execute();
+ num++;
+ }
+ }
+ console.log(log_prefix, 'finished running ' + num + ' initialization cell' + (num !== 1 ? 's' : ''));
+ }
+
+ var load_ipython_extension = function() {
+ // register action
+ var prefix = 'auto';
+ var action_name = 'run-initialization-cells';
+ var action = {
+ icon: 'fa-calculator',
+ help: 'Run all initialization cells',
+ help_index : 'zz',
+ handler : run_init_cells
+ };
+ var action_full_name = Jupyter.notebook.keyboard_manager.actions.register(action, action_name, prefix);
+
+ // add toolbar button
+ Jupyter.toolbar.add_buttons_group([action_full_name]);
+
+ // setup things to run on loading config/notebook
+ Jupyter.notebook.config.loaded
+ .then(function update_options_from_config () {
+ $.extend(true, options, Jupyter.notebook.config.data[mod_name]);
+ }, function (reason) {
+ console.warn(log_prefix, 'error loading config:', reason);
+ })
+ .then(function () {
+ if (Jupyter.notebook._fully_loaded) {
+ callback_notebook_loaded();
+ }
+ events.on('notebook_loaded.Notebook', callback_notebook_loaded);
+ }).catch(function (reason) {
+ console.error(log_prefix, 'unhandled error:', reason);
+ });
+ };
+
+ function callback_notebook_loaded () {
+ // update from metadata
+ var md_opts = Jupyter.notebook.metadata[mod_name];
+ if (md_opts !== undefined) {
+ console.log(log_prefix, 'updating options from notebook metadata:', md_opts);
+ $.extend(true, options, md_opts);
+ }
+
+ // register celltoolbar presets if they haven't been already
+ if (CellToolbar.list_presets().indexOf(toolbar_preset_name) < 0) {
+ // Register a callback to create a UI element for a cell toolbar.
+ CellToolbar.register_callback('init_cell.is_init_cell', init_cell_ui_callback, 'code');
+ // Register a preset of UI elements forming a cell toolbar.
+ CellToolbar.register_preset(toolbar_preset_name, ['init_cell.is_init_cell'], Jupyter.notebook);
+ }
+
+ if (options.run_on_kernel_ready) {
+ var num = count_init_cells();
+
+ if (num) {
+ if (Jupyter.notebook.trusted) {
+ run_init_cells_asap()
+ }
+ else {
+ dialog.modal({
+ title : 'Untrusted notebook with initialization code',
+ body : num + ' initialization code cell' + (num !== 1 ? 's' : '') + ' was found but not run since this notebook is untrusted.',
+ buttons: {
+ 'Trust notebook': {
+ 'class' : 'btn-danger',
+ 'click' : () => Jupyter.notebook.trust_notebook()
+ },
+ 'Do nothing': {'class' : 'btn-primary'}
+ },
+ notebook: Jupyter.notebook,
+ keyboard_manager: Jupyter.keyboard_manager,
+ });
+ }
+ }
+ }
+ }
+
+ function run_init_cells_asap () {
+ if (Jupyter.notebook && Jupyter.notebook.kernel && Jupyter.notebook.kernel.info_reply.status === 'ok') {
+ // kernel is already ready
+ run_init_cells();
+ }
+ // whenever a (new) kernel becomes ready, run all initialization cells
+ events.on('kernel_ready.Kernel', run_init_cells);
+ }
+
+ return {
+ load_ipython_extension : load_ipython_extension
+ };
+});
diff --git a/.local/share/jupyter/nbextensions/keyboard_shortcut_editor/keyboard_shortcut_editor.yaml b/.local/share/jupyter/nbextensions/keyboard_shortcut_editor/keyboard_shortcut_editor.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..587b58940bbfdfb53cd2a0c6d23871b79a7175fc
--- /dev/null
+++ b/.local/share/jupyter/nbextensions/keyboard_shortcut_editor/keyboard_shortcut_editor.yaml
@@ -0,0 +1,12 @@
+Type: IPython Notebook Extension
+Compatibility: 4.x, 5.x
+Name: Keyboard shortcut editor
+Main: main.js
+Icon: icon.png
+Link: README.md
+Description: Edit or remove Jupyter keyboard shortcuts, or add your own new ones
+Parameters:
+- name: kse_show_rebinds
+ description: "Show shortcut editing controls in the shortcuts dialog. If this is false, shortcuts can't be edited directly from the notebook, but any existing edits are still applied. Useful essentially just to make the shortcuts dialog a bit cleaner"
+ input_type: checkbox
+ default: true
diff --git a/.local/share/jupyter/nbextensions/keyboard_shortcut_editor/main.css b/.local/share/jupyter/nbextensions/keyboard_shortcut_editor/main.css
new file mode 100644
index 0000000000000000000000000000000000000000..22e7b9ccab8216727fa3f7929ce1e4d77eeee141
--- /dev/null
+++ b/.local/share/jupyter/nbextensions/keyboard_shortcut_editor/main.css
@@ -0,0 +1,40 @@
+.kse-dropdown {
+ margin-left: 0.5em;
+}
+
+.kse-dropdown > .dropdown-menu {
+ min-width: 0;
+ top: 20px;
+}
+
+.kse-input-group-pretty {
+ min-width: 20ex;
+}
+
+.kse-modal-backdrop {
+ background-color: #fff;
+}
+
+.kse-input-group-reset,
+.kse-input-group-pretty {
+ border-right: none;
+}
+
+.kse-input-group-pretty > kbd {
+ color: black;
+ font-weight: bold;
+}
+
+.kse-editor .help-block > p {
+ margin-bottom: 10px;
+}
+
+.kse-editor select {
+ display: inline-block;
+ width: auto;
+ margin: 0;
+}
+
+.kse-links .fa {
+ margin-right: 2px;
+}
\ No newline at end of file
diff --git a/.local/share/jupyter/nbextensions/keyboard_shortcut_editor/readme_reset_disabled.png b/.local/share/jupyter/nbextensions/keyboard_shortcut_editor/readme_reset_disabled.png
new file mode 100644
index 0000000000000000000000000000000000000000..97e9f476e706f68332a11107e40dfb1d1eca1dd3
Binary files /dev/null and b/.local/share/jupyter/nbextensions/keyboard_shortcut_editor/readme_reset_disabled.png differ
diff --git a/.local/share/jupyter/nbextensions/limit_output/icon.png b/.local/share/jupyter/nbextensions/limit_output/icon.png
new file mode 100644
index 0000000000000000000000000000000000000000..c18179faff90b3f37dc5fb9e42fa1d657602599b
Binary files /dev/null and b/.local/share/jupyter/nbextensions/limit_output/icon.png differ
diff --git a/.local/share/jupyter/nbextensions/limit_output/main.js b/.local/share/jupyter/nbextensions/limit_output/main.js
new file mode 100644
index 0000000000000000000000000000000000000000..28d86d853966978c0c639b28062440c6605d6340
--- /dev/null
+++ b/.local/share/jupyter/nbextensions/limit_output/main.js
@@ -0,0 +1,133 @@
+// Restrict output in a codecell to a maximum length
+
+define([
+ 'base/js/namespace',
+ 'notebook/js/outputarea',
+ 'notebook/js/codecell',
+], function(
+ Jupyter,
+ oa,
+ cc
+) {
+ "use strict";
+
+ // define default values for config parameters
+ var params = {
+ // maximum number of characters the output area is allowed to print
+ limit_output : 10000,
+ limit_stream : true,
+ limit_execute_result : true,
+ limit_display_data : false,
+ // message to print when output is limited
+ limit_output_message : 'limit_output extension: Maximum message size of {limit_output_length} exceeded with {output_length} characters'
+ };
+
+ // to be called once config is loaded, this updates default config vals
+ // with the ones specified by the server's config file
+ var update_params = function() {
+ var config = Jupyter.notebook.config;
+ for (var key in params) {
+ if (config.data.hasOwnProperty(key) ){
+ params[key] = config.data[key];
+ }
+ }
+ };
+
+ function is_finite_number (n) {
+ n = parseFloat(n);
+ return !isNaN(n) && isFinite(n);
+ }
+
+ var initialize = function () {
+ update_params();
+ // sometimes limit_output metadata val can get stored as a string
+ params.limit_output = parseFloat(params.limit_output);
+ var old_handle_output = oa.OutputArea.prototype.handle_output;
+ oa.OutputArea.prototype.handle_output = function (msg) {
+ var handled_msg_types = ['stream', 'execute_result', 'display_data'];
+ if (handled_msg_types.indexOf(msg.header.msg_type) < 0) {
+ return old_handle_output.apply(this, arguments);
+ }
+ else {
+ // get MAX_CHARACTERS from cell metadata if present, otherwise param
+ //msg.header.msg_type
+ var MAX_CHARACTERS = params.limit_output;
+ var cell_metadata = this.element.closest('.cell').data('cell').metadata;
+ if (is_finite_number(cell_metadata.limit_output)) {
+ MAX_CHARACTERS = parseFloat(cell_metadata.limit_output);
+ }
+
+ // read the length of already-appended outputs from our data
+ var count = this.element.data('limit_output_count') || 0;
+ // update count with the length of this message
+ var old_count = count;
+ if (msg.header.msg_type === "stream" && params.limit_stream) {
+ count += String(msg.content.text).length;
+ }
+ else {
+ if ((msg.header.msg_type === "execute_result" && params.limit_execute_result) ||
+ (msg.header.msg_type === "display_data" && params.limit_display_data)) {
+ count += Math.max(
+ (msg.content.data['text/plain'] === undefined) ? 0 : String(msg.content.data['text/plain']).length,
+ (msg.content.data['text/html'] === undefined) ? 0 : String(msg.content.data['text/html']).length
+ );
+ }
+
+ }
+ // save updated count
+ this.element.data('limit_output_count', count);
+
+ if (count <= MAX_CHARACTERS) {
+ return old_handle_output.apply(this, arguments);
+ }
+ // if here, we'd exceed MAX_CHARACTERS with addition of this message.
+ if (old_count <= MAX_CHARACTERS) {
+ // Apply truncated portion of this message
+ var to_add = MAX_CHARACTERS - old_count;
+ if (msg.header.msg_type === "stream") {
+ msg.content.text = msg.content.text.substr(0, to_add);
+ }
+ else {
+ if (msg.content.data['text/plain'] !== undefined) {
+ msg.content.data['text/plain'] = msg.content.data['text/plain'].substr(0, to_add);
+ }
+ if (msg.content.data['text/html'] !== undefined) {
+ msg.content.data['text/html'] = msg.content.data['text/html'].substr(0, to_add);
+ }
+ }
+ old_handle_output.apply(this, arguments);
+
+ // display limit notification messages
+ console.log(
+ "limit_output: Maximum message size of", MAX_CHARACTERS,
+ "exceeded with", count, "characters. Further output muted."
+ );
+ // allow simple substitutions for output length for quick debugging
+ var limitmsg = params.limit_output_message.replace("{message_type}", msg.header.msg_type)
+ .replace("{limit_output_length}", MAX_CHARACTERS)
+ .replace("{output_length}", count);
+ this.append_output({
+ "output_type": "display_data",
+ "metadata": {}, // included to avoid warning
+ "data": {"text/html": limitmsg}
+ });
+ }
+ }
+ };
+
+ var old_clear_output = oa.OutputArea.prototype.clear_output;
+ oa.OutputArea.prototype.clear_output = function () {
+ // reset counter on execution.
+ this.element.data('limit_output_count', 0);
+ return old_clear_output.apply(this, arguments);
+ };
+ };
+
+ var load_ipython_extension = function() {
+ return Jupyter.notebook.config.loaded.then(initialize);
+ };
+
+ return {
+ load_ipython_extension : load_ipython_extension
+ };
+});
diff --git a/.local/share/jupyter/nbextensions/limit_output/readme.md b/.local/share/jupyter/nbextensions/limit_output/readme.md
new file mode 100644
index 0000000000000000000000000000000000000000..48a3d2a3baab578f2e2434481b18e5767c00f794
--- /dev/null
+++ b/.local/share/jupyter/nbextensions/limit_output/readme.md
@@ -0,0 +1,48 @@
+Limit Output
+============
+
+
+Description
+-----------
+
+This extension limits the number of characters a codecell will output as text
+or HTML.
+This also allows the interruption of endless loops of print commands.
+
+[](https://youtu.be/U26ujuPXf00)
+
+You can set the number of characters using the ConfigManager:
+
+```python
+from notebook.services.config import ConfigManager
+cm = ConfigManager().update('notebook', {'limit_output': 1000})
+```
+
+or by using the [jupyter_nbextensions_configurator](https://github.com/Jupyter-contrib/jupyter_nbextensions_configurator)
+
+The limit can also be set for an individual cell, using the cell's
+`cell.metadata.limit_output`.
+
+
+Internals
+---------
+
+Three types of messages are intercepted: `stream`, `execute_result`, and
+`display_data`. For `stream`-type messages, the text string length is limited
+to `limit_output` number of characters.
+For other message types, `text/plain` and `text/html` content length is
+counted, and if either exceeds `limit_output` characters will be truncated to
+`limit_output` number of characters.
+
+The `limit_output_message` parameter can be formatted to display the
+`limit_output` length and the current `output_length`, using the respective
+replacement fields `{limit_output_length}` and `{output_length}`.
+
+### Parameter Overview
+
+* limit_output - Number of characters to limit output to
+* limit_stream - Enable limiting stream messages
+* limit_execute_result - Enable limiting execute_result messages
+* limit_display_data - Enable limiting display_data messages
+* limit_output_message - Message to append when output is limited
+
diff --git a/.local/share/jupyter/nbextensions/livemdpreview/livemdpreview.js b/.local/share/jupyter/nbextensions/livemdpreview/livemdpreview.js
new file mode 100644
index 0000000000000000000000000000000000000000..055d88bc9ad8e0b190f979a537487d4f4b89f4b2
--- /dev/null
+++ b/.local/share/jupyter/nbextensions/livemdpreview/livemdpreview.js
@@ -0,0 +1,112 @@
+define([
+ 'jquery',
+ 'require',
+ 'base/js/namespace',
+ 'base/js/events',
+ 'base/js/utils',
+ 'notebook/js/cell',
+ 'notebook/js/textcell',
+ 'codemirror/lib/codemirror',
+], function (
+ $,
+ requirejs,
+ Jupyter,
+ events,
+ utils,
+ cell_mod,
+ textcell,
+ CodeMirror
+) {
+ "use strict";
+
+ var LiveMdPreviewer = function(options) {
+ options = $.extend(true, {}, this._default_options, options);
+ this.show_side_by_side = options.show_side_by_side;
+ this.timeout = Math.max(50, options.timeout);
+
+ this.addCSS();
+ var lmdp = this;
+ // Change any existing cells:
+ Jupyter.notebook.get_cells().forEach(function (cell) {
+ lmdp.registerCell(cell);
+ });
+ // Ensure we also apply to new cells:
+ events.on('create.Cell', function (evt, data) { lmdp.registerCell(data.cell); });
+ };
+
+ LiveMdPreviewer.prototype._default_options = {
+ show_side_by_side: false,
+ timeout : 500,
+ };
+
+ /**
+ * do work of rendering the markdown cell, without triggering the rendered
+ * event, or altering classes on elements
+ */
+ var previewMdCell = function(cell) {
+ var cached_trigger = cell.events.trigger;
+ cell.events.trigger = function (eventType) {
+ if (eventType !== "rendered.MarkdownCell") {
+ return cached_trigger.apply(this, arguments);
+ }
+ return this;
+ };
+
+ var Cell = cell_mod.Cell;
+ var cached_render = Cell.prototype.render;
+ Cell.prototype.render = function () {
+ return true;
+ };
+
+ try {
+ cell.render();
+ }
+ finally {
+ cell.events.trigger = cached_trigger;
+ Cell.prototype.render = cached_render;
+ }
+ };
+
+ LiveMdPreviewer.prototype.registerCell = function(cell) {
+ if (!(cell instanceof textcell.TextCell)) {
+ return;
+ }
+ var timeout = this.timeout;
+ cell.code_mirror.on('changes', function onCodeMirrorChanges (cm, changes) {
+ if (!cm.state.livemdpreview) {
+ cm.state.livemdpreview = setTimeout(function () {
+ var cell = $(cm.getWrapperElement()).closest('.cell').data('cell');
+ previewMdCell(cell);
+ delete cm.state.livemdpreview;
+ }, timeout);
+ }
+ });
+ };
+
+ LiveMdPreviewer.prototype.addCSS = function () {
+ var styles_elem = $('#livemdpreviewstyles');
+ if (styles_elem.length < 1) {
+ styles_elem = $('')
+
+ $("#maintoolbar-container").append(nbTranslate_toolbar);
+ $("#nbTranslate_toolbar").css({ 'padding': '5px' });
+
+
+ // Initializing toggle checks
+ $('#sourceItem_' + langs[sourceLang] + ' > .fa').toggleClass('fa-check', true)
+ $('#targetItem_' + langs[targetLang] + ' > .fa').toggleClass('fa-check', true)
+
+ for (var langIndex in conf.displayLangs) {
+ var lang = conf.displayLangs[langIndex];
+ if (typeof lang === 'string' || lang instanceof String){
+ $('#displayItem_' + lang + ' .fa')
+ .toggleClass('fa-check', true)
+ }
+ if (conf.displayLangs.indexOf('*')> -1)
+ $('#displayItem_all > .fa').toggleClass('fa-check', true)
+ }
+}
+
+/*
+function create_lang_menu(callback) {
+
+ if ($('#LangSelectionMenu').length > 0) {
+ return;
+ }
+ var displayLangChoiceClone = $('#displayLangChoice').clone()
+
+ $('#help_menu').parent().before('')
+ $('#LangSelectionMenu').addClass('dropdown')
+ .append($('').attr('href', '#')
+ .addClass('dropdown-toogle')
+ .attr('data-toggle', "dropdown")
+ .attr('aria-expanded', "false")
+ .text("Langs"))
+ .append(displayLangChoiceClone)
+ }
+
+*/
\ No newline at end of file
diff --git a/.local/share/jupyter/nbextensions/notify/notification.png b/.local/share/jupyter/nbextensions/notify/notification.png
new file mode 100644
index 0000000000000000000000000000000000000000..0caae96068fee6954824325807a9c9ff47316520
Binary files /dev/null and b/.local/share/jupyter/nbextensions/notify/notification.png differ
diff --git a/.local/share/jupyter/nbextensions/notify/notify.js b/.local/share/jupyter/nbextensions/notify/notify.js
new file mode 100644
index 0000000000000000000000000000000000000000..2bf440943fcf60215cc3d7f6929366dc540f2b34
--- /dev/null
+++ b/.local/share/jupyter/nbextensions/notify/notify.js
@@ -0,0 +1,203 @@
+/*
+
+*************************
+Display Web Notifications
+*************************
+
+Add this file to $(ipython locate)/nbextensions/
+
+*/
+
+define([
+ "jquery",
+ "base/js/namespace",
+ "require",
+], function ($, Jupyter, requirejs) {
+ "use strict";
+
+ var params = {
+ sticky: false,
+ play_sound: false
+ };
+ var audio_file = "./notify.mp3";
+
+ var current_time = function() {
+ return new Date().getTime() / 1000;
+ };
+
+ var start_time = current_time();
+ var min_time = 0;
+ var enabled = false;
+ var first_start = true;
+ var busy_kernel = true;
+
+ var add_permissions_list = function () {
+ var ipython_toolbar = $('#maintoolbar-container');
+ var label = $('').addClass("navbar-text permissions-list").text('Notify:');
+ var select = $('')
+ .attr('id', 'permissions-select')
+ .attr('class', 'permissions-list form-control select-xs')
+ .append($('')
+ .attr('value', 'Disabled')
+ .text('Disabled'));
+ ipython_toolbar.append(label).append(select);
+ select.change(function() {
+ var val = $(this).val();
+ if (val == 'Disabled') {
+ enabled = false;
+ } else {
+ enabled = true;
+ min_time = val;
+ }
+ save_state();
+ });
+ // Add options in addition to the default, 'Disabled'
+ // Options give the minimum kernel busy time in seconds after which a notification is displayed
+ var presets = [0, 5, 10, 30];
+ for (var i=0; i').attr('value', name).text(name));
+ }
+ // Finally, restore the selected option if it was saved in notebook metadata
+ restore_state();
+ };
+
+ var add_permissions_button = function () {
+ if ($("#permissions-button").length === 0) {
+ $(Jupyter.toolbar.add_buttons_group([
+ Jupyter.keyboard_manager.actions.register ({
+ 'help' : 'Grant Notification Permissions',
+ 'icon' : 'fa-check',
+ 'handler': ask_permission,
+ },'grant-notifications-permission', 'notify')
+ ])).find('.btn').attr('id', 'permissions-button');
+ }
+ };
+
+ var ensure_permission = function () {
+ ask_permission(); // Asks for permission on notebook load, doesn't work in Chrome
+ // If don't have permission now, add a button to the toolbar to let user request permission
+ if (Notification && Notification.permission !== "granted") {
+ add_permissions_button();
+ add_permissions_list();
+ $(".permissions-list").hide();
+ } else if (Notification && Notification.permission === "granted") {
+ add_permissions_list();
+ }
+ };
+
+ var ask_permission = function () {
+ if (Notification && Notification.permission !== "granted") {
+ Notification.requestPermission(function (status) {
+ if (Notification.permission !== status) {
+ Notification.permission = status;
+ }
+ // Wait for permission to be granted, then remove the permissions-button and show permissions-list
+ if (Notification && Notification.permission === "granted" && $("#permissions-button").length > 0) {
+ $("#permissions-button").remove();
+ $(".permissions-list").show();
+ }
+ });
+ }
+ };
+
+ var play_notification_sound = function(opts) {
+ /**
+ * NB: the Web Notification API specifies a mechanism for playing sound
+ * with notifications. As of 2017-08-22, it is unsupported in all browsers.
+ * This is a workaround. It should be updated to an implementation like
+ * this when browser support is available:
+ *
+ * opts["sound"] = requirejs.toUrl(audio_file);
+ */
+ try {
+ var audio = new Audio(requirejs.toUrl(audio_file));
+ audio.play();
+ } catch(e) {
+ console.log('HTML5 Audio not supported in browser.');
+ }
+ };
+
+ var notify = function () {
+ var elapsed_time = current_time() - start_time;
+ if (enabled && !first_start && !busy_kernel && elapsed_time >= min_time) {
+ var opts = {
+ body: "Kernel is now idle\n(ran for " + Math.round(elapsed_time) + " secs)",
+ icon: Jupyter.notebook.base_url + "static/base/images/favicon.ico",
+ requireInteraction: params.sticky
+ };
+ if (params.play_sound) {
+ play_notification_sound(opts);
+ }
+ var n = new Notification(Jupyter.notebook.notebook_name, opts);
+ n.onclick = function(event){ window.focus(); }
+ }
+ if (first_start) {
+ first_start = false;
+ }
+ };
+
+ var load_state = function () {
+ if (!Jupyter.notebook) return;
+
+ if ("notify_time" in Jupyter.notebook.metadata) {
+ min_time = Jupyter.notebook.metadata.notify_time;
+ enabled = true;
+ }
+ };
+
+ var save_state = function () {
+ if (enabled) {
+ if (Jupyter.notebook.metadata.notify_time !== min_time) {
+ Jupyter.notebook.metadata.notify_time = min_time;
+ Jupyter.notebook.set_dirty();
+ }
+ } else {
+ if (Jupyter.notebook.metadata.hasOwnProperty('notify_time')) {
+ delete Jupyter.notebook.metadata.notify_time;
+ Jupyter.notebook.set_dirty();
+ }
+ }
+ };
+
+ var restore_state = function () {
+ load_state();
+ // Only proceed if the permissions selector is being shown
+ if ($("#permissions-select").length > 0) {
+ if (!enabled) {
+ $("#permissions-select").val("Disabled");
+ } else {
+ $("#permissions-select").val(min_time);
+ }
+ }
+ };
+
+ var setup_notifier = function () {
+ $([Jupyter.events]).on('kernel_starting.Kernel',function () {
+ first_start = true; // reset first_start status when restarting the kernel
+ });
+
+ $([Jupyter.events]).on('kernel_busy.Kernel',function () {
+ busy_kernel = true;
+ start_time = current_time(); // reset the timer
+ });
+
+ $([Jupyter.events]).on('kernel_idle.Kernel',function () {
+ busy_kernel = false; // Used to make sure that kernel doesn't go busy again within the timeout set below.
+ setTimeout(notify, 500);
+ });
+ };
+
+ var load_ipython_extension = function () {
+ return Jupyter.notebook.config.loaded.then(function() {
+ $.extend(true, params, Jupyter.notebook.config.data.notify);
+ ensure_permission();
+ setup_notifier();
+ });
+ };
+
+ return {
+ load_ipython_extension : load_ipython_extension
+ };
+
+});
diff --git a/.local/share/jupyter/nbextensions/notify/notify.mp3 b/.local/share/jupyter/nbextensions/notify/notify.mp3
new file mode 100644
index 0000000000000000000000000000000000000000..54402aba5910278f485c2bf6ee746de2e8fd5b86
Binary files /dev/null and b/.local/share/jupyter/nbextensions/notify/notify.mp3 differ
diff --git a/.local/share/jupyter/nbextensions/notify/notify.yaml b/.local/share/jupyter/nbextensions/notify/notify.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ed299a3449014ac93d5033763b1227a439d4713a
--- /dev/null
+++ b/.local/share/jupyter/nbextensions/notify/notify.yaml
@@ -0,0 +1,18 @@
+Type: IPython Notebook Extension
+Name: Notify
+Description: >
+ Show a browser notification when kernel becomes idle again after being busy
+ for some time - configurable after 0, 5, 10, or 30 seconds busy.
+Link: readme.md
+Icon: notification.png
+Main: notify.js
+Compatibility: 4.x, 5.x
+Parameters:
+- name: notify.sticky
+ description: Require interactions on notifications to dismiss them. (Chrome only)
+ input_type: checkbox
+ default: false
+- name: notify.play_sound
+ description: Play notification sound.
+ input_type: checkbox
+ default: false
diff --git a/.local/share/jupyter/nbextensions/notify/readme.md b/.local/share/jupyter/nbextensions/notify/readme.md
new file mode 100644
index 0000000000000000000000000000000000000000..40f5583ec9f318166fd6e044c84b81955402db2c
--- /dev/null
+++ b/.local/share/jupyter/nbextensions/notify/readme.md
@@ -0,0 +1,48 @@
+# Notebook web notifications
+
+Jupyter notebook extension to display a web notification to notify you when the
+kernel becomes idle.
+This can be useful when running tasks that take more than a couple of seconds
+to complete.
+
+The extension has been tested with the most recent versions of Firefox, Chrome
+and Safari.
+
+Initially, a button to request notification permissions is shown in the toolbar.
+After notification permissions have been granted, this button is replaced by a
+dropdown menu with five choices: Disabled, 0, 5, 10 and 30.
+To activate notifications, select a minimum kernel busy time required to
+trigger a notification (e.g. if selecting 5, a notification will only be shown
+if the kernel was busy for more than 5 seconds). The selection is saved in the
+notebook's metadata and restored when the notebook is re-opened.
+
+You may configure the plugin so that notifications require manual dismissal
+before disappearing. Browser support is limited, see
+[here](https://developer.mozilla.org/en-US/docs/Web/API/notification/requireInteraction)
+to check if your browser supports this. You may also configure the plugin so
+that notifications play a sound.
+
+
+
+
+## Original Source
+This extension originally comes from [@sjpfenniger](https://github.com/sjpfenninger)'s [GitHub repository](https://github.com/sjpfenninger/ipython-extensions).
+
+## Credits
+
+This extension contains sounds created by RSilveira_88 on freesound.org, licensed
+under the CC-BY 3.0 License. Modifications by morrisjim. You may find the
+modified version [here](https://freesound.org/people/morrisjm/sounds/268756/) and
+the original [here](https://freesound.org/people/RSilveira_88/sounds/216306/).
+
+## License
+
+The MIT License (MIT)
+
+Copyright (c) 2014 Stefan Pfenninger
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/.local/share/jupyter/nbextensions/printview/icon.png b/.local/share/jupyter/nbextensions/printview/icon.png
new file mode 100644
index 0000000000000000000000000000000000000000..776aad2ce3fa467307e52199885e55959c1b3724
Binary files /dev/null and b/.local/share/jupyter/nbextensions/printview/icon.png differ
diff --git a/.local/share/jupyter/nbextensions/printview/printview-button.png b/.local/share/jupyter/nbextensions/printview/printview-button.png
new file mode 100644
index 0000000000000000000000000000000000000000..e98073d29e3ff44075b06d49962dbaedc7e0b551
Binary files /dev/null and b/.local/share/jupyter/nbextensions/printview/printview-button.png differ
diff --git a/.local/share/jupyter/nbextensions/printview/printview.yaml b/.local/share/jupyter/nbextensions/printview/printview.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c347fa4abfd64298c79238b24e992eaec2c6bb37
--- /dev/null
+++ b/.local/share/jupyter/nbextensions/printview/printview.yaml
@@ -0,0 +1,16 @@
+Type: IPython Notebook Extension
+Name: Printview
+Description: Calls nbconvert and shows the generated output in a new browser tab
+Link: readme.md
+Icon: icon.png
+Main: main.js
+Compatibility: 4.x, 5.x
+Parameters:
+- name: printview_nbconvert_options
+ description: nbconvert options
+ input_type: string
+ default: --to html
+- name: printview_open_tab
+ description: open a new tab in the browser to display nbconvert output (for html and pdf only)
+ input_type: checkbox
+ default: true
diff --git a/.local/share/jupyter/nbextensions/printview/readme.md b/.local/share/jupyter/nbextensions/printview/readme.md
new file mode 100644
index 0000000000000000000000000000000000000000..fe9ae7bba05f8dedc843775bd7d0057fd64ee89b
--- /dev/null
+++ b/.local/share/jupyter/nbextensions/printview/readme.md
@@ -0,0 +1,58 @@
+Printview
+=========
+This extension adds a toolbar button to call `jupyter nbconvert` for the current notebook and optionally display the converted file in a
+new browser tab.
+
+
+
+Supported output types to display in a tab are `html` and `pdf`.
+
+Parameters
+----------
+
+ - **`printview_nbconvert_options`**: Options to pass to nbconvert. Default: `--to html`
+ To convert to PDF you can use ` --to pdf`.
+ Using `--to pdf --template printviewlatex.tplx` as the parameter, using a
+ custom template generates a nice looking PDF document.
+ **Note**: Converting to PDF requires a Latex installation running on the
+ notebook server.
+
+ - **`printview_open_tab`**: After conversion, open a new tab.
+ Only available when converting to html or pdf output format. Default true.
+
+
+Note
+----
+
+If you use matplotlib plots and want to generate a PDF document, it is useful to have the IPython backend generate high quality pdf versions of plots
+using this code snippet:
+
+```python
+ip = get_ipython()
+ibe = ip.configurables[-1]
+ibe.figure_formats = { 'pdf', 'png'}
+```
+
+Internals
+---------
+
+The configuration is stored in the Jupyter configuration path `nbconfig/notebook.js` using two keys:
+`printview_nbconvert_options` and `printview_open_tab`.
+
+You can check the current configuration using the
+[jupyter_nbextensions_configurator](https://github.com/Jupyter-contrib/jupyter_nbextensions_configurator)
+server extension, or with this code snippet:
+
+```python
+import os
+from jupyter_core.paths import jupyter_config_dir, jupyter_data_dir
+from traitlets.config.loader import Config, JSONFileConfigLoader
+
+json_config = os.path.join(jupyter_config_dir(), 'nbconfig/notebook.json')
+if os.path.isfile(json_config) is True:
+ cl = JSONFileConfigLoader(json_config)
+ config = cl.load_config()
+ for k in config:
+ if k.startswith('printview'):
+ print("%s: %s" % (k, config[k]))
+```
diff --git a/.local/share/jupyter/nbextensions/python-markdown/python-markdown-pre.png b/.local/share/jupyter/nbextensions/python-markdown/python-markdown-pre.png
new file mode 100644
index 0000000000000000000000000000000000000000..5226d959bc375b4cc1ceda7cc9168fafe434adf8
Binary files /dev/null and b/.local/share/jupyter/nbextensions/python-markdown/python-markdown-pre.png differ
diff --git a/.local/share/jupyter/nbextensions/python-markdown/python-markdown.png b/.local/share/jupyter/nbextensions/python-markdown/python-markdown.png
new file mode 100644
index 0000000000000000000000000000000000000000..585a0038ac56eef5453de4e97875629bbe2bd357
Binary files /dev/null and b/.local/share/jupyter/nbextensions/python-markdown/python-markdown.png differ
diff --git a/.local/share/jupyter/nbextensions/python-markdown/readme.md b/.local/share/jupyter/nbextensions/python-markdown/readme.md
new file mode 100644
index 0000000000000000000000000000000000000000..ebf671fac5995e3f95d66e582acc791390eafa9a
--- /dev/null
+++ b/.local/share/jupyter/nbextensions/python-markdown/readme.md
@@ -0,0 +1,86 @@
+Python Markdown
+===============
+
+The **Python Markdown** extension allows displaying output produced by the current kernel
+in markdown cells. The extension is basically agnostic to the kernel language, however most
+testing has been done using Python.
+
+For example:
+If you set variable `a` in Python
+
+```python
+a = 1.23
+```
+
+and write the following line in a markdown cell:
+
+ a is {{a}}
+
+It will be displayed as:
+
+ a is 1.23
+
+[](https://youtu.be/_wLwLsgkExc)
+
+The notebook needs to be trusted in order to execute Python commands in markdown.
+This is indicated by the "trusted" check mark:
+
+
+
+If you see the "untrusted" question mark, use File->Trust Notebook in the menu.
+
+**Caution: If you trust a notebook, you allow it to execute any code that is contained between the `{{...}}`
+curly braces on your notebook server.**
+
+
+Further examples
+----------------
+
+Before rendering the markdown cell:
+
+
+
+After rendering the markdown cell:
+
+
+
+Code is only executed when the notebook is trusted. So if your original code is shown as
+rendered markdown output, please make sure your notebook is trusted. You can check if the notebook
+is trusted by looking at the check mark at the top of the window.
+
+**Caution:** There is no restriction in the expression you can embed between the curly braces `{{ }}`.
+Be careful as you might crash your browser if you return too large datasets, or worse.
+
+
+Exporting
+---------
+
+In order to have `nbconvert` show the computed output when exporting to another format,
+use the `pre_pymarkdown.py` preprocessor. If you used the `python setup.py install` command to install the
+IPython-contrib extension package, this will already be installed.
+
+For manual setup, you need to copy this file to a location within the Python path (or extend `PYTHONPATH`).
+Additionally, you need to add these two lines to your `jupyter_nbconvert_config.py` configuration file:
+
+```python
+c = get_config()
+c.Exporter.preprocessors = ['pre_pymarkdown.PyMarkdownPreprocessor']
+```
+
+
+Internals
+---------
+
+The extension overrides the `textcell.MarkdownCell.prototype.render` function and searches for the expression enclosed
+in double curly braces `{{ }}`. It then executes the expression and replaces it with the result returned from
+the running kernel, embedded in a `` tag.
+Additionally, the result is saved in the metadata of the markdown cell, i.e. `cell.metadata.variables[varname]`.
+This stored value is displayed when reloading the notebook and used for the nbconvert preprocessor.
+
+The preprocessor `pre_pymarkdown.PyMarkdownPreprocessor` allows `nbconvert` to display the computed variables
+when converting the notebook to an output file format.
+
+Unfortunately, embedding in LaTeX is not supported currently, as computing expressions between the curly braces
+and rendering LaTeX equations is happening asynchronously, and it is difficult to handle this in a consistent way.
+Ideas or pull request to implement this functionality are welcome.
+
diff --git a/.local/share/jupyter/nbextensions/python-markdown/trusted.png b/.local/share/jupyter/nbextensions/python-markdown/trusted.png
new file mode 100644
index 0000000000000000000000000000000000000000..6f4082d3ed9faad8cfacf52932fb6ff9b7453fbb
Binary files /dev/null and b/.local/share/jupyter/nbextensions/python-markdown/trusted.png differ
diff --git a/.local/share/jupyter/nbextensions/python-markdown/untrusted.png b/.local/share/jupyter/nbextensions/python-markdown/untrusted.png
new file mode 100644
index 0000000000000000000000000000000000000000..36c2b1ff08b166b4f386dcdb44f49ed4df4a098e
Binary files /dev/null and b/.local/share/jupyter/nbextensions/python-markdown/untrusted.png differ
diff --git a/.local/share/jupyter/nbextensions/qtconsole/README.md b/.local/share/jupyter/nbextensions/qtconsole/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..d3f403a63e0d6e3d3121bfa20547e4afa6240d67
--- /dev/null
+++ b/.local/share/jupyter/nbextensions/qtconsole/README.md
@@ -0,0 +1,4 @@
+QT Console
+==========
+
+Launch a QTConsole attached to the running kernel
diff --git a/.local/share/jupyter/nbextensions/qtconsole/qtconsole.js b/.local/share/jupyter/nbextensions/qtconsole/qtconsole.js
new file mode 100644
index 0000000000000000000000000000000000000000..0a8738d918ee372084ad8a01e57f024728801e85
--- /dev/null
+++ b/.local/share/jupyter/nbextensions/qtconsole/qtconsole.js
@@ -0,0 +1,24 @@
+// Launch QT Console attached to the current kernel
+
+define([
+ 'base/js/namespace',
+ 'base/js/events'
+ ], function(Jupyter, events) {
+ // Extension entry point: registers a toolbar button whose handler asks
+ // the running kernel to execute the %qtconsole magic, opening a Qt
+ // console attached to the same kernel.
+ var load_ipython_extension = function () {
+ Jupyter.toolbar.add_buttons_group([
+ /**
+ * Button to launch QTConsole
+ */
+ Jupyter.keyboard_manager.actions.register ({
+ 'help' : 'Run QTConsole', // tooltip shown for the toolbar button
+ 'icon' : 'fa-terminal', // Font Awesome terminal icon
+ 'handler': function () {
+ // Delegate to the kernel: %qtconsole spawns the console process
+ // server-side, attached to this notebook's kernel.
+ Jupyter.notebook.kernel.execute('%qtconsole')
+ }
+ }, 'run-qtconsole', 'qtconsole')
+ ]);
+ };
+ return {
+ load_ipython_extension : load_ipython_extension
+ };
+});
diff --git a/.local/share/jupyter/nbextensions/rubberband/icon.png b/.local/share/jupyter/nbextensions/rubberband/icon.png
new file mode 100644
index 0000000000000000000000000000000000000000..eeac6b28dcb164ef57baa0ef5966dc756dca196c
Binary files /dev/null and b/.local/share/jupyter/nbextensions/rubberband/icon.png differ
diff --git a/.local/share/jupyter/nbextensions/rubberband/readme.md b/.local/share/jupyter/nbextensions/rubberband/readme.md
new file mode 100644
index 0000000000000000000000000000000000000000..f17a953f28a6fedad35c13b76df2712df017f7f4
--- /dev/null
+++ b/.local/share/jupyter/nbextensions/rubberband/readme.md
@@ -0,0 +1,26 @@
+Rubberband
+==========
+Multi-Cell selection using a rubberband. This extension is only available for IPython version 3.x.
+
+Description
+-----------
+
+The *rubberband* extension allows selecting multiple cells. Cells are selected by pressing `shift` or `ctrl`+`shift` + left mouse button click and dragging the rubber band over the cells.
+
+* `shift` + left mouse button : select cells that are currently touched by the rubberband
+* `ctrl` + `shift` + left mouse button : select cells that were touched by the rubberband
+
+The `ctrl`+`shift` action is useful when scrolling inside the notebook. Scrolling is activated when the mouse reaches the upper or lower boundary of the notebook area. For now, the mouse has to be moved to achieve continuous scrolling.
+
+A short video demonstrating the rubberband extension can be found here:
+[](https://youtu.be/TOPfWhqa3oI)
+
+
+Two other extensions make use of this feature: exercise and chrome_clipboard.
+
+Internals
+---------
+
+New metadata element added to each cell:
+* `cell.metadata.selected` - means this cell is selected
+
diff --git a/.local/share/jupyter/nbextensions/ruler/edit.js b/.local/share/jupyter/nbextensions/ruler/edit.js
new file mode 100644
index 0000000000000000000000000000000000000000..5e4283e27a91f25b2f2294f9d9bce83d83d5c7a4
--- /dev/null
+++ b/.local/share/jupyter/nbextensions/ruler/edit.js
@@ -0,0 +1,4 @@
+// Thin re-export shim: loads the ruler extension's main module so the
+// same implementation can be enabled on the editor (edit) page as well.
+define(['./main'], function (ruler) {
+ "use strict";
+ return ruler;
+});
diff --git a/.local/share/jupyter/nbextensions/ruler/ruler_editor.yaml b/.local/share/jupyter/nbextensions/ruler/ruler_editor.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2440493a8187d6fcf4bd337cd0b9a3f7442ffe53
--- /dev/null
+++ b/.local/share/jupyter/nbextensions/ruler/ruler_editor.yaml
@@ -0,0 +1,29 @@
+Type: IPython Notebook Extension
+Name: Ruler in Editor
+Description: This extension enables the Ruler feature in the editor
+Link: readme.md
+Icon: icon.png
+Main: edit.js
+Compatibility: 4.x, 5.x
+Parameters:
+
+- name: ruler_column
+ input_type: list
+ list_element:
+ input_type: number
+ description: Column where ruler is displayed
+ default: [78]
+
+- name: ruler_color
+ input_type: list
+ list_element:
+ input_type: color
+ description: Ruler color
+ default: ["#ff0000"]
+
+- name: ruler_linestyle
+ description: 'Ruler style, e.g. solid, dashed'
+ input_type: list
+ default: ['dashed']
+
+Section: edit
diff --git a/.local/share/jupyter/nbextensions/runtools/cellstate.js b/.local/share/jupyter/nbextensions/runtools/cellstate.js
new file mode 100644
index 0000000000000000000000000000000000000000..71a17c3736cd294b98350f5d9278bc831ac0ee6f
--- /dev/null
+++ b/.local/share/jupyter/nbextensions/runtools/cellstate.js
@@ -0,0 +1,20 @@
+
+
+ // Register a "cellstate" option on CodeMirror. Setting the option
+ // attaches gutter-based state tracking to an editor instance; clearing
+ // it tears the tracking down again.
+ // NOTE(review): State, parseOptions, updateInViewport and the
+ // onGutterClick/onChange/onViewportChange handlers are defined
+ // elsewhere in this file -- confirm their contracts before editing.
+ CodeMirror.defineOption("cellstate", false, function(cm, val, old) {
+ // Tear down a previously-enabled cellstate: clear the gutter and
+ // detach all event handlers registered below.
+ if (old && old != CodeMirror.Init) {
+ cm.clearGutter(cm.state.cellState.options.gutter);
+ cm.state.cellState = null;
+ cm.off("gutterClick", onGutterClick);
+ cm.off("change", onChange);
+ cm.off("viewportChange", onViewportChange);
+ cm.off("swapDoc", onChange);
+ }
+ // Enable: build fresh per-editor state and hook the events needed to
+ // keep the visible viewport's gutter markers up to date.
+ if (val) {
+ cm.state.cellState = new State(parseOptions(val));
+ updateInViewport(cm);
+ cm.on("gutterClick", onGutterClick);
+ cm.on("change", onChange);
+ cm.on("viewportChange", onViewportChange);
+ cm.on("swapDoc", onChange);
+ }
+ });
diff --git a/.local/share/jupyter/nbextensions/runtools/runtools_marker.png b/.local/share/jupyter/nbextensions/runtools/runtools_marker.png
new file mode 100644
index 0000000000000000000000000000000000000000..ad3914f397c5018d8c46dda43f71c7b2748a2a8f
Binary files /dev/null and b/.local/share/jupyter/nbextensions/runtools/runtools_marker.png differ
diff --git a/.local/share/jupyter/nbextensions/runtools/runtools_nb.png b/.local/share/jupyter/nbextensions/runtools/runtools_nb.png
new file mode 100644
index 0000000000000000000000000000000000000000..162c3660003be2186dbc4fbfd0a2101f93a2b5bc
Binary files /dev/null and b/.local/share/jupyter/nbextensions/runtools/runtools_nb.png differ
diff --git a/.local/share/jupyter/nbextensions/scratchpad/demo.gif b/.local/share/jupyter/nbextensions/scratchpad/demo.gif
new file mode 100644
index 0000000000000000000000000000000000000000..fc69a6ea4cc8e6b1095f2ca12a7513fff4e3f173
--- /dev/null
+++ b/.local/share/jupyter/nbextensions/scratchpad/demo.gif
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cfbc6359d32c4b072feea49dca4880a75c6f49dd54c6d14225658f73e0d3ae27
+size 1160320
diff --git a/.local/share/jupyter/nbextensions/scratchpad/main.js b/.local/share/jupyter/nbextensions/scratchpad/main.js
new file mode 100644
index 0000000000000000000000000000000000000000..d92b821a13c7fe15f8c82407cc9726de0102d37b
--- /dev/null
+++ b/.local/share/jupyter/nbextensions/scratchpad/main.js
@@ -0,0 +1,149 @@
+define([
+ 'require',
+ 'jquery',
+ 'base/js/namespace',
+ 'base/js/events',
+ 'base/js/utils',
+ 'notebook/js/codecell',
+], function (
+ requirejs,
+ $,
+ Jupyter,
+ events,
+ utils,
+ codecell
+) {
+ "use strict";
+ var CodeCell = codecell.CodeCell;
+
+ var Scratchpad = function (nb) {
+ var scratchpad = this;
+ this.notebook = nb;
+ this.kernel = nb.kernel;
+ this.km = nb.keyboard_manager;
+ this.collapsed = true;
+
+ // create elements
+ this.element = $("