Commit fdaf774 (verified) · committed by 0-hero · 1 parent: f9d5f95

Add files using upload-large-folder tool
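
The commit message above refers to Hugging Face's upload-large-folder tool. Purely as an illustrative sketch (not part of this commit), a large local folder can be uploaded in resumable chunks from Python via huggingface_hub's HfApi.upload_large_folder; the repository id and local path below are placeholders, and the call assumes a recent huggingface_hub release that ships this API.

    # Hypothetical sketch of the upload path implied by the commit message.
    # Assumes a recent huggingface_hub (with the upload_large_folder API) and an
    # authenticated session; repo_id and folder_path are placeholders.
    from huggingface_hub import HfApi

    api = HfApi()
    api.upload_large_folder(
        repo_id="0-hero/example-repo",        # placeholder, not the real target repo
        repo_type="model",
        folder_path="./local_folder_to_upload",
    )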

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the complete list of files.
Files changed (50)
  1. .cache/pip/http-v2/0/1/f/2/0/01f2082df50502ba9492d64e69db99d1fdb5730707a16c6264b355b8.body +3 -0
  2. .cache/pip/http-v2/0/2/d/4/2/02d4221e858694abc22129c65515f1df2c4c326330eb1a34ceb0b382.body +3 -0
  3. .cache/pip/http-v2/4/0/2/3/b/4023be7b5b37a7a4144c804ce69828082d4fb2a124d9d8aabc855da8.body +3 -0
  4. .cache/pip/http-v2/4/f/d/2/5/4fd254dbd56deb4021e55d22c4b489f6c776c69c316eb7345bc91691.body +3 -0
  5. .cache/pip/http-v2/9/6/e/8/3/96e83221dd149da9a3d38feebc955beb2034effd910108971c5b167b.body +3 -0
  6. .cache/pip/http-v2/9/e/8/c/8/9e8c8c0496d6d3384d616902379ed05e07b6b1dba9673d70b5fef231.body +3 -0
  7. .cache/pip/http-v2/a/e/7/a/2/ae7a241673cf118ca18eca030dc29d2715b1980127dd0e2949514433.body +3 -0
  8. .cache/pip/http-v2/d/3/3/a/b/d33abf9ad709d023fff05902f39da682c1afb233bcd9f2c479487586.body +3 -0
  9. .cache/pip/http-v2/d/b/1/f/6/db1f6b45c0850c8e2ce7d8b47148edeca6e8115413af41f4ecc8ce32.body +3 -0
  10. .cache/pip/http-v2/f/5/2/7/6/f52769e4b4d00542e1e056baf2db3e5ad8f277bff67f2636cace711d.body +3 -0
  11. .cache/pip/wheels/7e/e3/c3/89c7a2f3c4adc07cd1c675f8bb7b9ad4d18f64a72bccdfe826/flash_attn-2.6.3-cp310-cp310-linux_x86_64.whl +3 -0
  12. .gitattributes +16 -0
  13. .ipynb_checkpoints/model-checkpoint.py +390 -0
  14. .ipynb_checkpoints/train-checkpoint.py +545 -0
  15. .launchpadlib/api.launchpad.net/cache/api.launchpad.net,devel,~deadsnakes,+archive,ubuntu,ppa,ws.op=getSigningKeyData-application,json,c76e9ed0b661c7fa5da42e8fb2da319e +22 -0
  16. .launchpadlib/api.launchpad.net/cache/api.launchpad.net,devel,~deadsnakes,name=%22ppa%22&ws.op=getPPAByName-application,json,bca461ac71b1143128b6fbebfcd56851 +22 -0
  17. .local/share/Trash/info/train_005.bin.trashinfo +3 -0
  18. .local/share/jupyter/nbextensions/exercise2/main.js +169 -0
  19. .local/share/jupyter/nbextensions/freeze/config.yaml +20 -0
  20. .local/share/jupyter/nbextensions/freeze/icon.png +0 -0
  21. .local/share/jupyter/nbextensions/gist_it/icon.png +0 -0
  22. .local/share/jupyter/nbextensions/go_to_current_running_cell/auto_focus.gif +3 -0
  23. .local/share/jupyter/nbextensions/go_to_current_running_cell/eye.png +0 -0
  24. .local/share/jupyter/nbextensions/go_to_current_running_cell/main.js +126 -0
  25. .local/share/jupyter/nbextensions/help_panel/readme.md +15 -0
  26. .local/share/jupyter/nbextensions/hide_input/readme.md +51 -0
  27. .local/share/jupyter/nbextensions/highlight_selected_word/configurator.yaml +131 -0
  28. .local/share/jupyter/nbextensions/init_cell/main.js +157 -0
  29. .local/share/jupyter/nbextensions/keyboard_shortcut_editor/keyboard_shortcut_editor.yaml +12 -0
  30. .local/share/jupyter/nbextensions/keyboard_shortcut_editor/main.css +40 -0
  31. .local/share/jupyter/nbextensions/keyboard_shortcut_editor/readme_reset_disabled.png +0 -0
  32. .local/share/jupyter/nbextensions/limit_output/icon.png +0 -0
  33. .local/share/jupyter/nbextensions/limit_output/main.js +133 -0
  34. .local/share/jupyter/nbextensions/limit_output/readme.md +48 -0
  35. .local/share/jupyter/nbextensions/livemdpreview/livemdpreview.js +112 -0
  36. .local/share/jupyter/nbextensions/load_tex_macros/load_tex_macros.yaml +7 -0
  37. .local/share/jupyter/nbextensions/move_selected_cells/README.md +12 -0
  38. .local/share/jupyter/nbextensions/move_selected_cells/main.js +87 -0
  39. .local/share/jupyter/nbextensions/navigation-hotkeys/icon.png +0 -0
  40. .local/share/jupyter/nbextensions/navigation-hotkeys/readme.md +25 -0
  41. .local/share/jupyter/nbextensions/nbTranslate/README.md +56 -0
  42. .local/share/jupyter/nbextensions/nbTranslate/demo1.gif +3 -0
  43. .local/share/jupyter/nbextensions/nbTranslate/demo2.gif +3 -0
  44. .local/share/jupyter/nbextensions/nbTranslate/main.js +136 -0
  45. .local/share/jupyter/nbextensions/nbTranslate/nbTranslate.js +622 -0
  46. .local/share/jupyter/nbextensions/notify/notification.png +0 -0
  47. .local/share/jupyter/nbextensions/notify/notify.js +203 -0
  48. .local/share/jupyter/nbextensions/notify/notify.mp3 +0 -0
  49. .local/share/jupyter/nbextensions/notify/notify.yaml +18 -0
  50. .local/share/jupyter/nbextensions/notify/readme.md +48 -0
.cache/pip/http-v2/0/1/f/2/0/01f2082df50502ba9492d64e69db99d1fdb5730707a16c6264b355b8.body ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5d4511c52caacf3c4981d1ae2df85908bd31853f33d30b345c8b6830763f769c
3
+ size 1080866
.cache/pip/http-v2/0/2/d/4/2/02d4221e858694abc22129c65515f1df2c4c326330eb1a34ceb0b382.body ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5bfae9500ad8e7d2937ebccb4906f3bc464d1bf66eedd0e4adabd520811c7b52
3
+ size 2631958
.cache/pip/http-v2/4/0/2/3/b/4023be7b5b37a7a4144c804ce69828082d4fb2a124d9d8aabc855da8.body ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fd4c97d69242efd604c1a2077c8b56341e236cfaca78c40f59dcef9b95464fdc
3
+ size 9663908
.cache/pip/http-v2/4/f/d/2/5/4fd254dbd56deb4021e55d22c4b489f6c776c69c316eb7345bc91691.body ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f99e4769b4750076cd4235c044b61232110733322384a94a63791d2e7beacc66
3
+ size 9995162
.cache/pip/http-v2/9/6/e/8/3/96e83221dd149da9a3d38feebc955beb2034effd910108971c5b167b.body ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8e32dced201274bf96899e6491d9ba3e9a5f6b336708656466ad0522d8528f69
3
+ size 41178528
.cache/pip/http-v2/9/e/8/c/8/9e8c8c0496d6d3384d616902379ed05e07b6b1dba9673d70b5fef231.body ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3d2665c5df629eb2f981dab244c01bfa6cdc185f4ffa026639286c4d56fafb54
3
+ size 1221827
.cache/pip/http-v2/a/e/7/a/2/ae7a241673cf118ca18eca030dc29d2715b1980127dd0e2949514433.body ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9dca7c3956b03b7663fac4d150f5e6d4f6f38b2462c1e9afd83bcf7019f17913
3
+ size 1080679
.cache/pip/http-v2/d/3/3/a/b/d33abf9ad709d023fff05902f39da682c1afb233bcd9f2c479487586.body ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ba0d021a166865d2265246961bec0152ff124de910c5cc39f1156ce3fa7c69dc
3
+ size 2110226
.cache/pip/http-v2/d/b/1/f/6/db1f6b45c0850c8e2ce7d8b47148edeca6e8115413af41f4ecc8ce32.body ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f7ae2de664e0b158d1607699a16a488de3d008ba99b3a7aa5de1cbc13574d047
3
+ size 39855626
.cache/pip/http-v2/f/5/2/7/6/f52769e4b4d00542e1e056baf2db3e5ad8f277bff67f2636cace711d.body ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:86976a1c5b25ae3f8ccae3a5306e443569ee3c3faf444dfd0f41cda24667ad57
3
+ size 13064210
.cache/pip/wheels/7e/e3/c3/89c7a2f3c4adc07cd1c675f8bb7b9ad4d18f64a72bccdfe826/flash_attn-2.6.3-cp310-cp310-linux_x86_64.whl ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8991eedb5038a1ee6fc9904f99c12b40213d66753ed91e261a43d085f5aeab8f
3
+ size 187219571
.gitattributes CHANGED
@@ -33,3 +33,19 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ .cache/pip/http-v2/0/2/d/4/2/02d4221e858694abc22129c65515f1df2c4c326330eb1a34ceb0b382.body filter=lfs diff=lfs merge=lfs -text
37
+ .cache/pip/http-v2/0/1/f/2/0/01f2082df50502ba9492d64e69db99d1fdb5730707a16c6264b355b8.body filter=lfs diff=lfs merge=lfs -text
38
+ .cache/pip/http-v2/4/f/d/2/5/4fd254dbd56deb4021e55d22c4b489f6c776c69c316eb7345bc91691.body filter=lfs diff=lfs merge=lfs -text
39
+ .cache/pip/http-v2/4/0/2/3/b/4023be7b5b37a7a4144c804ce69828082d4fb2a124d9d8aabc855da8.body filter=lfs diff=lfs merge=lfs -text
40
+ .cache/pip/http-v2/a/e/7/a/2/ae7a241673cf118ca18eca030dc29d2715b1980127dd0e2949514433.body filter=lfs diff=lfs merge=lfs -text
41
+ .cache/pip/http-v2/d/3/3/a/b/d33abf9ad709d023fff05902f39da682c1afb233bcd9f2c479487586.body filter=lfs diff=lfs merge=lfs -text
42
+ .cache/pip/http-v2/d/b/1/f/6/db1f6b45c0850c8e2ce7d8b47148edeca6e8115413af41f4ecc8ce32.body filter=lfs diff=lfs merge=lfs -text
43
+ .cache/pip/http-v2/9/6/e/8/3/96e83221dd149da9a3d38feebc955beb2034effd910108971c5b167b.body filter=lfs diff=lfs merge=lfs -text
44
+ .cache/pip/http-v2/9/e/8/c/8/9e8c8c0496d6d3384d616902379ed05e07b6b1dba9673d70b5fef231.body filter=lfs diff=lfs merge=lfs -text
45
+ .cache/pip/http-v2/f/5/2/7/6/f52769e4b4d00542e1e056baf2db3e5ad8f277bff67f2636cace711d.body filter=lfs diff=lfs merge=lfs -text
46
+ .cache/pip/wheels/7e/e3/c3/89c7a2f3c4adc07cd1c675f8bb7b9ad4d18f64a72bccdfe826/flash_attn-2.6.3-cp310-cp310-linux_x86_64.whl filter=lfs diff=lfs merge=lfs -text
47
+ .local/share/jupyter/nbextensions/go_to_current_running_cell/auto_focus.gif filter=lfs diff=lfs merge=lfs -text
48
+ .local/share/jupyter/nbextensions/nbTranslate/demo1.gif filter=lfs diff=lfs merge=lfs -text
49
+ .local/share/jupyter/nbextensions/nbTranslate/demo2.gif filter=lfs diff=lfs merge=lfs -text
50
+ .local/share/jupyter/nbextensions/scratchpad/demo.gif filter=lfs diff=lfs merge=lfs -text
51
+ .local/share/jupyter/nbextensions/toc2/demo.gif filter=lfs diff=lfs merge=lfs -text
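
The rules added above route the pip cache blobs, the flash_attn wheel, and a few GIF demos through Git LFS, so the repository keeps three-line pointer stubs (version / oid / size, as shown in the .body diffs earlier) rather than the binaries themselves. As a small illustrative aside (not part of the commit), such a pointer file can be parsed into its fields with a few lines of Python:

    # Hypothetical helper: read a Git LFS pointer file of the form shown above
    # ("version ...", "oid sha256:<hex>", "size <bytes>") into a dict.
    def parse_lfs_pointer(path):
        fields = {}
        with open(path, "r", encoding="utf-8") as fh:
            for line in fh:
                line = line.strip()
                if line:
                    key, _, value = line.partition(" ")
                    fields[key] = value
        return fields  # e.g. {"version": "https://git-lfs.github.com/spec/v1", "oid": "sha256:...", "size": "1080866"}
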
.ipynb_checkpoints/model-checkpoint.py ADDED
@@ -0,0 +1,390 @@
1
+ # gpt2-model-positional-encodings.py
2
+
3
+ import math
4
+ import inspect
5
+ from dataclasses import dataclass
6
+
7
+ import torch
8
+ import torch.nn as nn
9
+ import torch.nn.functional as F
10
+
11
+ # Import necessary modules for different positional encodings
12
+ import numpy as np
13
+ import scipy.special
14
+ import scipy.signal
15
+
16
+ from packaging import version
17
+
18
+ # Check if scaled_dot_product_attention is available and supports flash attention
19
+ use_flash_attn = 'scaled_dot_product_attention' in dir(F) and version.parse(torch.__version__) >= version.parse('2.0.0')
20
+ if use_flash_attn:
21
+ print("Flash Attention v2 is available and will be used where possible.")
22
+ else:
23
+ print("Flash Attention v2 is not available. Using standard attention.")
24
+
25
+ class LayerNorm(nn.Module):
26
+ """LayerNorm with optional bias."""
27
+ def __init__(self, ndim, bias):
28
+ super().__init__()
29
+ self.weight = nn.Parameter(torch.ones(ndim))
30
+ self.bias = nn.Parameter(torch.zeros(ndim)) if bias else None
31
+ def forward(self, input):
32
+ return F.layer_norm(input, self.weight.shape, self.weight, self.bias, 1e-5)
33
+
34
+ def get_positional_encoding(position, d_model, method, max_len=5000):
35
+ """
36
+ Generate positional encodings based on the specified method.
37
+ """
38
+ if method == 'default':
39
+ return None # Handled by nn.Embedding in the model
40
+ elif method == 'learned':
41
+ return None # Handled by nn.Embedding in the model
42
+ elif method == 'sinusoidal':
43
+ pe = torch.zeros(max_len, d_model)
44
+ position_enc = position.unsqueeze(1)
45
+ div_term = torch.exp(torch.arange(0, d_model, 2) * -(math.log(10000.0) / d_model))
46
+ pe[:, 0::2] = torch.sin(position_enc * div_term)
47
+ pe[:, 1::2] = torch.cos(position_enc * div_term)
48
+ return pe
49
+ elif method == 'exponential':
50
+ pe = torch.exp(-position.float() / max_len).unsqueeze(1).repeat(1, d_model)
51
+ return pe
52
+ elif method == 'polynomial_legendre':
53
+ pe = torch.zeros(max_len, d_model)
54
+ x = (position / max_len * 2) - 1 # Scale positions to [-1,1]
55
+ for i in range(d_model):
56
+ pe[:, i] = scipy.special.eval_legendre(i, x)
57
+ return pe
58
+ elif method == 'polynomial_chebyshev':
59
+ pe = torch.zeros(max_len, d_model)
60
+ x = (position / max_len * 2) - 1 # Scale positions to [-1,1]
61
+ for i in range(d_model):
62
+ pe[:, i] = scipy.special.eval_chebyt(i, x)
63
+ return pe
64
+ elif method == 'gaussian':
65
+ pe = torch.zeros(max_len, d_model)
66
+ positions = position.float()
67
+ means = torch.linspace(0, max_len, d_model)
68
+ std = max_len / d_model
69
+ for i in range(d_model):
70
+ pe[:, i] = torch.exp(- ((positions - means[i]) **2) / (2 * std **2))
71
+ return pe
72
+ elif method == 'random_fourier':
73
+ B = torch.randn(d_model, 1)
74
+ x = position.float() / max_len
75
+ x = x @ B.T * 2 * math.pi
76
+ pe = torch.cat([torch.sin(x), torch.cos(x)], dim=1)
77
+ return pe[:, :d_model]
78
+ elif method == 'wavelet':
79
+ pe = torch.zeros(max_len, d_model)
80
+ scales = torch.arange(1, d_model+1)
81
+ x = position.float()
82
+ for i in range(d_model):
83
+ wavelet = scipy.signal.ricker(points=max_len, a=scales[i])
84
+ pe[:, i] = torch.from_numpy(wavelet[position])
85
+ return pe
86
+ elif method == 'bessel':
87
+ pe = torch.zeros(max_len, d_model)
88
+ x = position.float()
89
+ for i in range(d_model):
90
+ pe[:, i] = scipy.special.jv(i, x)
91
+ return pe
92
+ elif method == 'alternative':
93
+ pe = torch.zeros(max_len, d_model)
94
+ position_enc = position.float()
95
+ div_term = torch.exp(torch.arange(0, d_model, 2) * -(math.log(10000.0) / d_model))
96
+ pe[:, 0::2] = torch.tan(position_enc * div_term)
97
+ pe[:, 1::2] = torch.sin(position_enc * div_term + math.pi / 4)
98
+ return pe
99
+ elif method == 'none':
100
+ return torch.zeros(max_len, d_model)
101
+ else:
102
+ raise ValueError(f"Unknown positional encoding method: {method}")
103
+
104
+ class CausalSelfAttention(nn.Module):
105
+ def __init__(self, config):
106
+ super().__init__()
107
+ self.config = config
108
+ assert config.n_embd % config.n_head == 0
109
+ self.n_head = config.n_head
110
+ self.n_embd = config.n_embd
111
+ self.dropout = config.dropout
112
+ self.head_dim = self.n_embd // self.n_head
113
+
114
+ self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd, bias=config.bias)
115
+ self.c_proj = nn.Linear(config.n_embd, config.n_embd, bias=config.bias)
116
+ self.resid_dropout = nn.Dropout(config.dropout)
117
+
118
+ # Implement attention-level positional encodings
119
+ if config.attention_type == 'rope':
120
+ self.rotary_dim = self.n_embd // self.n_head
121
+ if self.rotary_dim % 2 != 0:
122
+ self.rotary_dim -= self.rotary_dim % 2 # Ensure even dimension
123
+ inv_freq = 1.0 / (10000 ** (torch.arange(0, self.rotary_dim, 2).float() / self.rotary_dim))
124
+ self.register_buffer('inv_freq', inv_freq)
125
+ elif config.attention_type == 'alibi':
126
+ slopes = self.get_alibi_slopes(self.n_head)
127
+ self.register_buffer('alibi_slopes', slopes)
128
+ elif config.attention_type == 'relative':
129
+ num_rel_dis = 2 * config.block_size - 1
130
+ self.relative_positions = nn.Embedding(num_rel_dis, self.n_head)
131
+ # else: default attention (nothing extra to define)
132
+
133
+ def get_alibi_slopes(self, n_heads):
134
+ def get_slopes(n):
135
+ import math
136
+ def get_slopes_power_of_2(n):
137
+ start = 2 ** (-2 ** -(math.log2(n) - 3))
138
+ ratio = start
139
+ return [start * (ratio ** i) for i in range(n)]
140
+ if math.log2(n).is_integer():
141
+ return torch.Tensor(get_slopes_power_of_2(n))
142
+ else:
143
+ closest_power_of_2 = 2 ** math.floor(math.log2(n))
144
+ slopes = get_slopes_power_of_2(closest_power_of_2)
145
+ extra_slopes = get_slopes(2 * closest_power_of_2)[0::2][:n - closest_power_of_2]
146
+ return torch.Tensor(slopes + extra_slopes)
147
+ slopes = get_slopes(n_heads)
148
+ return slopes.view(n_heads, 1, 1)
149
+
150
+ def apply_rope(self, x):
151
+ # x: (B, n_head, T, head_dim)
152
+ seq_len = x.size(-2)
153
+ device = x.device
154
+ t = torch.arange(seq_len, device=device, dtype=self.inv_freq.dtype)
155
+ freqs = torch.einsum('i , j -> i j', t, self.inv_freq)
156
+ emb = torch.cat((freqs.sin(), freqs.cos()), dim=-1) # (T, rotary_dim)
157
+ emb = emb[None, None, :, :] # (1, 1, T, rotary_dim)
158
+ x1 = x[..., :self.rotary_dim]
159
+ x2 = x[..., self.rotary_dim:]
160
+ x1_rot = x1 * emb + torch.flip(x1, dims=[-1]) * torch.flip(emb, dims=[-1])
161
+ x = torch.cat((x1_rot, x2), dim=-1)
162
+ return x
163
+
164
+ def forward(self, x, layer_past=None):
165
+ B, T, C = x.size()
166
+ qkv = self.c_attn(x).view(B, T, 3, self.n_head, self.head_dim)
167
+ qkv = qkv.permute(2, 0, 3, 1, 4) # (3, B, n_head, T, head_dim)
168
+ q, k, v = qkv[0], qkv[1], qkv[2] # Each is (B, n_head, T, head_dim)
169
+
170
+ if self.config.attention_type == 'rope':
171
+ q = self.apply_rope(q)
172
+ k = self.apply_rope(k)
173
+
174
+ # Decide whether to use Flash Attention based on training/evaluation mode and tracking flags
175
+ if use_flash_attn and self.config.attention_type in ['default', 'rope'] and not (self.config.track_attention_patterns and not self.training):
176
+ # Use PyTorch's scaled_dot_product_attention which leverages Flash Attention 2
177
+ y = F.scaled_dot_product_attention(
178
+ q, k, v, attn_mask=None,
179
+ dropout_p=self.dropout if self.training else 0.0,
180
+ is_causal=True
181
+ )
182
+ else:
183
+ # Standard attention mechanism
184
+ attn_scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.head_dim)
185
+
186
+ if self.config.attention_type == 'alibi':
187
+ position_ids = torch.arange(T, device=x.device).unsqueeze(0).unsqueeze(0)
188
+ alibi = self.alibi_slopes.to(x.device) * position_ids # (n_head, 1, T)
189
+ attn_scores = attn_scores + alibi
190
+
191
+ elif self.config.attention_type == 'relative':
192
+ positions = torch.arange(-T+1, T, device=x.device)
193
+ rel_pos = self.relative_positions(positions + T -1)
194
+ attn_scores = attn_scores + rel_pos
195
+
196
+ # Apply causal mask
197
+ causal_mask = torch.tril(torch.ones(T, T, device=x.device)).view(1, 1, T, T)
198
+ attn_scores = attn_scores.masked_fill(causal_mask == 0, float('-inf'))
199
+
200
+ attn_weights = F.softmax(attn_scores, dim=-1)
201
+ attn_weights = F.dropout(attn_weights, p=self.dropout, training=self.training)
202
+
203
+ # Collect attention patterns if required
204
+ if self.config.track_attention_patterns and not self.training:
205
+ self.attn_weights = attn_weights.detach().cpu()
206
+ y = torch.matmul(attn_weights, v)
207
+
208
+ y = y.transpose(1, 2).contiguous().view(B, T, C)
209
+ y = self.resid_dropout(self.c_proj(y))
210
+ return y
211
+
212
+ class MLP(nn.Module):
213
+ def __init__(self, config):
214
+ super().__init__()
215
+ self.c_fc = nn.Linear(config.n_embd, 4 * config.n_embd, bias=config.bias)
216
+ self.gelu = nn.GELU()
217
+ self.c_proj = nn.Linear(4 * config.n_embd, config.n_embd, bias=config.bias)
218
+ self.dropout = nn.Dropout(config.dropout)
219
+ def forward(self, x):
220
+ x = self.c_fc(x)
221
+ x = self.gelu(x)
222
+ x = self.c_proj(x)
223
+ x = self.dropout(x)
224
+ return x
225
+
226
+ class Block(nn.Module):
227
+ def __init__(self, config):
228
+ super().__init__()
229
+ self.ln_1 = LayerNorm(config.n_embd, bias=config.bias)
230
+ self.attn = CausalSelfAttention(config)
231
+ self.ln_2 = LayerNorm(config.n_embd, bias=config.bias)
232
+ self.mlp = MLP(config)
233
+ def forward(self, x):
234
+ x = x + self.attn(self.ln_1(x))
235
+ x = x + self.mlp(self.ln_2(x))
236
+ return x
237
+
238
+ @dataclass
239
+ class GPTConfig:
240
+ block_size: int = 1024
241
+ vocab_size: int = 50304
242
+ n_layer: int = 12
243
+ n_head: int = 12
244
+ n_embd: int = 768
245
+ dropout: float = 0.0
246
+ bias: bool = True
247
+ embedding_type: str = 'default' # Default uses learned positional embeddings
248
+ attention_type: str = 'default' # Default attention without any modifications
249
+ track_activations: bool = False
250
+ track_attention_patterns: bool = False
251
+
252
+ class GPT(nn.Module):
253
+ def __init__(self, config):
254
+ super().__init__()
255
+ assert config.vocab_size is not None
256
+ assert config.block_size is not None
257
+ self.config = config
258
+
259
+ self.transformer = nn.ModuleDict()
260
+ self.transformer['wte'] = nn.Embedding(config.vocab_size, config.n_embd)
261
+
262
+ if config.embedding_type in ['learned', 'default']:
263
+ self.transformer['wpe'] = nn.Embedding(config.block_size, config.n_embd)
264
+ self.pos_emb = None
265
+ elif config.embedding_type == 'none':
266
+ self.transformer['wpe'] = None
267
+ self.pos_emb = None
268
+ else:
269
+ self.transformer['wpe'] = None
270
+ position = torch.arange(0, config.block_size)
271
+ pe = get_positional_encoding(position, config.n_embd, config.embedding_type, config.block_size)
272
+ self.register_buffer('pos_emb', pe)
273
+
274
+ self.transformer['drop'] = nn.Dropout(config.dropout)
275
+ self.transformer['h'] = nn.ModuleList([Block(config) for _ in range(config.n_layer)])
276
+ self.transformer['ln_f'] = LayerNorm(config.n_embd, bias=config.bias)
277
+ self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
278
+ self.transformer['wte'].weight = self.lm_head.weight # Weight tying
279
+
280
+ self.apply(self._init_weights)
281
+ for pn, p in self.named_parameters():
282
+ if pn.endswith('c_proj.weight'):
283
+ nn.init.normal_(p, mean=0.0, std=0.02 / math.sqrt(2 * config.n_layer))
284
+
285
+ # Initialize activations and attention patterns
286
+ self.activations = []
287
+ self.attention_patterns = []
288
+
289
+ print("Number of parameters: {:.2f}M".format(self.get_num_params() / 1e6))
290
+
291
+ def get_num_params(self, non_embedding=True):
292
+ n_params = sum(p.numel() for p in self.parameters())
293
+ if non_embedding and self.transformer['wpe'] is not None:
294
+ n_params -= self.transformer['wpe'].weight.numel()
295
+ return n_params
296
+
297
+ def _init_weights(self, module):
298
+ if isinstance(module, nn.Linear):
299
+ nn.init.normal_(module.weight, mean=0.0, std=0.02)
300
+ if module.bias is not None:
301
+ nn.init.zeros_(module.bias)
302
+ elif isinstance(module, nn.Embedding):
303
+ nn.init.normal_(module.weight, mean=0.0, std=0.02)
304
+
305
+ def forward(self, idx, targets=None):
306
+ device = idx.device
307
+ b, t = idx.size()
308
+ assert t <= self.config.block_size, f"Cannot forward sequence of length {t}, block size is only {self.config.block_size}"
309
+ pos = torch.arange(0, t, dtype=torch.long, device=device) # shape (t)
310
+
311
+ tok_emb = self.transformer['wte'](idx) # token embeddings
312
+
313
+ if self.config.embedding_type in ['learned', 'default']:
314
+ pos_emb = self.transformer['wpe'](pos)
315
+ x = tok_emb + pos_emb
316
+ elif self.config.embedding_type == 'none':
317
+ x = tok_emb
318
+ else:
319
+ pos_emb = self.pos_emb[:t, :].to(device)
320
+ x = tok_emb + pos_emb.unsqueeze(0)
321
+
322
+ x = self.transformer['drop'](x)
323
+
324
+ # Reset activations and attention patterns if tracking
325
+ if self.config.track_activations and not self.training:
326
+ self.activations = []
327
+ if self.config.track_attention_patterns and not self.training:
328
+ self.attention_patterns = []
329
+
330
+ for block in self.transformer['h']:
331
+ x = block(x)
332
+ if self.config.track_activations and not self.training:
333
+ self.activations.append(x.detach().cpu())
334
+ if self.config.track_attention_patterns and not self.training:
335
+ if hasattr(block.attn, 'attn_weights'):
336
+ self.attention_patterns.append(block.attn.attn_weights)
337
+ x = self.transformer['ln_f'](x)
338
+ logits = self.lm_head(x)
339
+
340
+ if targets is not None:
341
+ loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1), ignore_index=-1)
342
+ else:
343
+ loss = None
344
+
345
+ return logits, loss
346
+
347
+ def configure_optimizers(self, weight_decay, learning_rate, betas, device_type):
348
+ # Start with all candidate parameters
349
+ param_dict = {pn: p for pn, p in self.named_parameters() if p.requires_grad}
350
+ decay_params = [p for n, p in param_dict.items() if p.dim() >= 2]
351
+ nodecay_params = [p for n, p in param_dict.items() if p.dim() < 2]
352
+ optim_groups = [
353
+ {'params': decay_params, 'weight_decay': weight_decay},
354
+ {'params': nodecay_params, 'weight_decay': 0.0},
355
+ ]
356
+ fused_available = 'fused' in inspect.signature(torch.optim.AdamW).parameters
357
+ use_fused = fused_available and device_type == 'cuda'
358
+ extra_args = dict(fused=True) if use_fused else dict()
359
+ optimizer = torch.optim.AdamW(optim_groups, lr=learning_rate, betas=betas, **extra_args)
360
+ print(f"Using fused AdamW: {use_fused}")
361
+
362
+ return optimizer
363
+
364
+ def estimate_mfu(self, fwdbwd_per_iter, dt):
365
+ """Estimate model flops utilization (MFU)"""
366
+ N = self.get_num_params()
367
+ cfg = self.config
368
+ L, H, Q, T = cfg.n_layer, cfg.n_head, cfg.n_embd // cfg.n_head, cfg.block_size
369
+ flops_per_token = 6 * N + 12 * L * H * Q * T
370
+ flops_per_fwdbwd = flops_per_token * T
371
+ flops_per_iter = flops_per_fwdbwd * fwdbwd_per_iter
372
+ flops_achieved = flops_per_iter * (1.0 / dt)
373
+ flops_promised = 312e12 # A100 GPU bfloat16 peak flops is 312 TFLOPS
374
+ mfu = flops_achieved / flops_promised
375
+ return mfu
376
+
377
+ @torch.no_grad()
378
+ def generate(self, idx, max_new_tokens, temperature=1.0, top_k=None):
379
+ """Generate sequences of tokens from the model"""
380
+ for _ in range(max_new_tokens):
381
+ idx_cond = idx if idx.size(1) <= self.config.block_size else idx[:, -self.config.block_size:]
382
+ logits, _ = self(idx_cond)
383
+ logits = logits[:, -1, :] / temperature
384
+ if top_k is not None:
385
+ v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
386
+ logits[logits < v[:, [-1]]] = -float('Inf')
387
+ probs = F.softmax(logits, dim=-1)
388
+ idx_next = torch.multinomial(probs, num_samples=1)
389
+ idx = torch.cat((idx, idx_next), dim=1)
390
+ return idx
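
The checkpointed model file above defines GPTConfig and GPT with switchable embedding_type and attention_type. For orientation only (not part of the commit), a small model using one of the analytic positional encodings could be instantiated roughly as follows, assuming the file is importable as model.py (the accompanying train script imports it under that name):

    # Hypothetical usage sketch for the model definition shown above.
    import torch
    from model import GPTConfig, GPT  # assumes the checkpoint file is saved as model.py

    config = GPTConfig(
        block_size=512, vocab_size=50304,
        n_layer=4, n_head=4, n_embd=256,
        dropout=0.0, bias=False,
        embedding_type="sinusoidal",   # any method handled by get_positional_encoding
        attention_type="default",
    )
    model = GPT(config)
    idx = torch.randint(0, config.vocab_size, (1, 16))  # dummy token ids
    logits, loss = model(idx)  # loss is None when no targets are given
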
.ipynb_checkpoints/train-checkpoint.py ADDED
@@ -0,0 +1,545 @@
1
+ # torchrun --standalone --nproc_per_node=2 train.py --batch_size=96
2
+
3
+ # train.py
4
+ import os
5
+ import time
6
+ import math
7
+ from contextlib import nullcontext
8
+ import json
9
+
10
+ import numpy as np
11
+ import torch
12
+ from torch.nn.parallel import DistributedDataParallel as DDP
13
+ from torch.distributed import init_process_group, destroy_process_group
14
+ import pandas as pd
15
+
16
+ import tiktoken
17
+ from model import GPTConfig, GPT
18
+
19
+ # Import wandb and tqdm
20
+ import wandb
21
+ from tqdm.auto import tqdm
22
+
23
+ # -----------------------------------------------------------------------------
24
+ # Default configuration with added positional encoding options
25
+ # I/O
26
+ out_dir = 'out'
27
+ eval_interval = 100 # Evaluate every 100 iterations
28
+ log_interval = 1 # Log every iteration
29
+ eval_iters = 100
30
+ eval_only = False
31
+ always_save_checkpoint = True
32
+ init_from = 'scratch' # 'scratch' | 'resume' | 'checkpoint'
33
+ checkpoint_path = '' # Path to a specific checkpoint to load
34
+ # wandb logging
35
+ wandb_log = True
36
+ wandb_project = 'gpt2_positional_encodings_100B'
37
+ wandb_run_name = 'experiment'
38
+ # data
39
+ dataset = 'fineweb'
40
+ gradient_accumulation_steps = 40
41
+ batch_size = 12
42
+ block_size = 512
43
+ # model
44
+ n_layer = 4
45
+ n_head = 4
46
+ n_embd = 256
47
+ dropout = 0.0
48
+ bias = False
49
+ # adamw optimizer
50
+ learning_rate = 6e-4
51
+ max_iters = 10000
52
+ weight_decay = 1e-1
53
+ beta1 = 0.9
54
+ beta2 = 0.95
55
+ grad_clip = 1.0
56
+ # learning rate decay settings
57
+ decay_lr = True
58
+ warmup_iters = 100
59
+ lr_decay_iters = 10000
60
+ min_lr = 6e-5
61
+ # DDP settings
62
+ backend = 'nccl'
63
+ # system
64
+ device = 'cuda'
65
+ dtype = 'bfloat16' if torch.cuda.is_available() and torch.cuda.is_bf16_supported() else 'float16'
66
+ compile = True
67
+ # Positional Encodings
68
+ embedding_types = ['wavelet']
69
+ attention_types = ['default']
70
+ # Data collection options
71
+ collect_attention_patterns = False # Set to True to collect attention patterns
72
+ collect_activations = False # Set to True to collect activations
73
+ # Evaluation datasets
74
+ eval_datasets = ['wikitext-103-v1', 'ptb', 'lambada'] # WikiText-103 and Penn Treebank
75
+ seed = 1337
76
+ # -----------------------------------------------------------------------------
77
+ config_keys = [k for k, v in globals().items() if not k.startswith('_') and isinstance(v, (int, float, bool, str, list, tuple))]
78
+ exec(open('configurator.py').read())
79
+ config = {k: globals()[k] for k in config_keys}
80
+ # -----------------------------------------------------------------------------
81
+
82
+ def is_compatible(embedding_type, attention_type):
83
+ # Incompatible combinations can be specified here
84
+ incompatible_combinations = [
85
+ # If specific combinations are incompatible
86
+ ]
87
+
88
+ # If embedding_type or attention_type is 'none', some attention methods may not function properly
89
+ if embedding_type == 'none' and attention_type in ['relative', 'rope']:
90
+ return False
91
+
92
+ # 'rope' attention requires even dimension per head
93
+ if attention_type == 'rope' and ((n_embd // n_head) % 2 != 0):
94
+ return False
95
+
96
+ return (embedding_type, attention_type) not in incompatible_combinations
97
+
98
+ def main():
99
+ # Initialize DDP if needed
100
+ global gradient_accumulation_steps
101
+ ddp = int(os.environ.get('RANK', -1)) != -1
102
+ if ddp:
103
+ init_process_group(backend=backend)
104
+ ddp_rank = int(os.environ['RANK'])
105
+ ddp_local_rank = int(os.environ['LOCAL_RANK'])
106
+ ddp_world_size = int(os.environ['WORLD_SIZE'])
107
+ device_local = f'cuda:{ddp_local_rank}'
108
+ torch.cuda.set_device(device_local)
109
+ master_process = ddp_rank == 0
110
+ seed_offset = ddp_rank
111
+ assert gradient_accumulation_steps % ddp_world_size == 0
112
+ gradient_accumulation_steps //= ddp_world_size
113
+ else:
114
+ master_process = True
115
+ seed_offset = 0
116
+ ddp_world_size = 1
117
+ device_local = device # Use the default device
118
+
119
+ tokens_per_iter = gradient_accumulation_steps * ddp_world_size * batch_size * block_size
120
+ if master_process:
121
+ print(f"Tokens per iteration will be: {tokens_per_iter:,}")
122
+
123
+ if master_process:
124
+ os.makedirs(out_dir, exist_ok=True)
125
+
126
+ # Set random seed
127
+ global seed
128
+ seed += seed_offset
129
+ torch.manual_seed(seed)
130
+ np.random.seed(seed)
131
+ torch.backends.cuda.matmul.allow_tf32 = True
132
+ torch.backends.cudnn.allow_tf32 = True
133
+ device_type = 'cuda' if 'cuda' in device_local else 'cpu'
134
+ ptdtype = {'float32': torch.float32, 'bfloat16': torch.bfloat16, 'float16': torch.float16}[dtype]
135
+ ctx = nullcontext() if device_type == 'cpu' else torch.amp.autocast(device_type=device_type, dtype=ptdtype)
136
+
137
+ # Load tokenizer using tiktoken
138
+ tokenizer = tiktoken.get_encoding("gpt2")
139
+
140
+ # Prepare evaluation datasets
141
+ eval_data = {}
142
+ for eval_dataset in eval_datasets:
143
+ eval_data_path = os.path.join('data', eval_dataset)
144
+ if not os.path.exists(eval_data_path):
145
+ raise FileNotFoundError(f"Dataset {eval_dataset} not found. Please run prepare_evaluation_data.py first.")
146
+
147
+ if eval_dataset in ['wikitext-2-v1', 'wikitext-103-v1']:
148
+ train_file = [f for f in os.listdir(eval_data_path) if f.startswith('train')][0]
149
+ val_file = [f for f in os.listdir(eval_data_path) if f.startswith('validation')][0]
150
+
151
+ train_df = pd.read_parquet(os.path.join(eval_data_path, train_file))
152
+ val_df = pd.read_parquet(os.path.join(eval_data_path, val_file))
153
+
154
+ train_text = '\n'.join(train_df['text'])
155
+ val_text = '\n'.join(val_df['text'])
156
+
157
+ elif eval_dataset == 'ptb':
158
+ with open(os.path.join(eval_data_path, 'train.txt'), 'r') as f:
159
+ train_text = f.read()
160
+ with open(os.path.join(eval_data_path, 'valid.txt'), 'r') as f:
161
+ val_text = f.read()
162
+
163
+ elif eval_dataset == 'lambada':
164
+ with open(os.path.join(eval_data_path, 'lambada_test.jsonl'), 'r') as f:
165
+ data = [json.loads(line) for line in f]
166
+ test_text = '\n'.join([item['text'] for item in data])
167
+ train_text = test_text[:len(test_text)//2] # Use first half as pseudo-train
168
+ val_text = test_text[len(test_text)//2:] # Use second half as pseudo-val
169
+
170
+ else:
171
+ raise ValueError(f"Unknown dataset: {eval_dataset}")
172
+
173
+ # Tokenize
174
+ train_ids = tokenizer.encode_ordinary(train_text)
175
+ val_ids = tokenizer.encode_ordinary(val_text)
176
+
177
+ # Convert to numpy arrays
178
+ train_ids = np.array(train_ids, dtype=np.uint16)
179
+ val_ids = np.array(val_ids, dtype=np.uint16)
180
+
181
+ eval_data[eval_dataset] = {'train': train_ids, 'val': val_ids}
182
+
183
+ # Data loading
184
+ data_dir = os.path.join('data', dataset)
185
+ # Update the get_batch function to handle evaluation datasets
186
+ def get_batch(split, dataset='main'):
187
+ if dataset == 'main':
188
+ if split == 'train':
189
+ data = np.memmap(os.path.join(data_dir, 'train.bin'), dtype=np.uint16, mode='r')
190
+ else:
191
+ data = np.memmap(os.path.join(data_dir, 'val.bin'), dtype=np.uint16, mode='r')
192
+ else:
193
+ data = eval_data[dataset][split]
194
+
195
+ ix = torch.randint(len(data) - block_size, (batch_size,))
196
+ x = torch.stack([torch.from_numpy((data[i:i+block_size]).astype(np.int64)) for i in ix])
197
+ y = torch.stack([torch.from_numpy((data[i+1:i+1+block_size]).astype(np.int64)) for i in ix])
198
+ if device_type == 'cuda':
199
+ x, y = x.pin_memory().to(device_local, non_blocking=True), y.pin_memory().to(device_local, non_blocking=True)
200
+ else:
201
+ x, y = x.to(device_local), y.to(device_local)
202
+ return x, y
203
+
204
+ # Attempt to derive vocab_size from the dataset
205
+ meta_path = os.path.join(data_dir, 'meta.json')
206
+ meta_vocab_size = None
207
+ if os.path.exists(meta_path):
208
+ with open(meta_path, 'r') as f:
209
+ meta = json.load(f)
210
+ meta_vocab_size = meta['vocab_size']
211
+ if master_process:
212
+ print(f"Found vocab_size = {meta_vocab_size} (inside {meta_path})")
213
+
214
+ # Helps estimate loss and collect attention patterns and activations
215
+ @torch.no_grad()
216
+ def estimate_loss(model, collect_attention_patterns=False, collect_activations=False, save_dir=None, max_batches_to_save=None):
217
+ out = {}
218
+ model.eval()
219
+ # Access the underlying model if wrapped with DDP
220
+ raw_model = model.module if hasattr(model, 'module') else model
221
+
222
+ # Set tracking flags on the underlying model
223
+ raw_model.config.track_attention_patterns = collect_attention_patterns
224
+ raw_model.config.track_activations = collect_activations
225
+
226
+ if collect_attention_patterns or collect_activations:
227
+ if save_dir is None:
228
+ raise ValueError("save_dir must be specified when collecting attention patterns or activations.")
229
+ if master_process:
230
+ os.makedirs(save_dir, exist_ok=True)
231
+
232
+ for split in ['train', 'val']:
233
+ losses = torch.zeros(eval_iters)
234
+ save_count = 0 # Counter for saved batches
235
+ for k in range(eval_iters):
236
+ X, Y = get_batch(split)
237
+ with ctx:
238
+ logits, loss = model(X, Y)
239
+ losses[k] = loss.item()
240
+ # Collect and save attention patterns and activations
241
+ if (collect_attention_patterns or collect_activations) and save_count < (max_batches_to_save or eval_iters):
242
+ if collect_attention_patterns or collect_activations:
243
+ if master_process:
244
+ batch_dir = os.path.join(save_dir, f"{split}_batch_{k}")
245
+ os.makedirs(batch_dir, exist_ok=True)
246
+ # Save activations
247
+ if collect_activations and hasattr(raw_model, 'activations'):
248
+ for idx, activation in enumerate(raw_model.activations):
249
+ activation_path = os.path.join(batch_dir, f"activation_layer_{idx}.pt")
250
+ torch.save(activation, activation_path)
251
+ # Save attention patterns
252
+ if collect_attention_patterns and hasattr(raw_model, 'attention_patterns'):
253
+ for idx, attention in enumerate(raw_model.attention_patterns):
254
+ attention_path = os.path.join(batch_dir, f"attention_layer_{idx}.pt")
255
+ torch.save(attention, attention_path)
256
+ # Clear activations and attention patterns from the model
257
+ raw_model.activations = []
258
+ raw_model.attention_patterns = []
259
+ save_count += 1
260
+ out[split] = losses.mean().item()
261
+
262
+ # Evaluate on additional datasets
263
+ for eval_dataset in eval_datasets:
264
+ split_losses = {}
265
+ for split in ['train', 'val']:
266
+ losses = torch.zeros(eval_iters)
267
+ save_count = 0 # Counter for saved batches
268
+ for k in range(eval_iters):
269
+ X, Y = get_batch(split, dataset=eval_dataset)
270
+ with ctx:
271
+ logits, loss = model(X, Y)
272
+ losses[k] = loss.item()
273
+ # Collect and save attention patterns and activations
274
+ if (collect_attention_patterns or collect_activations) and save_count < (max_batches_to_save or eval_iters):
275
+ if collect_attention_patterns or collect_activations:
276
+ if master_process:
277
+ batch_dir = os.path.join(save_dir, f"{eval_dataset}_{split}_batch_{k}")
278
+ os.makedirs(batch_dir, exist_ok=True)
279
+ # Save activations
280
+ if collect_activations and hasattr(raw_model, 'activations'):
281
+ for idx, activation in enumerate(raw_model.activations):
282
+ activation_path = os.path.join(batch_dir, f"activation_layer_{idx}.pt")
283
+ torch.save(activation, activation_path)
284
+ # Save attention patterns
285
+ if collect_attention_patterns and hasattr(raw_model, 'attention_patterns'):
286
+ for idx, attention in enumerate(raw_model.attention_patterns):
287
+ attention_path = os.path.join(batch_dir, f"attention_layer_{idx}.pt")
288
+ torch.save(attention, attention_path)
289
+ # Clear activations and attention patterns from the model
290
+ raw_model.activations = []
291
+ raw_model.attention_patterns = []
292
+ save_count += 1
293
+ split_losses[split] = losses.mean().item()
294
+ out[eval_dataset] = split_losses
295
+ model.train()
296
+ # Reset tracking flags
297
+ raw_model.config.track_attention_patterns = False
298
+ raw_model.config.track_activations = False
299
+ return out
300
+
301
+ # Learning rate decay scheduler
302
+ def get_lr(it):
303
+ if it < warmup_iters:
304
+ return learning_rate * it / warmup_iters
305
+ if it > lr_decay_iters:
306
+ return min_lr
307
+ decay_ratio = (it - warmup_iters) / (lr_decay_iters - warmup_iters)
308
+ coeff = 0.5 * (1.0 + math.cos(math.pi * decay_ratio))
309
+ return min_lr + coeff * (learning_rate - min_lr)
310
+
311
+ # Training loop over positional encoding combinations
312
+ for embedding_type in embedding_types:
313
+ for attention_type in attention_types:
314
+ if not is_compatible(embedding_type, attention_type):
315
+ if master_process:
316
+ print(f"Skipping incompatible combination: Embedding={embedding_type}, Attention={attention_type}")
317
+ continue
318
+
319
+ # Configure model arguments
320
+ model_args = dict(
321
+ n_layer=n_layer,
322
+ n_head=n_head,
323
+ n_embd=n_embd,
324
+ block_size=block_size,
325
+ bias=bias,
326
+ vocab_size=None,
327
+ dropout=dropout,
328
+ embedding_type=embedding_type,
329
+ attention_type=attention_type,
330
+ track_activations=False,
331
+ track_attention_patterns=False,
332
+ )
333
+
334
+ # Initialize or resume model
335
+ iter_num = 0
336
+ best_val_loss = 1e9 # initialize best val loss to a high value
337
+ checkpoint = None
338
+ run_id = None # Initialize run_id to None
339
+
340
+ if init_from == 'scratch':
341
+ if master_process:
342
+ print(f"\nInitializing new model with embedding_type={embedding_type}, attention_type={attention_type}")
343
+ if meta_vocab_size is None:
344
+ if master_process:
345
+ print("Defaulting to vocab_size of GPT-2 to 50257")
346
+ model_args['vocab_size'] = meta_vocab_size if meta_vocab_size is not None else 50257
347
+ gptconf = GPTConfig(**model_args)
348
+ model = GPT(gptconf)
349
+ elif init_from == 'resume':
350
+ # Resume from the latest checkpoint
351
+ ckpt_path = os.path.join(out_dir, f"ckpt_{embedding_type}_{attention_type}.pt")
352
+ if not os.path.exists(ckpt_path):
353
+ raise FileNotFoundError(f"Checkpoint not found at {ckpt_path}")
354
+ if master_process:
355
+ print(f"\nResuming training from checkpoint {ckpt_path}")
356
+ checkpoint = torch.load(ckpt_path, map_location=device_local)
357
+ gptconf = GPTConfig(**checkpoint['model_args'])
358
+ model = GPT(gptconf)
359
+ model.load_state_dict(checkpoint['model'])
360
+ iter_num = checkpoint['iter_num']
361
+ best_val_loss = checkpoint['best_val_loss']
362
+ seed = checkpoint.get('seed', seed)
363
+ run_id = checkpoint.get('wandb_run_id', None)
364
+ elif init_from == 'checkpoint':
365
+ # Resume from a specific checkpoint
366
+ if not checkpoint_path or not os.path.exists(checkpoint_path):
367
+ raise FileNotFoundError(f"Checkpoint not found at {checkpoint_path}")
368
+ if master_process:
369
+ print(f"\nLoading model from checkpoint {checkpoint_path}")
370
+ checkpoint = torch.load(checkpoint_path, map_location=device_local)
371
+ gptconf = GPTConfig(**checkpoint['model_args'])
372
+ model = GPT(gptconf)
373
+ model.load_state_dict(checkpoint['model'])
374
+ iter_num = checkpoint['iter_num']
375
+ best_val_loss = checkpoint['best_val_loss']
376
+ seed = checkpoint.get('seed', seed)
377
+ run_id = checkpoint.get('wandb_run_id', None)
378
+ else:
379
+ raise ValueError(f"Unknown init_from '{init_from}'")
380
+
381
+ # Set random seed
382
+ seed += seed_offset
383
+ torch.manual_seed(seed)
384
+ np.random.seed(seed)
385
+
386
+ model.to(device_local)
387
+ scaler = torch.cuda.amp.GradScaler(enabled=(dtype == 'float16'))
388
+ optimizer = model.configure_optimizers(weight_decay, learning_rate, (beta1, beta2), device_type)
389
+
390
+ # Load optimizer state if resuming
391
+ if checkpoint is not None:
392
+ optimizer.load_state_dict(checkpoint['optimizer'])
393
+
394
+ if compile:
395
+ if master_process:
396
+ print("Compiling the model... (takes a ~minute)")
397
+ unoptimized_model = model
398
+ model = torch.compile(model)
399
+
400
+ if ddp:
401
+ model = DDP(model, device_ids=[ddp_local_rank])
402
+
403
+ # Logging with WandB
404
+ if wandb_log and master_process:
405
+ run_name = f"{embedding_type}_{attention_type}_{wandb_run_name}"
406
+ # Initialize WandB
407
+ wandb.init(project=wandb_project, name=run_name, config=config, resume='allow', id=run_id)
408
+ # Save the run ID for resuming later
409
+ run_id = wandb.run.id
410
+ else:
411
+ run_id = None
412
+
413
+ # Training loop
414
+ X, Y = get_batch('train')
415
+ t0 = time.time()
416
+ local_iter_num = 0
417
+ raw_model = model.module if hasattr(model, 'module') else model
418
+ running_mfu = -1.0
419
+ progress_bar = tqdm(total=max_iters, initial=iter_num, desc=f"Training {embedding_type} + {attention_type}", disable=not master_process)
420
+ progress_bar_update_freq = 1 # Update progress bar every iteration
421
+
422
+ while True:
423
+ # Determine learning rate
424
+ lr = get_lr(iter_num) if decay_lr else learning_rate
425
+ for param_group in optimizer.param_groups:
426
+ param_group['lr'] = lr
427
+
428
+ # Evaluate and checkpoint
429
+ if iter_num % eval_interval == 0 and iter_num > 0:
430
+ # Define save_dir for collected data
431
+ eval_data_dir = os.path.join('data', 'eval_data', f"{embedding_type}_{attention_type}", f"step_{iter_num}")
432
+ # Set a limit on the number of batches to save during evaluation
433
+ max_batches_to_save = 10 # Adjust this number as needed to control storage usage
434
+ losses = estimate_loss(model,
435
+ collect_attention_patterns=collect_attention_patterns,
436
+ collect_activations=collect_activations,
437
+ save_dir=eval_data_dir,
438
+ max_batches_to_save=max_batches_to_save)
439
+ if master_process:
440
+ print(f"\nStep {iter_num}:")
441
+ print(f"Train loss: {losses['train']:.4f}, Val loss: {losses['val']:.4f}")
442
+ for eval_dataset in eval_datasets:
443
+ print(f"{eval_dataset} - Train loss: {losses[eval_dataset]['train']:.4f}, Val loss: {losses[eval_dataset]['val']:.4f}")
444
+ # Log to wandb
445
+ if wandb_log:
446
+ wandb_metrics = {
447
+ "iter": iter_num,
448
+ "train/loss": losses['train'],
449
+ "val/loss": losses['val'],
450
+ "lr": lr,
451
+ "mfu": running_mfu * 100,
452
+ }
453
+ for eval_dataset in eval_datasets:
454
+ wandb_metrics[f"{eval_dataset}/train_loss"] = losses[eval_dataset]['train']
455
+ wandb_metrics[f"{eval_dataset}/val_loss"] = losses[eval_dataset]['val']
456
+ wandb.log(wandb_metrics, step=iter_num)
457
+ if losses['val'] < best_val_loss or always_save_checkpoint:
458
+ best_val_loss = losses['val']
459
+ if iter_num > 0:
460
+ checkpoint = {
461
+ 'model': raw_model.state_dict(),
462
+ 'optimizer': optimizer.state_dict(),
463
+ 'model_args': model_args,
464
+ 'iter_num': iter_num,
465
+ 'best_val_loss': best_val_loss,
466
+ 'config': config,
467
+ 'seed': seed,
468
+ 'wandb_run_id': run_id
469
+ }
470
+ ckpt_path = os.path.join(out_dir, f"ckpt_{embedding_type}_{attention_type}.pt")
471
+ if master_process:
472
+ print(f"Saving checkpoint to {ckpt_path}")
473
+ torch.save(checkpoint, ckpt_path)
474
+ # Update progress bar postfix
475
+ if master_process:
476
+ postfix_dict = {
477
+ 'train_loss': f"{losses['train']:.4f}",
478
+ 'val_loss': f"{losses['val']:.4f}"
479
+ }
480
+ for eval_dataset in eval_datasets:
481
+ postfix_dict[f"{eval_dataset}_val_loss"] = f"{losses[eval_dataset]['val']:.4f}"
482
+ progress_bar.set_postfix(postfix_dict)
483
+
484
+ if eval_only:
485
+ break
486
+
487
+ # Forward backward update
488
+ for micro_step in range(gradient_accumulation_steps):
489
+ if ddp:
490
+ model.require_backward_grad_sync = (micro_step == gradient_accumulation_steps - 1)
491
+ with ctx:
492
+ logits, loss = model(X, Y)
493
+ loss = loss / gradient_accumulation_steps
494
+ X, Y = get_batch('train')
495
+ scaler.scale(loss).backward()
496
+ if grad_clip != 0.0:
497
+ scaler.unscale_(optimizer)
498
+ torch.nn.utils.clip_grad_norm_(model.parameters(), grad_clip)
499
+ scaler.step(optimizer)
500
+ scaler.update()
501
+ optimizer.zero_grad(set_to_none=True)
502
+
503
+ # Logging
504
+ t1 = time.time()
505
+ dt = t1 - t0
506
+ t0 = t1
507
+ if iter_num % log_interval == 0:
508
+ lossf = loss.item() * gradient_accumulation_steps
509
+ if local_iter_num >= 5:
510
+ mfu = raw_model.estimate_mfu(batch_size * gradient_accumulation_steps, dt)
511
+ running_mfu = mfu if running_mfu == -1.0 else 0.9 * running_mfu + 0.1 * mfu
512
+ if master_process:
513
+ progress_bar.set_postfix({
514
+ 'loss': f"{lossf:.4f}",
515
+ 'lr': f"{lr:.2e}",
516
+ 'mfu': f"{running_mfu*100:.2f}%",
517
+ 'time_per_iter_ms': f"{dt * 1000:.2f}ms",
518
+ })
519
+ if wandb_log:
520
+ wandb.log({
521
+ "iter": iter_num,
522
+ "train/loss": lossf,
523
+ "lr": lr,
524
+ "mfu": running_mfu * 100,
525
+ "time_per_iter_ms": dt * 1000,
526
+ }, step=iter_num)
527
+ iter_num += 1
528
+ local_iter_num += 1
529
+ if master_process:
530
+ progress_bar.update(progress_bar_update_freq)
531
+ # Termination conditions
532
+ if iter_num > max_iters:
533
+ break
534
+
535
+ if master_process:
536
+ progress_bar.close()
537
+ if wandb_log and master_process:
538
+ wandb.finish()
539
+
540
+ # Destroy the process group after all models have been trained
541
+ if ddp:
542
+ destroy_process_group()
543
+
544
+ if __name__ == '__main__':
545
+ main()
.launchpadlib/api.launchpad.net/cache/api.launchpad.net,devel,~deadsnakes,+archive,ubuntu,ppa,ws.op=getSigningKeyData-application,json,c76e9ed0b661c7fa5da42e8fb2da319e ADDED
@@ -0,0 +1,22 @@
1
+ status: 200
2
+ date: Mon, 05 Feb 2024 23:25:35 GMT
3
+ server: gunicorn
4
+ x-powered-by: Zope (www.zope.org), Python (www.python.org)
5
+ content-security-policy: frame-ancestors 'self';
6
+ content-type: application/json
7
+ strict-transport-security: max-age=15552000
8
+ vary: Accept,Accept-Encoding
9
+ x-content-type-options: nosniff
10
+ x-frame-options: SAMEORIGIN
11
+ x-launchpad-revision: 9643586c585856148a18782148972ae9c1179d06
12
+ x-lazr-notifications: []
13
+ x-xss-protection: 1; mode=block
14
+ x-vcs-revision: 9643586c585856148a18782148972ae9c1179d06
15
+ x-request-id: 452e0c68-aa99-4bb4-abc3-237c7bb39fae
16
+ content-length: 1641
17
+ -content-encoding: gzip
18
+ content-location: https://api.launchpad.net/devel/~deadsnakes/+archive/ubuntu/ppa?ws.op=getSigningKeyData
19
+ -varied-accept: application/json
20
+ -varied-accept-encoding: gzip, deflate
21
+
22
+ "-----BEGIN PGP PUBLIC KEY BLOCK-----\n\nmQINBFl8fYEBEADQmGZ6pDrwY9iH9DVlwNwTOvOZ7q7lHXPl/TLfMs1tckMc/D9a\nhsdBN9VWtMmo+RySvhkIe8X15r65TFs2HE8ft6j2e/4K472pObM1hB+ajiU/wYX2\nSyq7DBlNm6YMP5/SyQzRxqis4Ja1uUjW4Q5/Csdf5In8uMzXj5D1P7qOiP2aNa0E\nr3w6PXWRTuTihWZOsHv8npyVYDBRR6gEZbd3r86snI/7o8Bfmad3KjbxL7aOdNMw\nAqQFaNKl7Y+UJpv1CNFIf+twcOoC0se1SrsVJlAH9HNHM7XGQsPUwpNvQlcmvr+t\n1vVS2m72lk3gyShDuJpi1TifGw+DoTqu54U0k+0sZm4pnQVeiizNkefU2UqOoGlt\n4oiG9nIhSX04xRlGes3Ya0OjNI5b1xbcYoR+r0c3odI+UCw3VSZtKDX/xlH1o/82\nb8ouXeE7LA1i4DvGNj4VSvoxv4ggIznxMf+PkWXWKwRGsbAAXF52rr4FUaeaKoIU\nDkJqHXAxrB3PQslZ+ZgBEukkQZF76NkqRqP1E7FXzZZMo2eEL7vtnhSzUlanOf42\nECBoWHVoZQaRFMNbGpqlg9aWedHGyetMStS3nH1sqanr+i4I8VR/UH+ilarPTW3T\nE0apWlsH8+N3IKbRx2wgrRZNoQEuyVtvyewDFYShJB3Zxt7VCy67vKAl1QARAQAB\ntBxMYXVuY2hwYWQgUFBBIGZvciBkZWFkc25ha2VziQI4BBMBAgAiBQJZfH2BAhsD\nBgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgAAKCRC6aTI2anVXdvwhD/4oI3yckeKn\n9aJNNTJsyw4ydMkIAOdG+jbZsYv/rN73UVQF1RA8HC71SDmbd0Nu80koBOX+USuL\nvvhoMIsARlD5dLx5f/zaQcYWJm/BtsMF/eZ4s1xsenwW6PpXd8FpaTn1qtg/8+O9\n99R4uSetAhhyf1vSRb/8U0sgSQd38mpZZFq352UuVisXnmCThj621loQubYJ3lwU\nLSLs8wmgo4XIYH7UgdavV9dfplPh0M19RHQL3wTyQP2KRNRq1rG7/n1XzUwDyqY6\neMVhdVhvnxAGztvdFCySVzBRr/rCw6quhcYQwBqdqaXhz63np+4mlUNfd8Eu+Vas\nb/tbteF/pDu0yeFMpK4X09Cwn2kYYCpq4XujijW+iRWb4MO3G8LLi8oBAHP/k0CM\n/QvSRbbG8JDQkQDH37Efm8iE/EttJTixjKAIfyugmvEHfcrnxaMoBioa6h6McQrM\nvI8bJirxorJzOVF4kY7xXvMYwjzaDC8G0fTA8SzQRaShksR3USXZjz8vS6tZ+YNa\nmRHPoZ3Ua0bz4t2aCcu/fknVGsXcNBazNIK9WF2665Ut/b7lDbojXsUZ3PpuqOoe\nGQL9LRj7nmCI6ugoKkNp8ZXcGJ8BGw37Wep2ztyzDohXp6f/4mGgy2KYV9R4S8D5\nyBDUU6BS7Su5nhQMStfdfr4FffLmnvFC9w==\n=7hFk\n-----END PGP PUBLIC KEY BLOCK-----\n"
.launchpadlib/api.launchpad.net/cache/api.launchpad.net,devel,~deadsnakes,name=%22ppa%22&ws.op=getPPAByName-application,json,bca461ac71b1143128b6fbebfcd56851 ADDED
@@ -0,0 +1,22 @@
1
+ status: 200
2
+ date: Mon, 05 Feb 2024 23:25:35 GMT
3
+ server: gunicorn
4
+ x-powered-by: Zope (www.zope.org), Python (www.python.org)
5
+ content-security-policy: frame-ancestors 'self';
6
+ content-type: application/json
7
+ strict-transport-security: max-age=15552000
8
+ vary: Accept,Accept-Encoding
9
+ x-content-type-options: nosniff
10
+ x-frame-options: SAMEORIGIN
11
+ x-launchpad-revision: 9643586c585856148a18782148972ae9c1179d06
12
+ x-lazr-notifications: []
13
+ x-xss-protection: 1; mode=block
14
+ x-vcs-revision: 9643586c585856148a18782148972ae9c1179d06
15
+ x-request-id: ee5c3fda-04a1-41df-8644-2766d8c98b27
16
+ content-length: 4377
17
+ -content-encoding: gzip
18
+ content-location: https://api.launchpad.net/devel/~deadsnakes?name=%22ppa%22&ws.op=getPPAByName
19
+ -varied-accept: application/json
20
+ -varied-accept-encoding: gzip, deflate
21
+
22
+ {"self_link": "https://api.launchpad.net/devel/~deadsnakes/+archive/ubuntu/ppa", "web_link": "https://launchpad.net/~deadsnakes/+archive/ubuntu/ppa", "resource_type_link": "https://api.launchpad.net/devel/#archive", "owner_link": "https://api.launchpad.net/devel/~deadsnakes", "name": "ppa", "displayname": "New Python Versions", "reference": "~deadsnakes/ubuntu/ppa", "distribution_link": "https://api.launchpad.net/devel/ubuntu", "private": false, "suppress_subscription_notifications": false, "dependencies_collection_link": "https://api.launchpad.net/devel/~deadsnakes/+archive/ubuntu/ppa/dependencies", "description": "This PPA contains more recent Python versions packaged for Ubuntu.\n\nDisclaimer: there's no guarantee of timely updates in case of security problems or other issues. If you want to use them in a security-or-otherwise-critical environment (say, on a production server), you do so at your own risk.\n\nUpdate Note\n===========\nPlease use this repository instead of ppa:fkrull/deadsnakes.\n\nReporting Issues\n================\n\nIssues can be reported in the master issue tracker at:\nhttps://github.com/deadsnakes/issues/issues\n\nSupported Ubuntu and Python Versions\n====================================\n\n- Ubuntu 20.04 (focal) Python3.5 - Python3.7, Python3.9 - Python3.13\n- Ubuntu 22.04 (jammy) Python3.7 - Python3.9, Python3.11 - Python3.13\n- Note: Python2.7 (all), Python 3.8 (focal), Python 3.10 (jammy) are not provided by deadsnakes as upstream ubuntu provides those packages.\n\nWhy some packages aren't built:\n- Note: for focal, older python versions require libssl\u003c1.1 so they are not currently built\n- Note: for jammy, older python versions requre libssl\u003c3 so they are not currently built\n- If you need these, reach out to asottile to set up a private ppa\n\nThe packages may also work on other versions of Ubuntu or Debian, but that is not tested or supported.\n\nPackages\n========\n\nThe packages provided here are loosely based on the debian upstream packages with some modifications to make them more usable as non-default pythons and on ubuntu. As such, the packages follow debian's patterns and often do not include a full python distribution with just `apt install python#.#`. Here is a list of packages that may be useful along with the default install:\n\n- `python#.#-dev`: includes development headers for building C extensions\n- `python#.#-venv`: provides the standard library `venv` module\n- `python#.#-distutils`: provides the standard library `distutils` module\n- `python#.#-lib2to3`: provides the `2to3-#.#` utility as well as the standard library `lib2to3` module\n- `python#.#-gdbm`: provides the standard library `dbm.gnu` module\n- `python#.#-tk`: provides the standard library `tkinter` module\n\nThird-Party Python Modules\n==========================\n\nPython modules in the official Ubuntu repositories are packaged to work with the Python interpreters from the official repositories. Accordingly, they generally won't work with the Python interpreters from this PPA. As an exception, pure-Python modules for Python 3 will work, but any compiled extension modules won't.\n\nTo install 3rd-party Python modules, you should use the common Python packaging tools. 
For an introduction into the Python packaging ecosystem and its tools, refer to the Python Packaging User Guide:\nhttps://packaging.python.org/installing/\n\nSources\n=======\nThe package sources are available at:\nhttps://github.com/deadsnakes/\n\nNightly Builds\n==============\n\nFor nightly builds, see ppa:deadsnakes/nightly https://launchpad.net/~deadsnakes/+archive/ubuntu/nightly", "signing_key_fingerprint": "F23C5A6CF475977595C89F51BA6932366A755776", "require_virtualized": true, "build_debug_symbols": false, "publish_debug_symbols": false, "permit_obsolete_series_uploads": false, "authorized_size": 10240, "status": "Active", "external_dependencies": null, "processors_collection_link": "https://api.launchpad.net/devel/~deadsnakes/+archive/ubuntu/ppa/processors", "enabled_restricted_processors_collection_link": "https://api.launchpad.net/devel/~deadsnakes/+archive/ubuntu/ppa/enabled_restricted_processors", "publishing_method": "Local", "repository_format": "Debian", "publish": true, "relative_build_score": 0, "http_etag": "\"e23cc285682ec6a9eb87828016a9b36731e6dc4d-841a2599806ee01fd2d7dc9450f94c9cd2dba95c\""}
.local/share/Trash/info/train_005.bin.trashinfo ADDED
@@ -0,0 +1,3 @@
1
+ [Trash Info]
2
+ Path=/root/data/fineweb/train_005.bin
3
+ DeletionDate=2024-09-26T05:50:34
.local/share/jupyter/nbextensions/exercise2/main.js ADDED
@@ -0,0 +1,169 @@
1
+ // Copyright (c) IPython-Contrib Team.
2
+ // Distributed under the terms of the Modified BSD License.
3
+
4
+ // Hide or display solutions in a notebook
5
+
6
+ // dec 6, 2017 @jcb91: use bootstrap 'hidden' class to play nicely with collapsible_headings
7
+ // december 30, 2015: update to notebook 4.1.x
8
+ // updated on december 22, 2015 to allow consecutive exercises
9
+ // exercise2: built by @jfbercher from an earlier work by @junasch (october 2015) - see readme.md
10
+
11
+ define([
12
+ 'base/js/namespace',
13
+ 'jquery',
14
+ 'require',
15
+ 'base/js/events',
16
+ ], function(IPython, $, requirejs, events) {
17
+ "use strict";
18
+
19
+ var cfg = {
20
+ add_button: true,
21
+ use_hotkey: true,
22
+ hotkey: 'Alt-D',
23
+ };
24
+
25
+ /**
26
+ * handle click event
27
+ *
28
+ * @method click_solution_lock
29
+ * @param evt {Event} jquery event
30
+ */
31
+ function click_solution_lock(evt) {
32
+ var cell = IPython.notebook.get_selected_cell();
33
+ var is_locked = cell.metadata.solution2 === 'hidden';
34
+ cell.metadata.solution2 = is_locked ? 'shown' : 'hidden';
35
+ element_set_locked(cell, !is_locked);
36
+ cell = IPython.notebook.get_next_cell(cell);
37
+ while (cell !== null && cell.metadata.solution2 !== undefined && !cell.metadata.solution2_first) {
38
+ cell.element.toggleClass('hidden', !is_locked);
39
+ cell.metadata.solution2 = is_locked ? 'shown' : 'hidden';
40
+ cell = IPython.notebook.get_next_cell(cell);
41
+ }
42
+ }
43
+
44
+ /**
45
+ * Create or Remove an exercise in selected cells
46
+ *
47
+ * @method create_remove_exercise
48
+ *
49
+ */
50
+ function create_remove_exercise() {
51
+ var lcells = IPython.notebook.get_selected_cells();
52
+ // It is possible that no cell is selected
53
+ if (lcells.length < 1) {
54
+ alert("Exercise extension: \nPlease select some cells...");
55
+ return;
56
+ }
57
+
58
+ var cell = lcells[0];
59
+ if (cell.metadata.solution2_first) {
60
+ remove_element(cell);
61
+ delete cell.metadata.solution2_first;
62
+ while (cell !== null && cell.metadata.solution2 !== undefined && !cell.metadata.solution2_first) {
63
+ delete cell.metadata.solution2;
64
+ cell.element.removeClass('hidden');
65
+ cell = IPython.notebook.get_next_cell(cell);
66
+ }
67
+ }
68
+ else {
69
+ cell.metadata.solution2_first = true;
70
+ cell.metadata.solution2 = 'hidden';
71
+ add_element(cell);
72
+ for (var k = 1; k < lcells.length; k++) {
73
+ cell = lcells[k];
74
+ cell.element.addClass('hidden');
75
+ cell.metadata.solution2 = 'hidden';
76
+ }
77
+ }
78
+ }
79
+
80
+ /**
81
+ * Add a lock control to the given cell
82
+ */
83
+ var cbx = 0;
84
+ function add_element(cell) {
85
+ var ctrl = cell.element.find('.exercise');
86
+ if (ctrl.length > 0) return ctrl;
87
+ var locked = cell.metadata.solution2 === 'hidden';
88
+ cell.element.css('flex-wrap', 'wrap');
89
+ cbx += 1;
90
+ ctrl = $([
91
+ '<div class="exercise exercise2">',
92
+ ' <div class="prompt"></div>',
93
+ ' <div class="onoffswitch">',
94
+ ' <input class="onoffswitch-checkbox" type="checkbox" id="myCheck' + cbx + '">',
95
+ ' <label class="onoffswitch-label" for="myCheck' + cbx + '">',
96
+ ' <div class="onoffswitch-inner"></div>',
97
+ ' <div class="onoffswitch-switch"></div>',
98
+ ' </label>',
99
+ ' </div>',
100
+ '</div>'
101
+ ].join('\n'))
102
+ .appendTo(cell.element);
103
+ ctrl.find('input')
104
+ .on('click', click_solution_lock);
105
+ element_set_locked(cell, locked);
106
+ return ctrl;
107
+ }
108
+
109
+ function remove_element(cell) {
110
+ cell.element.find('.exercise').remove();
111
+ }
112
+
113
+ function element_set_locked(cell, locked) {
114
+ return cell.element.find('.exercise')
115
+ .prop('checked', !locked);
116
+ }
117
+
118
+ function refresh_exercises() {
119
+ var in_exercise = false;
120
+ IPython.notebook.get_cells().forEach(function(cell) {
121
+ if (in_exercise && cell.metadata.solution2 !== undefined && !cell.metadata.solution2_first) {
122
+ cell.element.toggleClass('hidden', cell.metadata.solution2 === 'hidden');
123
+ } else {
124
+ in_exercise = false;
125
+ }
126
+ if (!in_exercise && cell.metadata.solution2 !== undefined) {
127
+ in_exercise = true;
128
+ add_element(cell);
129
+ }
130
+ });
131
+ }
132
+
133
+ function load_ipython_extension() {
134
+ // add css
135
+ $('<link rel="stylesheet" type="text/css">')
136
+ .attr('href', requirejs.toUrl('./main.css'))
137
+ .appendTo('head');
138
+
139
+ // Hide/display existing solutions at startup
140
+ events.on('notebook_loaded.Notebook', refresh_exercises);
141
+ if (IPython.notebook._fully_loaded) refresh_exercises();
142
+
143
+ var action_name = IPython.keyboard_manager.actions.register({
144
+ help : 'Exercise2: Create/Remove exercise',
145
+ help_index: 'ht',
146
+ icon : 'fa-toggle-on',
147
+ handler : create_remove_exercise,
148
+ }, 'create_remove_exercise', 'exercise2');
149
+
150
+ return IPython.notebook.config.loaded.then(function() {
151
+ $.extend(true, cfg, IPython.notebook.config.data.exercise2);
152
+
153
+ if (cfg.add_button) {
154
+ IPython.toolbar.add_buttons_group([action_name]);
155
+ }
156
+ if (cfg.use_hotkey && cfg.hotkey) {
157
+ var cmd_shrts = {};
158
+ cmd_shrts[cfg.hotkey] = action_name;
159
+ IPython.keyboard_manager.command_shortcuts.add_shortcuts(cmd_shrts);
160
+ }
161
+ }).catch(function(err) {
162
+ console.warn('[exercise2] error:', err);
163
+ });
164
+ }
165
+
166
+ return {
167
+ load_ipython_extension: load_ipython_extension,
168
+ };
169
+ });
.local/share/jupyter/nbextensions/freeze/config.yaml ADDED
@@ -0,0 +1,20 @@
1
+ Type: IPython Notebook Extension
2
+ Name: Freeze
3
+ Description: Freeze cells (forbid editing and executing) or make them read-only
4
+ Link: readme.md
5
+ Icon: icon.png
6
+ Main: main.js
7
+ Compatibility: 4.x, 5.x
8
+ Parameters:
9
+ - name: Freeze.readonly_color
10
+ description: |
11
+ Color to use for read-only cell
12
+ default: '#fffef0'
13
+ input_type: color
14
+
15
+ - name: Freeze.frozen_color
16
+ description: |
17
+ Color to use for frozen cell
18
+ default: '#f0feff'
19
+ input_type: color
20
+
.local/share/jupyter/nbextensions/freeze/icon.png ADDED
.local/share/jupyter/nbextensions/gist_it/icon.png ADDED
.local/share/jupyter/nbextensions/go_to_current_running_cell/auto_focus.gif ADDED

Git LFS Details

  • SHA256: 3dc033a545fe3eccdeee6e66932f1a46de4d0cafe084d471165e750ede1dcc4f
  • Pointer size: 132 Bytes
  • Size of remote file: 1.75 MB
.local/share/jupyter/nbextensions/go_to_current_running_cell/eye.png ADDED
.local/share/jupyter/nbextensions/go_to_current_running_cell/main.js ADDED
@@ -0,0 +1,126 @@
1
+ // Copyright (c) Jupyter-Contrib Team.
2
+ // Distributed under the terms of the Modified BSD License.
3
+
4
+ // This is an extension that allows you to jump to the current running cell.
5
+ // You can also activate this functionality automatically,
6
+ // i.e., your view is always scolling to the current cell.
7
+
8
+ //
9
+ // Keyboard shortcuts: Alt-I (go to the running cell), Alt-; / Alt-' (turn following on/off)
10
+ // The extension is simple: create a function, then register the action and shortcut separately,
11
+ // so that users can update the shortcut according to their needs.
12
+
13
+
14
+
15
+ define([
16
+ 'base/js/namespace',
17
+ 'jquery',
18
+ 'require',
19
+ 'base/js/events'
20
+ ], function (Jupyter, $, requirejs, events) {
21
+ "use strict";
22
+
23
+ var action_follow_cell_on; // set on registration
24
+ var action_follow_cell_off; // set on registration
25
+ var action_go_to_runing_cell; // set on registration
26
+ var params = {
27
+ is_follow_cell: false,
28
+ go_to_running_cell_shortcut: 'Alt-I',
29
+ follow_cell_on_shortcut: "Alt-;",
30
+ follow_cell_off_shortcut: "Alt-'",
31
+ button_icon: 'fa-anchor'
32
+ };
33
+
34
+ function scrollIntoRunningCell(event, data) {
35
+ $('.running')[0].scrollIntoView({ behavior: 'smooth', inline: 'center' });
36
+ }
37
+
38
+ // update params with any specified in the server's config file
39
+ var update_params = function () {
40
+ var config = Jupyter.notebook.config;
41
+ for (var key in params) {
42
+ if (config.data.hasOwnProperty(key))
43
+ params[key] = config.data[key];
44
+ }
45
+ };
46
+
47
+ // Go to Running cell shortcut
48
+ function go_to_running_cell(event) {
49
+
50
+ // Find running cells and scroll to the first one
51
+ if ($('.running').length > 0) {
52
+ $('.running')[0].scrollIntoView();
53
+ }
54
+ return false;
55
+ }
56
+
57
+ function follow_running_cell_on(event) {
58
+ Jupyter.notebook.events.on('finished_execute.CodeCell', scrollIntoRunningCell);
59
+ return false;
60
+ }
61
+
62
+ function follow_running_cell_off(event) {
63
+ Jupyter.notebook.events.off('finished_execute.CodeCell', scrollIntoRunningCell);
64
+ return false;
65
+ }
66
+
67
+ // Register actions to collapse and uncollapse the selected heading cell
68
+
69
+ function register_new_actions() {
70
+ action_go_to_runing_cell = Jupyter.keyboard_manager.actions.register({
71
+ handler: go_to_running_cell,
72
+ help: "Go to first executing cell",
73
+ help_index: 'aa',
74
+ icon: params.button_icon
75
+ }, 'Go to first running cell', 'Go To Running Cell'
76
+ )
77
+ action_follow_cell_on = Jupyter.keyboard_manager.actions.register({
78
+ handler: follow_running_cell_on,
79
+ help: "Follow running cell on",
80
+ help_index: 'aa'
81
+ }, 'Follow running cell on', 'Go To Running Cell'
82
+ )
83
+ action_follow_cell_off = Jupyter.keyboard_manager.actions.register({
84
+ handler: follow_running_cell_off,
85
+ help: "Follow running cell off",
86
+ help_index: 'aa'
87
+ }, 'Follow running cell off', 'Go To Running Cell'
88
+ );
89
+
90
+ if (params.is_follow_cell) {
91
+ Jupyter.notebook.events.on('finished_execute.CodeCell', scrollIntoRunningCell);
92
+ }
93
+ }
94
+
95
+ // Register keyboard shortcuts according to parameters
96
+ function register_keyboard_shortcuts() {
97
+
98
+ var shortcut, edit_shortcuts = Jupyter.keyboard_manager.command_shortcuts;
99
+ shortcut = params.go_to_running_cell_shortcut;
100
+ if (shortcut) {
101
+ edit_shortcuts.add_shortcut(shortcut, action_go_to_runing_cell);
102
+ }
103
+
104
+ shortcut = params.follow_cell_on_shortcut;
105
+ if (shortcut) {
106
+ edit_shortcuts.add_shortcut(shortcut, action_follow_cell_on);
107
+ }
108
+
109
+ shortcut = params.follow_cell_off_shortcut;
110
+ if (shortcut) {
111
+ edit_shortcuts.add_shortcut(shortcut, action_follow_cell_off);
112
+ }
113
+ }
114
+
115
+ function load_ipython_extension() {
116
+ update_params();
117
+ register_new_actions();
118
+ register_keyboard_shortcuts();
119
+ Jupyter.toolbar.add_buttons_group([action_go_to_runing_cell])
120
+ }
121
+
122
+ return {
123
+ load_ipython_extension: load_ipython_extension,
124
+ };
125
+
126
+ });
.local/share/jupyter/nbextensions/help_panel/readme.md ADDED
@@ -0,0 +1,15 @@
1
+ Help Panel
2
+ ===========
3
+
4
+ Installing the extension adds a new button to the toolbar:
5
+
6
+ ![](icon.png)
7
+
8
+ On clicking the button, the notebook width is reduced and a side panel is displayed showing help.
9
+ The contents of the help panel are exactly the same as when going to `Keyboard Shortcuts` in the `Help` menu.
10
+
11
+ ![](help_panel_ext.png)
12
+
13
+ You can drag the sidebar divider to resize it, or click the expand icon at the top left of the bar to get the help panel to expand to fill the screen:
14
+
15
+ ![](help_panel_ext_fullscreen.png)
.local/share/jupyter/nbextensions/hide_input/readme.md ADDED
@@ -0,0 +1,51 @@
1
+ Hide Input
2
+ ==========
3
+
4
+ This extension allows hiding of an individual codecell in a notebook. This can
5
+ be achieved by clicking on the toolbar button:
6
+
7
+ ![](icon.png)
8
+
9
+
10
+ Internals
11
+ ---------
12
+
13
+ The codecell hiding state is stored in the metadata `cell.metadata.hide_input`.
14
+ If it is set to `true`, the codecell will be hidden on reload.
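+
+ As a minimal sketch (not part of the extension itself), the flag can also be
+ set programmatically with `nbformat`, e.g. to hide the input of every code
+ cell in `my_notebook.ipynb` (the file used in the nbconvert example below)
+ before exporting:
+
+ ```python
+ import nbformat
+
+ # load the notebook (file name as in the nbconvert example below)
+ nb = nbformat.read('my_notebook.ipynb', as_version=4)
+
+ # mark every code cell so its input is hidden on reload / export
+ for cell in nb.cells:
+     if cell.cell_type == 'code':
+         cell.metadata['hide_input'] = True
+
+ nbformat.write(nb, 'my_notebook.ipynb')
+ ```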
15
+
16
+
17
+ Exporting with nbconvert
18
+ ------------------------
19
+
20
+ See also the general docs for exporting using nbconvert at
21
+ [jupyter-contrib-nbextensions.readthedocs.io](https://jupyter-contrib-nbextensions.readthedocs.io/en/latest/).
22
+
23
+ To export a notebook with hidden cell inputs using nbconvert, you need to use a
24
+ custom template.
25
+ The required template is supplied as part of
26
+ `jupyter_contrib_nbextensions.nbconvert_support`, or you can roll your own
27
+ using the provided ones as examples. Again, see the docs linked above for more
28
+ information.
29
+
30
+ The `nbextensions.tpl` template is provided in the
31
+ `jupyter_contrib_nbextensions.nbconvert_support` templates directory (see the
32
+ docs mentioned above for how to find it)
33
+
34
+ To use, add the template to your `nbconvert` call:
35
+
36
+ jupyter nbconvert --template=nbextensions --to=html my_notebook.ipynb
37
+
38
+ The nbextensions template will respect the `cell.metadata.hide_input` flag, and
39
+ filter the cell's output prompt (the bit that looks like `Out[27]:`).
40
+ The filter is only used for html output, not for PDF or LaTeX output.
41
+
42
+ If you want to _keep_ the cell output prompt, you will have to remove the lines
43
+
44
+ {% block output_group -%}
45
+ {%- if cell.metadata.hide_output or nb.metadata.hide_input -%}
46
+ {%- else -%}
47
+ {{ super() }}
48
+ {%- endif -%}
49
+ {% endblock output_group %}
50
+
51
+ in the `nbextensions.tpl` file.
.local/share/jupyter/nbextensions/highlight_selected_word/configurator.yaml ADDED
@@ -0,0 +1,131 @@
1
+ Type: Jupyter Notebook Extension
2
+ Compatibility: 4.x, 5.x
3
+ Name: Highlight selected word
4
+ Main: main.js
5
+ Description: Enables the CodeMirror addon "Match Highlighter"
6
+ Link: README.md
7
+ Parameters:
8
+
9
+ - name: highlight_selected_word.enable_on_load
10
+ input_type: checkbox
11
+ default: true
12
+ description: |
13
+ Enable highlighting on loading the notebook interface.
14
+ The highlighting can also be toggled from the view menu
15
+
16
+ - name: highlight_selected_word.highlight_across_all_cells
17
+ input_type: checkbox
18
+ default: true
19
+ description: |
20
+ Highlight matches across all cells. If false, only matches within the
21
+ currently selected cell will be highlighted.
22
+
23
+ - name: highlight_selected_word.code_cells_only
24
+ input_type: checkbox
25
+ default: false
26
+ description: |
27
+ Only apply highlights to editors for Code cells, not, for example, Markdown
28
+ or Raw cells
29
+
30
+ - name: highlight_selected_word.highlight_color
31
+ input_type: color
32
+ default: '#90EE90'
33
+ description: Color used to highlight matching words in the focussed cell
34
+
35
+ - name: highlight_selected_word.highlight_color_blurred
36
+ input_type: color
37
+ default: '#BBFFBB'
38
+ description: Color used to highlight matching words in blurred (non-active) cells
39
+
40
+ - name: highlight_selected_word.outlines_only
41
+ input_type: checkbox
42
+ default: false
43
+ description: |
44
+ Highlight words using just an outline, rather than the background color
45
+
46
+ - name: highlight_selected_word.outline_width
47
+ input_type: number
48
+ default: 1
49
+ min: 0.5
50
+ step: 0.5
51
+ description: |
52
+ Width, in pixels, of the outline used to highlight words when the
53
+ outline-only setting is selected.
54
+
55
+ - name: highlight_selected_word.delay
56
+ input_type: number
57
+ default: 100
58
+ min: 0
59
+ step: 1
60
+ description: 'Wait time, in milliseconds, before highlighting the matches'
61
+
62
+ - name: highlight_selected_word.words_only
63
+ input_type: checkbox
64
+ default: false
65
+ description: Only highlight matches if the selected text is a whole word
66
+
67
+ - name: highlight_selected_word.highlight_only_whole_words
68
+ input_type: checkbox
69
+ default: true
70
+ description: |
71
+ Only highlight matches when they are surrounded by non-word characters, as
72
+ determined by the token below (if set), or the default regex '[\w$]'.
73
+
74
+ - name: highlight_selected_word.show_token
75
+ input_type: text
76
+ default: '[\w$]' # single-quote strings in yaml are like python raw strings
77
+ description: |
78
+ Token (regex) to identify word characters, used to determine what to
79
+ highlight when nothing is selected. If blank, nothing is highlighted when
80
+ nothing is selected.
81
+
82
+ - name: highlight_selected_word.min_chars
83
+ input_type: number
84
+ default: 2
85
+ min: 0
86
+ step: 1
87
+ description: |
88
+ Minimum number of characters that must be selected for the highlighting
89
+ to occur (assuming no token is set for use when nothing is selected)
90
+
91
+ - name: highlight_selected_word.trim
92
+ input_type: checkbox
93
+ default: true
94
+ description: |
95
+ Trim whitespace from selection text before checking for minimum length
96
+
97
+ - name: highlight_selected_word.use_toggle_hotkey
98
+ input_type: checkbox
99
+ default: false
100
+ description: |
101
+ Bind the highlight_selected_word:toggle action to a hotkey
102
+
103
+ - name: highlight_selected_word.toggle_hotkey
104
+ input_type: hotkey
105
+ default: 'alt-h'
106
+ description: |
107
+ Hotkey to bind to the highlight_selected_word:toggle action (if selected
108
+ for use, above)
109
+
110
+ - name: highlight_selected_word.only_cells_in_scroll
111
+ input_type: checkbox
112
+ default: true
113
+ description: |
114
+ Only apply highlights to editors which are visible in the scrolled view.
115
+ This may offer performance benefits for larger notebooks
116
+
117
+ - name: highlight_selected_word.scroll_min_delay
118
+ input_type: number
119
+ default: 100
120
+ min: 0
121
+ step: 10
122
+ description: |
123
+ Minimum delay in ms between updating highlights on scrolling the notebook
124
+ (used only if limiting highlights to those in scrolled view, see above).
125
+ If set to zero, no update is done on scroll.
126
+
127
+ - name: highlight_selected_word.hide_selections_in_unfocussed
128
+ input_type: checkbox
129
+ default: false
130
+ description: |
131
+ Hide any text selection in non-focussed cells (can be confused with match highlights).
.local/share/jupyter/nbextensions/init_cell/main.js ADDED
@@ -0,0 +1,157 @@
1
+ define([
2
+ 'jquery',
3
+ 'base/js/dialog',
4
+ 'base/js/events',
5
+ 'base/js/namespace',
6
+ 'notebook/js/celltoolbar',
7
+ 'notebook/js/codecell',
8
+ ], function (
9
+ $,
10
+ dialog,
11
+ events,
12
+ Jupyter,
13
+ celltoolbar,
14
+ codecell
15
+ ) {
16
+ "use strict";
17
+
18
+ var CellToolbar = celltoolbar.CellToolbar;
19
+
20
+ var mod_name = 'init_cell';
21
+ var log_prefix = '[' + mod_name + ']';
22
+ var options = { // updated from server's config & nb metadata
23
+ run_on_kernel_ready: true,
24
+ };
25
+
26
+ var toolbar_preset_name = 'Initialization Cell';
27
+ var init_cell_ui_callback = CellToolbar.utils.checkbox_ui_generator(
28
+ toolbar_preset_name,
29
+ function setter (cell, value) {
30
+ if (value) {
31
+ cell.metadata.init_cell = true;
32
+ }
33
+ else {
34
+ delete cell.metadata.init_cell;
35
+ }
36
+ },
37
+ function getter (cell) {
38
+ // if init_cell is undefined, it'll be interpreted as false anyway
39
+ return cell.metadata.init_cell;
40
+ }
41
+ );
42
+
43
+ function count_init_cells () {
44
+ console.log(log_prefix, 'counting initialization cells');
45
+ var num = 0;
46
+ var cells = Jupyter.notebook.get_cells();
47
+ for (var ii = 0; ii < cells.length; ii++) {
48
+ var cell = cells[ii];
49
+ if ((cell instanceof codecell.CodeCell) && cell.metadata.init_cell === true ) {
50
+ num++;
51
+ }
52
+ }
53
+ console.log(log_prefix, 'found ' + num + ' initialization cell' + (num !== 1 ? 's' : ''));
54
+ return num
55
+ }
56
+
57
+ function run_init_cells () {
58
+ console.log(log_prefix, 'running all initialization cells');
59
+ var num = 0;
60
+ var cells = Jupyter.notebook.get_cells();
61
+ for (var ii = 0; ii < cells.length; ii++) {
62
+ var cell = cells[ii];
63
+ if ((cell instanceof codecell.CodeCell) && cell.metadata.init_cell === true ) {
64
+ cell.execute();
65
+ num++;
66
+ }
67
+ }
68
+ console.log(log_prefix, 'finished running ' + num + ' initialization cell' + (num !== 1 ? 's' : ''));
69
+ }
70
+
71
+ var load_ipython_extension = function() {
72
+ // register action
73
+ var prefix = 'auto';
74
+ var action_name = 'run-initialization-cells';
75
+ var action = {
76
+ icon: 'fa-calculator',
77
+ help: 'Run all initialization cells',
78
+ help_index : 'zz',
79
+ handler : run_init_cells
80
+ };
81
+ var action_full_name = Jupyter.notebook.keyboard_manager.actions.register(action, action_name, prefix);
82
+
83
+ // add toolbar button
84
+ Jupyter.toolbar.add_buttons_group([action_full_name]);
85
+
86
+ // setup things to run on loading config/notebook
87
+ Jupyter.notebook.config.loaded
88
+ .then(function update_options_from_config () {
89
+ $.extend(true, options, Jupyter.notebook.config.data[mod_name]);
90
+ }, function (reason) {
91
+ console.warn(log_prefix, 'error loading config:', reason);
92
+ })
93
+ .then(function () {
94
+ if (Jupyter.notebook._fully_loaded) {
95
+ callback_notebook_loaded();
96
+ }
97
+ events.on('notebook_loaded.Notebook', callback_notebook_loaded);
98
+ }).catch(function (reason) {
99
+ console.error(log_prefix, 'unhandled error:', reason);
100
+ });
101
+ };
102
+
103
+ function callback_notebook_loaded () {
104
+ // update from metadata
105
+ var md_opts = Jupyter.notebook.metadata[mod_name];
106
+ if (md_opts !== undefined) {
107
+ console.log(log_prefix, 'updating options from notebook metadata:', md_opts);
108
+ $.extend(true, options, md_opts);
109
+ }
110
+
111
+ // register celltoolbar presets if they haven't been already
112
+ if (CellToolbar.list_presets().indexOf(toolbar_preset_name) < 0) {
113
+ // Register a callback to create a UI element for a cell toolbar.
114
+ CellToolbar.register_callback('init_cell.is_init_cell', init_cell_ui_callback, 'code');
115
+ // Register a preset of UI elements forming a cell toolbar.
116
+ CellToolbar.register_preset(toolbar_preset_name, ['init_cell.is_init_cell'], Jupyter.notebook);
117
+ }
118
+
119
+ if (options.run_on_kernel_ready) {
120
+ var num = count_init_cells();
121
+
122
+ if (num) {
123
+ if (Jupyter.notebook.trusted) {
124
+ run_init_cells_asap()
125
+ }
126
+ else {
127
+ dialog.modal({
128
+ title : 'Untrusted notebook with initialization code',
129
+ body : num + ' initialization code cell' + (num !== 1 ? 's' : '') + ' was found but not run since this notebook is untrusted.',
130
+ buttons: {
131
+ 'Trust notebook': {
132
+ 'class' : 'btn-danger',
133
+ 'click' : () => Jupyter.notebook.trust_notebook()
134
+ },
135
+ 'Do nothing': {'class' : 'btn-primary'}
136
+ },
137
+ notebook: Jupyter.notebook,
138
+ keyboard_manager: Jupyter.keyboard_manager,
139
+ });
140
+ }
141
+ }
142
+ }
143
+ }
144
+
145
+ function run_init_cells_asap () {
146
+ if (Jupyter.notebook && Jupyter.notebook.kernel && Jupyter.notebook.kernel.info_reply.status === 'ok') {
147
+ // kernel is already ready
148
+ run_init_cells();
149
+ }
150
+ // whenever a (new) kernel becomes ready, run all initialization cells
151
+ events.on('kernel_ready.Kernel', run_init_cells);
152
+ }
153
+
154
+ return {
155
+ load_ipython_extension : load_ipython_extension
156
+ };
157
+ });
.local/share/jupyter/nbextensions/keyboard_shortcut_editor/keyboard_shortcut_editor.yaml ADDED
@@ -0,0 +1,12 @@
1
+ Type: IPython Notebook Extension
2
+ Compatibility: 4.x, 5.x
3
+ Name: Keyboard shortcut editor
4
+ Main: main.js
5
+ Icon: icon.png
6
+ Link: README.md
7
+ Description: Edit or remove Jupyter keyboard shortcuts, or add your own new ones
8
+ Parameters:
9
+ - name: kse_show_rebinds
10
+ description: "Show shortcut editing controls in the shortcuts dialog. If this is false, shortcuts can't be edited directly from the notebook, but any existing edits are still applied. Useful essentially just to make the shortcuts dialog a bit cleaner"
11
+ input_type: checkbox
12
+ default: true
.local/share/jupyter/nbextensions/keyboard_shortcut_editor/main.css ADDED
@@ -0,0 +1,40 @@
1
+ .kse-dropdown {
2
+ margin-left: 0.5em;
3
+ }
4
+
5
+ .kse-dropdown > .dropdown-menu {
6
+ min-width: 0;
7
+ top: 20px;
8
+ }
9
+
10
+ .kse-input-group-pretty {
11
+ min-width: 20ex;
12
+ }
13
+
14
+ .kse-modal-backdrop {
15
+ background-color: #fff;
16
+ }
17
+
18
+ .kse-input-group-reset,
19
+ .kse-input-group-pretty {
20
+ border-right: none;
21
+ }
22
+
23
+ .kse-input-group-pretty > kbd {
24
+ color: black;
25
+ font-weight: bold;
26
+ }
27
+
28
+ .kse-editor .help-block > p {
29
+ margin-bottom: 10px;
30
+ }
31
+
32
+ .kse-editor select {
33
+ display: inline-block;
34
+ width: auto;
35
+ margin: 0;
36
+ }
37
+
38
+ .kse-links .fa {
39
+ margin-right: 2px;
40
+ }
.local/share/jupyter/nbextensions/keyboard_shortcut_editor/readme_reset_disabled.png ADDED
.local/share/jupyter/nbextensions/limit_output/icon.png ADDED
.local/share/jupyter/nbextensions/limit_output/main.js ADDED
@@ -0,0 +1,133 @@
1
+ // Restrict output in a codecell to a maximum length
2
+
3
+ define([
4
+ 'base/js/namespace',
5
+ 'notebook/js/outputarea',
6
+ 'notebook/js/codecell',
7
+ ], function(
8
+ Jupyter,
9
+ oa,
10
+ cc
11
+ ) {
12
+ "use strict";
13
+
14
+ // define default values for config parameters
15
+ var params = {
16
+ // maximum number of characters the output area is allowed to print
17
+ limit_output : 10000,
18
+ limit_stream : true,
19
+ limit_execute_result : true,
20
+ limit_display_data : false,
21
+ // message to print when output is limited
22
+ limit_output_message : '<b>limit_output extension: Maximum message size of {limit_output_length} exceeded with {output_length} characters</b>'
23
+ };
24
+
25
+ // to be called once config is loaded, this updates default config vals
26
+ // with the ones specified by the server's config file
27
+ var update_params = function() {
28
+ var config = Jupyter.notebook.config;
29
+ for (var key in params) {
30
+ if (config.data.hasOwnProperty(key) ){
31
+ params[key] = config.data[key];
32
+ }
33
+ }
34
+ };
35
+
36
+ function is_finite_number (n) {
37
+ n = parseFloat(n);
38
+ return !isNaN(n) && isFinite(n);
39
+ }
40
+
41
+ var initialize = function () {
42
+ update_params();
43
+ // sometimes limit_output metadata val can get stored as a string
44
+ params.limit_output = parseFloat(params.limit_output);
45
+ var old_handle_output = oa.OutputArea.prototype.handle_output;
46
+ oa.OutputArea.prototype.handle_output = function (msg) {
47
+ var handled_msg_types = ['stream', 'execute_result', 'display_data'];
48
+ if (handled_msg_types.indexOf(msg.header.msg_type) < 0) {
49
+ return old_handle_output.apply(this, arguments);
50
+ }
51
+ else {
52
+ // get MAX_CHARACTERS from cell metadata if present, otherwise param
53
+ //msg.header.msg_type
54
+ var MAX_CHARACTERS = params.limit_output;
55
+ var cell_metadata = this.element.closest('.cell').data('cell').metadata;
56
+ if (is_finite_number(cell_metadata.limit_output)) {
57
+ MAX_CHARACTERS = parseFloat(cell_metadata.limit_output);
58
+ }
59
+
60
+ // read the length of already-appended outputs from our data
61
+ var count = this.element.data('limit_output_count') || 0;
62
+ // update count with the length of this message
63
+ var old_count = count;
64
+ if (msg.header.msg_type === "stream" && params.limit_stream) {
65
+ count += String(msg.content.text).length;
66
+ }
67
+ else {
68
+ if ((msg.header.msg_type === "execute_result" && params.limit_execute_result) ||
69
+ (msg.header.msg_type === "display_data" && params.limit_display_data)) {
70
+ count += Math.max(
71
+ (msg.content.data['text/plain'] === undefined) ? 0 : String(msg.content.data['text/plain']).length,
72
+ (msg.content.data['text/html'] === undefined) ? 0 : String(msg.content.data['text/html']).length
73
+ );
74
+ }
75
+
76
+ }
77
+ // save updated count
78
+ this.element.data('limit_output_count', count);
79
+
80
+ if (count <= MAX_CHARACTERS) {
81
+ return old_handle_output.apply(this, arguments);
82
+ }
83
+ // if here, we'd exceed MAX_CHARACTERS with addition of this message.
84
+ if (old_count <= MAX_CHARACTERS) {
85
+ // Apply truncated portion of this message
86
+ var to_add = MAX_CHARACTERS - old_count;
87
+ if (msg.header.msg_type === "stream") {
88
+ msg.content.text = msg.content.text.substr(0, to_add);
89
+ }
90
+ else {
91
+ if (msg.content.data['text/plain'] !== undefined) {
92
+ msg.content.data['text/plain'] = msg.content.data['text/plain'].substr(0, to_add);
93
+ }
94
+ if (msg.content.data['text/html'] !== undefined) {
95
+ msg.content.data['text/html'] = msg.content.data['text/html'].substr(0, to_add);
96
+ }
97
+ }
98
+ old_handle_output.apply(this, arguments);
99
+
100
+ // display limit notification messages
101
+ console.log(
102
+ "limit_output: Maximum message size of", MAX_CHARACTERS,
103
+ "exceeded with", count, "characters. Further output muted."
104
+ );
105
+ // allow simple substitutions for output length for quick debugging
106
+ var limitmsg = params.limit_output_message.replace("{message_type}", msg.header.msg_type)
107
+ .replace("{limit_output_length}", MAX_CHARACTERS)
108
+ .replace("{output_length}", count);
109
+ this.append_output({
110
+ "output_type": "display_data",
111
+ "metadata": {}, // included to avoid warning
112
+ "data": {"text/html": limitmsg}
113
+ });
114
+ }
115
+ }
116
+ };
117
+
118
+ var old_clear_output = oa.OutputArea.prototype.clear_output;
119
+ oa.OutputArea.prototype.clear_output = function () {
120
+ // reset counter on execution.
121
+ this.element.data('limit_output_count', 0);
122
+ return old_clear_output.apply(this, arguments);
123
+ };
124
+ };
125
+
126
+ var load_ipython_extension = function() {
127
+ return Jupyter.notebook.config.loaded.then(initialize);
128
+ };
129
+
130
+ return {
131
+ load_ipython_extension : load_ipython_extension
132
+ };
133
+ });
.local/share/jupyter/nbextensions/limit_output/readme.md ADDED
@@ -0,0 +1,48 @@
1
+ Limit Output
2
+ ============
3
+
4
+
5
+ Description
6
+ -----------
7
+
8
+ This extension limits the number of characters a codecell will output as text
9
+ or HTML.
10
+ This also allows the interruption of endless loops of print commands.
11
+
12
+ [![Demo Video](https://img.youtube.com/vi/U26ujuPXf00/0.jpg)](https://youtu.be/U26ujuPXf00)
13
+
14
+ You can set the number of characters using the ConfigManager:
15
+
16
+ ```python
17
+ from notebook.services.config import ConfigManager
18
+ cm = ConfigManager().update('notebook', {'limit_output': 1000})
19
+ ```
20
+
21
+ or by using the [jupyter_nbextensions_configurator](https://github.com/Jupyter-contrib/jupyter_nbextensions_configurator)
22
+
23
+ The limit can also be set for an individual cell, using the cell's
24
+ `cell.metadata.limit_output`.
25
+
26
+
27
+ Internals
28
+ ---------
29
+
30
+ Three types of messages are intercepted: `stream`, `execute_result`, and
31
+ `display_data`. For `stream`-type messages, the text string length is limited
32
+ to `limit_output` number of characters.
33
+ For other message types, `text/plain` and `text/html` content length is
34
+ counted, and if either exceeds `limit_output` characters will be truncated to
35
+ `limit_output` number of characters.
36
+
37
+ The `limit_output_message` parameter can be formatted to display the
38
+ `limit_output` length and the current `output_length`, using the respective
39
+ replacement fields `{limit_output_length}` and `{output_length}`.
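+
+ As a minimal sketch building on the `ConfigManager` example above (the limit
+ and message text here are only illustrative values), a custom message using
+ both replacement fields could be configured like this:
+
+ ```python
+ from notebook.services.config import ConfigManager
+
+ cm = ConfigManager()
+ cm.update('notebook', {
+     'limit_output': 5000,  # truncate after 5000 characters
+     'limit_output_message': (
+         '<b>Output truncated: {output_length} characters produced, '
+         'limit is {limit_output_length}</b>'
+     ),
+ })
+ ```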
40
+
41
+ ### Parameter Overview
42
+
43
+ * limit_output - Number of characters to limit output to
44
+ * limit_stream - Enable limiting stream messages
45
+ * limit_execute_result - Enable limiting execute_result messages
46
+ * limit_display_data - Enable limiting display_data messages
47
+ * limit_output_message - Message to append when output is limited
48
+
.local/share/jupyter/nbextensions/livemdpreview/livemdpreview.js ADDED
@@ -0,0 +1,112 @@
1
+ define([
2
+ 'jquery',
3
+ 'require',
4
+ 'base/js/namespace',
5
+ 'base/js/events',
6
+ 'base/js/utils',
7
+ 'notebook/js/cell',
8
+ 'notebook/js/textcell',
9
+ 'codemirror/lib/codemirror',
10
+ ], function (
11
+ $,
12
+ requirejs,
13
+ Jupyter,
14
+ events,
15
+ utils,
16
+ cell_mod,
17
+ textcell,
18
+ CodeMirror
19
+ ) {
20
+ "use strict";
21
+
22
+ var LiveMdPreviewer = function(options) {
23
+ options = $.extend(true, {}, this._default_options, options);
24
+ this.show_side_by_side = options.show_side_by_side;
25
+ this.timeout = Math.max(50, options.timeout);
26
+
27
+ this.addCSS();
28
+ var lmdp = this;
29
+ // Change any existing cells:
30
+ Jupyter.notebook.get_cells().forEach(function (cell) {
31
+ lmdp.registerCell(cell);
32
+ });
33
+ // Ensure we also apply to new cells:
34
+ events.on('create.Cell', function (evt, data) { lmdp.registerCell(data.cell); });
35
+ };
36
+
37
+ LiveMdPreviewer.prototype._default_options = {
38
+ show_side_by_side: false,
39
+ timeout : 500,
40
+ };
41
+
42
+ /**
43
+ * do work of rendering the markdown cell, without triggering the rendered
44
+ * event, or altering classes on elements
45
+ */
46
+ var previewMdCell = function(cell) {
47
+ var cached_trigger = cell.events.trigger;
48
+ cell.events.trigger = function (eventType) {
49
+ if (eventType !== "rendered.MarkdownCell") {
50
+ return cached_trigger.apply(this, arguments);
51
+ }
52
+ return this;
53
+ };
54
+
55
+ var Cell = cell_mod.Cell;
56
+ var cached_render = Cell.prototype.render;
57
+ Cell.prototype.render = function () {
58
+ return true;
59
+ };
60
+
61
+ try {
62
+ cell.render();
63
+ }
64
+ finally {
65
+ cell.events.trigger = cached_trigger;
66
+ Cell.prototype.render = cached_render;
67
+ }
68
+ };
69
+
70
+ LiveMdPreviewer.prototype.registerCell = function(cell) {
71
+ if (!(cell instanceof textcell.TextCell)) {
72
+ return;
73
+ }
74
+ var timeout = this.timeout;
75
+ cell.code_mirror.on('changes', function onCodeMirrorChanges (cm, changes) {
76
+ if (!cm.state.livemdpreview) {
77
+ cm.state.livemdpreview = setTimeout(function () {
78
+ var cell = $(cm.getWrapperElement()).closest('.cell').data('cell');
79
+ previewMdCell(cell);
80
+ delete cm.state.livemdpreview;
81
+ }, timeout);
82
+ }
83
+ });
84
+ };
85
+
86
+ LiveMdPreviewer.prototype.addCSS = function () {
87
+ var styles_elem = $('#livemdpreviewstyles');
88
+ if (styles_elem.length < 1) {
89
+ styles_elem = $('<style id="livemdpreviewstyles">').appendTo('body');
90
+ }
91
+ var styles = [
92
+ // show rendered stuff even in "unrendered" cell
93
+ '.text_cell.unrendered .text_cell_render { display: block; }',
94
+ ];
95
+ if (this.show_side_by_side) {
96
+ styles.push('.text_cell.unrendered .inner_cell { flex-direction: row !important; }');
97
+ styles.push('.text_cell.unrendered .input_area, .text_cell.unrendered .text_cell_render { width: 50%; }');
98
+ }
99
+ styles_elem.html(styles.join('\n'));
100
+ };
101
+
102
+ /**
103
+ * Export things
104
+ */
105
+ return {
106
+ load_ipython_extension : function () {
107
+ return Jupyter.notebook.config.loaded.then(function () {
108
+ return new LiveMdPreviewer(Jupyter.notebook.config.data.livemdpreview);
109
+ });
110
+ }
111
+ };
112
+ });
.local/share/jupyter/nbextensions/load_tex_macros/load_tex_macros.yaml ADDED
@@ -0,0 +1,7 @@
1
+ Type: Jupyter Notebook Extension
2
+ Name: Load TeX macros
3
+ Description: This extension automatically loads a set of latex commands from the file latexdefs.tex when a notebook is opened.
4
+ Link: readme.md
5
+ Icon: icon.png
6
+ Main: main.js
7
+ Compatibility: 4.x, 5.x
.local/share/jupyter/nbextensions/move_selected_cells/README.md ADDED
@@ -0,0 +1,12 @@
1
+ # Move selected cells
2
+
3
+ This is a quick (and dirty) extension - move up or down several selected cell*s*. Moving cells or series of cells via simple keystrokes can be super useful.
4
+ Note: Alternatively, it is now possible to use the `keyboard_shortcut_editor` to bind the move cell up & move cell down actions to Alt-up and Alt-down (or anything else).
5
+
6
+ Initial version for Jupyter 4.0: a bit dirty because it would be better to act on DOM elements and write a correct move_cells() function.
7
+ New version, updated to Jupyter 4.2+, now takes advantage of the new `Jupyter.notebook.move_selection_{down, up}` functions
8
+
9
+
10
+ Keyboard shortcuts: *Alt-up* and *Alt-down* (works also with single cells!)
11
+
12
+ **Cell selection**: Cells can be selected using the rubberband (if this extension is enabled) or via Shift-up/Shift-down or Shift-K/Shift-J
.local/share/jupyter/nbextensions/move_selected_cells/main.js ADDED
@@ -0,0 +1,87 @@
1
+ // Copyright (c) Jupyter-Contrib Team.
2
+ // Distributed under the terms of the Modified BSD License.
3
+
4
+ // This is a quick (and dirty) extension - move up or down several selected cells
5
+ // Dirty because it would be better to act on dom elements and write a correct
6
+ // move_cells() function.
7
+ // Updated to Jupyter 4.2+, taking advantage of
8
+ // `Jupyter.notebook.move_selection_{down, up}` new functions
9
+ //
10
+ // Keyboard shortcuts: Alt-up and Alt-down (works with single cells also -- this is useful!)
11
+ // Cells can be selected using the rubberband (needs rubberband extension) or via Shift-up/Shift-down or Shift-K/Shift-J
12
+
13
+
14
+ define([
15
+ 'base/js/namespace',
16
+ 'jquery',
17
+ 'require',
18
+ 'base/js/events'
19
+ ], function(Jupyter, $, requirejs, events, rubberband) {
20
+ "use strict";
21
+
22
+ if (parseFloat(Jupyter.version.substr(0, 3)) >= 4.2) {
23
+ var add_cmd_shortcuts = {
24
+ 'Alt-down': {
25
+ help: 'Move selected cells down',
26
+ help_index: 'ht',
27
+ handler: function() { Jupyter.notebook.move_selection_down() }
28
+ },
29
+ 'Alt-up': {
30
+ help: 'Move selected cells up',
31
+ help_index: 'ht',
32
+ handler: function() { Jupyter.notebook.move_selection_up() }
33
+ }
34
+ }
35
+
36
+ } else { // Jupyter version < 4.2
37
+ var add_cmd_shortcuts = {
38
+ 'Alt-down': {
39
+ help: 'Move selected cells down',
40
+ help_index: 'ht',
41
+ handler: function(event) {
42
+ var ncells = Jupyter.notebook.ncells();
43
+ var s = Jupyter.notebook.get_selected_indices();
44
+ //ensure cells indices are reverse sorted
45
+ var ss = s.sort(function(x, y) {
46
+ return x - y }).reverse();
47
+ if (ss[0] + 1 < ncells) {
48
+ for (var k in ss) {
49
+ Jupyter.notebook.move_cell_down(ss[k]);
50
+ }; //The second loop is needed because move_cell deselect
51
+ for (var k in ss) {
52
+ Jupyter.notebook.get_cell(ss[k] + 1).select();
53
+ }
54
+ }
55
+ }
56
+ },
57
+ 'Alt-up': {
58
+ help: 'Move selected cells up',
59
+ help_index: 'ht',
60
+ handler: function(event) {
61
+ var s = Jupyter.notebook.get_selected_indices();
62
+ //ensure cells indices are sorted
63
+ var ss = s.sort(function(x, y) {
64
+ return x - y });
65
+ if (ss[0] - 1 >= 0) {
66
+ for (var k in ss) {
67
+ Jupyter.notebook.move_cell_up(ss[k]);
68
+ };
69
+ for (var k in ss) {
70
+ Jupyter.notebook.get_cell(ss[k] - 1).select();
71
+ }
72
+ }
73
+ }
74
+ }
75
+ }
76
+ }
77
+
78
+ function load_ipython_extension() {
79
+ Jupyter.keyboard_manager.command_shortcuts.add_shortcuts(add_cmd_shortcuts);
80
+ console.log("[move_selected_cells] loaded")
81
+ }
82
+
83
+ return {
84
+ load_ipython_extension: load_ipython_extension,
85
+ };
86
+
87
+ });
.local/share/jupyter/nbextensions/navigation-hotkeys/icon.png ADDED
.local/share/jupyter/nbextensions/navigation-hotkeys/readme.md ADDED
@@ -0,0 +1,25 @@
1
+ Navigation Hotkeys
2
+ ==================
3
+
4
+ Adds new key combinations for easier notebook navigation. (*t*) means that the key or category can be toggled on/off in the settings.
5
+
6
+ Edit-mode hotkeys (*t*):
7
+
8
+ * `pageup` - scroll page up
9
+ * `pagedown` - scroll page down
10
+ * `Alt`- `Add` - Split cell and keep cursor position (+ on the keypad)
11
+ * `Alt`- `Subtract` - Combine cell and keep cursor position (- on the keypad)
12
+ * `Alt`-`n` - Toggle line number display in current codecell
13
+ * `Ctrl`-`y` - toggle celltype between markdown and code
14
+ * `Shift`-`Enter` - (*t*) Execute cell, goto next cell and stay in edit mode if next cell is a code cell or unrendered markdown cell
15
+ * `Ctrl`-`Enter` - (*t*) Execute cell and stay in edit mode if cell is a code cell
16
+
17
+ Command-mode hotkeys (*t*):
18
+
19
+ * `esc` - (*t*) toggle to edit mode
20
+ * `home` - Go to top of notebook
21
+ * `end` - Go to bottom of notebook
22
+ * `pageup` - scroll page up
23
+ * `pagedown` - scroll page down
24
+
25
+
.local/share/jupyter/nbextensions/nbTranslate/README.md ADDED
@@ -0,0 +1,56 @@
1
+ # nbTranslate -- helps translate notebooks -- provides multilanguage support
2
+
3
+ This extension
4
+
5
+ - helps convert markdown cells in a notebook from one language to another (optionally using **Google translate**),
6
+ - enables one to selectively display cells from a given language in a multilanguage notebook.
7
+
8
+ This extension allows one to copy the original cell into a new one for editing and translating. Optionally, the cell source text can be piped through `google translate` and the result inserted into a new cell. Basic markdown structures (e.g. bold, italics, lists) are preserved/restored after conversion, though this is not perfect (usually one has to correct the text and structures afterwards). Similarly, equations are extracted before conversion and restored in the result. It seems that when translating from one language to another language, the best results are obtained by using English as an intermediate language.
9
+
10
+ Metadata indicating the language used is added to each cell. This allows one to selectively display cells for a particular language and hide the other ones. Code cells remain untouched. This way, one can get a kind of multilanguage notebook. A menu is provided to select the languages to display in the notebook.
11
+
12
+
13
+ ![](demo1.gif)
14
+
15
+ ![](demo2.gif)
16
+
17
+
18
+ ## Compatibility
19
+
20
+ The extension has been written to play nicely with
21
+ - [latex_envs]: LaTeX environments are protected before conversion and restored after. For environments with text content, e.g. theorem, remark, etc, the content is still translated. Some minor updates have been applied to `latex_envs` to ensure the best compatibility; update if necessary via
22
+ ```
23
+ pip install jupyter_latex_envs --upgrade [--user|sys-prefix]
24
+ jupyter nbextension install --py latex_envs --user
25
+ jupyter nbextension enable latex_envs --user --py
26
+ ```
27
+ - [toc2]: cells of non displayed languages are hidden and not rendered so that the toc corresponds only to the selected languages. The toc is automatically updated each time a language is added or removed.
28
+
29
+ ## Configuration
30
+ - Parameter values can be changed using the `nbextensions-configurator`: it is possible to choose the initial source and target language, to choose to use the google translate engine or not, and to specify the initially displayed languages, the position of the language selection menu, and a keyboard shortcut definition (see the sketch after this list).
31
+ - A *configuration toolbar* is provided which allows changing the main options per notebook. One can also toggle whether to use the google translate engine, select the source and target languages, and select the language to display.
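+
+ As a rough sketch (not an official part of this extension; the values shown
+ are only examples), the same defaults can also be set server-side through the
+ notebook `ConfigManager`, using the `nbTranslate` config section that
+ `main.js` reads:
+
+ ```python
+ from notebook.services.config import ConfigManager
+
+ cm = ConfigManager()
+ cm.update('notebook', {
+     'nbTranslate': {
+         'sourceLang': 'en',            # language of the original cells
+         'targetLang': 'fr',            # language to translate into
+         'displayLangs': ['en', 'fr'],  # languages shown on load ('*' = all)
+         'useGoogleTranslate': True,    # pipe cell text through Google translate
+         'hotkey': 'alt-t',             # shortcut for translating the current cell
+     },
+ })
+ ```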
32
+
33
+ ## Export
34
+ It is possible to extract one language from the multilanguage notebook. An exporter with an entry-point `selectLanguage` is provided that converts the notebook into another one as follows:
35
+ ```
36
+ jupyter nbconvert --to selectLanguage --NotebookLangExporter.language=lang FILE.ipynb
37
+ ```
38
+ where the `lang` parameter denotes a valid language abbreviation e.g. en, fr, ar, sp. See the full list <a href='languages.js'> here.</a>
39
+
40
+
41
+ Installation
42
+ ------------
43
+
44
+ If you use [jupyter-contrib-nbextensions](https://github.com/ipython-contrib/jupyter_contrib_nbextensions), the extension is provided within the `Nbextensions` configuration.
45
+
46
+ Otherwise, you can still install the extension from my personal repo, using:
47
+ ```
48
+ jupyter nbextension install https://rawgit.com/jfbercher/jupyter_nbTranslate/master/nbTranslate.zip --user
49
+ jupyter nbextension enable nbTranslate/main
50
+ ```
51
+ _Note: Until further notice, installing from this repo does not install the python module or add the entry points for exporting as described above._
52
+
53
+ To uninstall:
54
+ ```
55
+ jupyter nbextension uninstall nbTranslate/main
56
+ ```
.local/share/jupyter/nbextensions/nbTranslate/demo1.gif ADDED

Git LFS Details

  • SHA256: d5a8c90d8375a3a4fb62dee376de91281b96c7616d3763eeaba9cc1d4f9c1f9c
  • Pointer size: 132 Bytes
  • Size of remote file: 2.34 MB
.local/share/jupyter/nbextensions/nbTranslate/demo2.gif ADDED

Git LFS Details

  • SHA256: c27adeaf2ba905566502c3f319f1d01e59a3683c77392d890c24a4532396884c
  • Pointer size: 132 Bytes
  • Size of remote file: 3.1 MB
.local/share/jupyter/nbextensions/nbTranslate/main.js ADDED
@@ -0,0 +1,136 @@
1
+ // Copyright (c) Jupyter-Contrib Team.
2
+ // Distributed under the terms of the Modified BSD License.
3
+ // Author: Jean-François Bercher
4
+
5
+ define([
6
+ 'module',
7
+ 'require',
8
+ 'jquery',
9
+ 'base/js/namespace',
10
+ './nbTranslate',
11
+ './mutils',
12
+ ], function(
13
+ module,
14
+ requirejs,
15
+ $,
16
+ Jupyter,
17
+ nbt,
18
+ mutils
19
+ ) {
20
+ 'use strict';
21
+
22
+ var sourceLang;
23
+ var targetLang;
24
+ var displayLangs;
25
+ var useGoogleTranslate;
26
+
27
+ var add_edit_shortcuts = {};
28
+ var log_prefix = '[' + module.id + '] ';
29
+
30
+ // default config (updated on nbextension load)
31
+ var conf = {
32
+ hotkey: 'alt-t',
33
+ sourceLang: 'en',
34
+ targetLang: 'fr',
35
+ displayLangs: ['*'],
36
+ langInMainMenu: true,
37
+ useGoogleTranslate: true
38
+ }
39
+
40
+
41
+ function initialize(conf) {
42
+ Jupyter.notebook.config.loaded.then(function config_loaded_callback() {
43
+ // config may be specified at system level or at document level.
44
+ // first, update defaults with config loaded from server
45
+ conf = $.extend(false, {}, conf, Jupyter.notebook.config.data.nbTranslate);
46
+ // then update cfg with any found in current notebook metadata
47
+ // and save in nb metadata (then can be modified per document)
48
+ conf = Jupyter.notebook.metadata.nbTranslate = $.extend(false, {}, conf,
49
+ Jupyter.notebook.metadata.nbTranslate);
50
+ //conf.displayLangs = Jupyter.notebook.metadata.nbTranslate.displayLangs = $.makeArray($.extend(true, {}, conf.displayLangs, Jupyter.notebook.metadata.nbTranslate.displayLangs));
51
+ // other initializations
52
+ sourceLang = conf.sourceLang;
53
+ targetLang = conf.targetLang;
54
+ displayLangs = conf.displayLangs;
55
+ useGoogleTranslate = conf.useGoogleTranslate;
56
+ // then
57
+ translateHotkey(conf);
58
+ showToolbar();
59
+ main_function(conf);
60
+ buildTranslateToolbar();
61
+ })
62
+ return conf
63
+ }
64
+
65
+
66
+ function showToolbar() {
67
+ if ($('#showToolbar').length == 0) {
68
+ $(Jupyter.toolbar.add_buttons_group([
69
+ Jupyter.keyboard_manager.actions.register({
70
+ 'help' : 'Translate current cell',
71
+ 'icon' : 'fa-language',
72
+ 'handler': translateCurrentCell,
73
+ }, 'translate-cell', 'nbTranslate'),
74
+ Jupyter.keyboard_manager.actions.register({
75
+ 'help' : 'nbTranslate: Configuration (toggle toolbar)',
76
+ 'icon' : 'fa-wrench',
77
+ 'handler': translateToolbarToggle //translateToolbar
78
+ }, 'show-nbTranslate-toolbar', 'nbTranslate'),
79
+ ])).find('.btn').eq(0).attr('id', 'showToolbar');
80
+ }
81
+ }
82
+
83
+ function translateHotkey(conf) {
84
+ add_edit_shortcuts[conf['hotkey']] = {
85
+ help: "Translate current cell",
86
+ help_index: 'yf',
87
+ handler: translateCurrentCell
88
+ };
89
+ Jupyter.keyboard_manager.edit_shortcuts.add_shortcuts(add_edit_shortcuts);
90
+ Jupyter.keyboard_manager.command_shortcuts.add_shortcuts(add_edit_shortcuts);
91
+ }
92
+
93
+ function main_function(conf) {
94
+ //alert(log_prefix+" main_function output")
95
+ show_mdcells(conf.displayLangs);
96
+ // add the targetLang to the list of langs, if not already present
97
+ if (listOfLangsInNotebook.indexOf(conf.targetLang) == -1) {
98
+ listOfLangsInNotebook.push(targetLang);
99
+ }
100
+ // add the sourceLang to the list of langs, if not already present
101
+ if (listOfLangsInNotebook.indexOf(conf.sourceLang) == -1) {
102
+ listOfLangsInNotebook.push(sourceLang);
103
+ }
104
+ // Display only the langs present in notebook
105
+ if (conf.displayLangs.indexOf('*') == -1) {
106
+ conf.displayLangs = $.makeArray($(listOfLangsInNotebook).filter(conf.displayLangs));
107
+ }
108
+ else {
109
+ conf.displayLangs = ['*'];
110
+ }
111
+ // console.log(log_prefix, "Displaying langs ", conf.displayLangs);
112
+
113
+ }
114
+
115
+ function load_notebook_extension() {
116
+
117
+ // Wait for the notebook to be fully loaded
118
+ if (Jupyter.notebook._fully_loaded) {
119
+ // this tests if the notebook is fully loaded
120
+ console.log(log_prefix + "Notebook fully loaded -- nbTranslate initialized ")
121
+ conf = initialize(conf);
122
+ } else {
123
+ console.log(log_prefix + "Waiting for notebook availability")
124
+ $([Jupyter.events]).on("notebook_loaded.Notebook", function() {
125
+ console.log(log_prefix + "nbTranslate initialized (via notebook_loaded)")
126
+ conf = initialize(conf);
127
+ })
128
+ }
129
+
130
+ }
131
+
132
+
133
+ return {
134
+ load_ipython_extension: load_notebook_extension
135
+ };
136
+ });
.local/share/jupyter/nbextensions/nbTranslate/nbTranslate.js ADDED
@@ -0,0 +1,622 @@
1
+ // Copyright (c) Jupyter-Contrib Team.
2
+ // Distributed under the terms of the Modified BSD License.
3
+ // Author: Jean-François Bercher
4
+
5
+
6
+ //var config_toolbar_present = false;
7
+ var listOfLangsInNotebook = [];
8
+ var log_prefix = '[nbTranslate] ';
9
+ var nbTranslate_toolbarNotYetDisplayed = true;
10
+
11
+ var langs = {
12
+ 'auto': 'Automatic',
13
+ 'af': 'Afrikaans',
14
+ 'sq': 'Albanian',
15
+ 'ar': 'Arabic',
16
+ 'hy': 'Armenian',
17
+ 'az': 'Azerbaijani',
18
+ 'eu': 'Basque',
19
+ 'be': 'Belarusian',
20
+ 'bn': 'Bengali',
21
+ 'bs': 'Bosnian',
22
+ 'bg': 'Bulgarian',
23
+ 'ca': 'Catalan',
24
+ 'ceb': 'Cebuano',
25
+ 'ny': 'Chichewa',
26
+ 'zh-cn': 'Chinese Simplified',
27
+ 'zh-tw': 'Chinese Traditional',
28
+ 'co': 'Corsican',
29
+ 'hr': 'Croatian',
30
+ 'cs': 'Czech',
31
+ 'da': 'Danish',
32
+ 'nl': 'Dutch',
33
+ 'en': 'English',
34
+ 'eo': 'Esperanto',
35
+ 'et': 'Estonian',
36
+ 'tl': 'Filipino',
37
+ 'fi': 'Finnish',
38
+ 'fr': 'French',
39
+ 'fy': 'Frisian',
40
+ 'gl': 'Galician',
41
+ 'ka': 'Georgian',
42
+ 'de': 'German',
43
+ 'el': 'Greek',
44
+ 'gu': 'Gujarati',
45
+ 'ht': 'Haitian Creole',
46
+ 'ha': 'Hausa',
47
+ 'haw': 'Hawaiian',
48
+ 'iw': 'Hebrew',
49
+ 'hi': 'Hindi',
50
+ 'hmn': 'Hmong',
51
+ 'hu': 'Hungarian',
52
+ 'is': 'Icelandic',
53
+ 'ig': 'Igbo',
54
+ 'id': 'Indonesian',
55
+ 'ga': 'Irish',
56
+ 'it': 'Italian',
57
+ 'ja': 'Japanese',
58
+ 'jw': 'Javanese',
59
+ 'kn': 'Kannada',
60
+ 'kk': 'Kazakh',
61
+ 'km': 'Khmer',
62
+ 'ko': 'Korean',
63
+ 'ku': 'Kurdish (Kurmanji)',
64
+ 'ky': 'Kyrgyz',
65
+ 'lo': 'Lao',
66
+ 'la': 'Latin',
67
+ 'lv': 'Latvian',
68
+ 'lt': 'Lithuanian',
69
+ 'lb': 'Luxembourgish',
70
+ 'mk': 'Macedonian',
71
+ 'mg': 'Malagasy',
72
+ 'ms': 'Malay',
73
+ 'ml': 'Malayalam',
74
+ 'mt': 'Maltese',
75
+ 'mi': 'Maori',
76
+ 'mr': 'Marathi',
77
+ 'mn': 'Mongolian',
78
+ 'my': 'Myanmar (Burmese)',
79
+ 'ne': 'Nepali',
80
+ 'no': 'Norwegian',
81
+ 'ps': 'Pashto',
82
+ 'fa': 'Persian',
83
+ 'pl': 'Polish',
84
+ 'pt': 'Portuguese',
85
+ 'ma': 'Punjabi',
86
+ 'ro': 'Romanian',
87
+ 'ru': 'Russian',
88
+ 'sm': 'Samoan',
89
+ 'gd': 'Scots Gaelic',
90
+ 'sr': 'Serbian',
91
+ 'st': 'Sesotho',
92
+ 'sn': 'Shona',
93
+ 'sd': 'Sindhi',
94
+ 'si': 'Sinhala',
95
+ 'sk': 'Slovak',
96
+ 'sl': 'Slovenian',
97
+ 'so': 'Somali',
98
+ 'es': 'Spanish',
99
+ 'su': 'Sudanese',
100
+ 'sw': 'Swahili',
101
+ 'sv': 'Swedish',
102
+ 'tg': 'Tajik',
103
+ 'ta': 'Tamil',
104
+ 'te': 'Telugu',
105
+ 'th': 'Thai',
106
+ 'tr': 'Turkish',
107
+ 'uk': 'Ukrainian',
108
+ 'ur': 'Urdu',
109
+ 'uz': 'Uzbek',
110
+ 'vi': 'Vietnamese',
111
+ 'cy': 'Welsh',
112
+ 'xh': 'Xhosa',
113
+ 'yi': 'Yiddish',
114
+ 'yo': 'Yoruba',
115
+ 'zu': 'Zulu'
116
+ };
117
+
118
+ // test if array contains an element
119
+ function inArray(array, element){
120
+ return array.indexOf(element) > -1; // is not supported by old IE but doesn't really matter
121
+ };
122
+
123
+ /*Extend array prototype to include a contains method
124
+ Array.prototype.contains = function(element){
125
+ return this.indexOf(element) > -1; // is not supported by old IE but doesn't really matter
126
+ };
127
+ Array.prototype.addIfNotAlreadyIn = function(element){
128
+ if (this.indexOf(element) == -1){ // is not supported by old IE but doesn't really matter
129
+ this.push(element)}
130
+ };
131
+ */
132
+
133
+ console.log(log_prefix, " Overriding run-range javascript function");
134
+ requirejs("notebook/js/notebook").Notebook.prototype.execute_cell_range = function(start, end) {
135
+ this.command_mode();
136
+ for (var i = start; i < end; i++) {
137
+ this.select(i);
138
+ var c = this.get_selected_cell();
139
+ if (c.element.is(':visible')) {
140
+ this.execute_cell();
141
+ } else {
142
+ //console.log("do nothing for cell",i);
143
+ }
144
+ }
145
+ };
146
+
147
+
148
+ function translateCurrentCell() {
149
+ //alert(log_prefix+" run on action")
150
+ conf = Jupyter.notebook.metadata.nbTranslate
151
+ var cell = Jupyter.notebook.get_selected_cell();
152
+ var cellText = cell.get_text();
153
+ var maths_and_text = removeMaths(cellText)
154
+ var html_and_text = removeHtml(maths_and_text[1])
155
+ var sourceText = html_and_text[1];
156
+ var mdReplacements = {'*': '<*>', '**': '<**>',
157
+ '_': '<_>', '__': '<__>'}
158
+ // **, *, _, and __ in markdown are "protected" with <.>
159
+ // which seems to survive the google translation -- not always, actually
160
+ sourceText = sourceText.replace(/([\*|_]{1,2})([\s\S]*?)\1/g,
161
+ function(m0,m1,m2){return mdReplacements[m1]+m2+mdReplacements[m1]})
162
+ cell.metadata.lang = conf.sourceLang;
163
+ var translated_text = "";
164
+ if (conf.useGoogleTranslate) {
165
+ var url = "https://translate.googleapis.com/translate_a/single?client=gtx&sl=" + conf.sourceLang + "&tl=" + conf.targetLang + "&dt=t&q=" + encodeURIComponent(sourceText);
166
+ var result = $.get(url)
167
+ .done(function(data, text, obj) {
168
+ if (obj.status == 200) {
169
+ var translated_text = processGoogleTranslateResponse(obj.responseJSON);
170
+
171
+ } else {
172
+ var translated_text = sourceText;
173
+ }
174
+
175
+ translated_text = restoreHtml([html_and_text[0], translated_text])
176
+ translated_text = restoreMaths([maths_and_text[0], translated_text])
177
+ translated_text =
178
+ translated_text.replace(/\\label{([\s\S]*?)}/g, function(m0,m1){return "\\label{"+m1+"-"+conf.targetLang+"}"})
179
+ .replace(/\\ref{([\s\S]*?)}/g, function(m0,m1){return "\\ref{"+m1+"-"+conf.targetLang+"}"})
180
+ insertTranslatedCell(translated_text, cell.rendered)
181
+ })
182
+ } else {
183
+ insertTranslatedCell(cellText, cell.rendered)
184
+ }
185
+ }
186
+
187
+
188
+ function processGoogleTranslateResponse(responseJSON) {
189
+ var translated_text = "";
190
+
191
+ var list_paragraphs = responseJSON[0]
192
+ //var list_paragraphs = data.responseText.match(/\[\"([\S\s]*?)\",/g)
193
+
194
+ list_paragraphs.forEach(
195
+ function(elt) {
196
+ translated_text += elt[0] //.substring(2, elt.length - 2)
197
+ })
198
+ translated_text = translated_text.replace(/([^\\])\\n/g, "$1\n").replace(/([^\\])\\n/g, "$1\n")
199
+ .replace(/\\\\/g, "\\") // unescape
200
+ .replace(/\\"/g, '"') // replace double quotes
201
+ .replace(/\\u003c([\*|_|@]{1,2})\\u003e\s*([\s\S]*?)\s*\\u003c\1\\u003e/g, function(m0,m1,m2){return m1+m2+m1})
202
+ .replace(/<([\*|_|@]{1,2})>\s*([\s\S]*?)\s*<\1>/g, function(m0,m1,m2){return m1+m2+m1})
203
+
204
+ /*for (item in mdReplacements) {
205
+ var pattern = new RegExp(mdReplacements[item], 'gmi');
206
+ translated_text = translated_text.replace(pattern, item);
207
+ }*/
208
+
209
+ // Remove spurious md remaining
210
+ translated_text = translated_text.replace(/\\u003c([\*|_|@]{1,2})\\u003e/g, "")
211
+ // Remove extra spaces in markdown
212
+ translated_text = translated_text.replace(/([\*|_|@]{1,2})\s*([\s\S]*?)\s*\1/g, function(m0,m1,m2){return m1+m2+m1})
213
+ return translated_text;
214
+ }
215
+
216
+
217
+ function insertTranslatedCell(translated_text, render) {
218
+ conf = Jupyter.notebook.metadata.nbTranslate
219
+ Jupyter.notebook.insert_cell_below("markdown");
220
+ Jupyter.notebook.select_next();
221
+ var new_cell = Jupyter.notebook.get_selected_cell();
222
+ new_cell.set_text(translated_text);
223
+ new_cell.metadata.lang = conf.targetLang;
224
+ if (render) new_cell.render();
225
+ }
226
+
227
+ function show_mdcells(displayLangs) {
228
+ // only show cells with lang or nothing
229
+ var ncells = Jupyter.notebook.ncells();
230
+ var cells = Jupyter.notebook.get_cells();
231
+ var MarkdownCell = requirejs('notebook/js/textcell').MarkdownCell;
232
+ var lastmd_cell;
233
+ for (var i = 0; i < ncells; i++) {
234
+ var cell = cells[i];
235
+ if (cell instanceof MarkdownCell) {
236
+ if (typeof cell.metadata.lang != "undefined") {
237
+ if (!inArray(listOfLangsInNotebook, cell.metadata.lang)){
238
+ listOfLangsInNotebook.push(cell.metadata.lang)
239
+ }
240
+ }
241
+ if (typeof cell.metadata.lang == "undefined" ||
242
+ inArray(displayLangs, cell.metadata.lang) ||
243
+ inArray(displayLangs, '*')) {
244
+ if (!cell.rendered) cell.render();
245
+ cell.element.show();
246
+ lastmd_cell = cell;
247
+ }
248
+ else {
249
+ cell.set_rendered("");
250
+ cell.rendered = false ;
251
+ cell.element.hide();
252
+ }
253
+ }
254
+ }
255
+ // this is to update toc contents.
256
+ if(typeof lastmd_cell !== "undefined") {
257
+ lastmd_cell.rendered = false;
258
+ lastmd_cell.render(); // re-render last md cell and issue rendered.MarkdownCell event
259
+ }
260
+ else { //not a single markdown cell has been rendered
261
+ // add one, render it and delete it.
262
+ if (Jupyter.notebook.ncells()>1){
263
+ var c = Jupyter.notebook.insert_cell_at_bottom('markdown');
264
+ c.render();
265
+ Jupyter.notebook.delete_cell(Jupyter.notebook.ncells()-1);
266
+ }
267
+ }
268
+ }
269
+
270
+ function translateToolbarToggle(){
271
+ if (nbTranslate_toolbarNotYetDisplayed){
272
+ buildTranslateToolbar(); //rebuild it
273
+ nbTranslate_toolbarNotYetDisplayed = false;
274
+ $("#nbTranslate_toolbar").show();
275
+ }
276
+ else
277
+ $("#nbTranslate_toolbar").toggle();
278
+ }
279
+
280
+ function buildTranslateToolbar(callback) {
281
+
282
+ conf = Jupyter.notebook.metadata.nbTranslate
283
+ var config_toolbar_present = $("#nbTranslate_toolbar").length >0;
284
+
285
+ if (config_toolbar_present) {
286
+ $("#nbTranslate_toolbar").remove();
287
+ if ($('#LangSelectionMenu').length > 0) $('#LangSelectionMenu').remove();
288
+ $(site).height($(window).height() - $('#header').height() - $('#footer').height());
289
+ }
290
+ sourceLang = conf.sourceLang;
291
+ targetLang = conf.targetLang;
292
+
293
+ //local to this function
294
+
295
+
296
+ // Defining the toolbar --------------------------------------------------------------
297
+ var nbTranslate_toolbar = $('<div id="nbTranslate_toolbar" \
298
+ class="container edit_mode" style="display: none;">')
299
+
300
+ var vertical_separator = '&nbsp;&nbsp;<span style="display: inline-block; \
301
+ vertical-align:bottom; width: 0; height: 1.8em;border-left:2px solid #cccccc"></span>&nbsp;&nbsp;'
302
+
303
+ var extensionLabel = $('<a/>').html('<b> nbTranslate&nbsp;</b>')
304
+ .attr('title', 'Translate from primary to secondary language')
305
+ .on('click', function(data) {translateCurrentCell() })
306
+ var primaryLangLabel = $('<b/>').html('Primary language&nbsp;')
307
+ var secondaryLangLabel = $('<b/>').html('Secondary language&nbsp;')
308
+ var displayLangLabel = $('<b/>').html('Display&nbsp;')
309
+
310
+ // dropdown menu for parameter selection and toggle
311
+
312
+ var sourceLangChoice = $('<ul/>').attr('id', 'sourceLangChoice')
313
+ .addClass("dropdown-menu")
314
+ .attr('min-width', '250px').css('height','300px').css('overflow', 'auto')
315
+
316
+ for (lang in langs) {
317
+ sourceLangChoice.append($('<li/>').append($('<a/>')
318
+ .attr('id', 'sourceItem_' + langs[lang])
319
+ .data('lang', lang)
320
+ .text(langs[lang])
321
+ .css('width', '175px')
322
+ .attr('href', '#')
323
+ .attr('title', 'Select ' + langs[lang] +
324
+ ' as source language')
325
+ .on('click', function(data) {
326
+ sourceLang = $(this).data('lang');
327
+ conf.sourceLang = sourceLang;
328
+ $('#sourceLangConfig').text(langs[sourceLang])
329
+ $('[id^=sourceItem_]' + '>.fa').toggleClass('fa-check', false) //reset the old one if any
330
+ $('#sourceItem_' + $(this).text() + ' > .fa').toggleClass('fa-check', true)
331
+ $('#displayItem_source').data('lang', sourceLang).text(langs[sourceLang]).prepend($('<i/>').addClass('fa menu-icon pull-right')).attr('title', 'Display ' + langs[sourceLang])
332
+ })
333
+ .prepend($('<i/>').addClass('fa menu-icon pull-right'))
334
+ ))
335
+ }
336
+
337
+ var sourceLangMenu = $('<div/>').attr('id', 'cfgby').addClass('btn-group')
338
+ .attr('title', 'Select source language')
339
+ .append($('<a/>')
340
+ .attr('id', "sourceLangConfig")
341
+ .addClass("btn btn-default")
342
+ .append($('<i/>')
343
+ .addClass("fa fa-wrench fa-fw"))
344
+ .text(langs[sourceLang])
345
+ )
346
+ .append($('<a/>')
347
+ .addClass("btn btn-default dropdown-toggle")
348
+ .attr('data-toggle', "dropdown")
349
+ .attr('href', "#")
350
+ .append($('<span/>').addClass("fa fa-caret-down")))
351
+ .append(sourceLangChoice)
352
+
353
+
354
+ // target language menu
355
+
356
+ var targetLangChoice = $('<ul/>').attr('id', 'targetLangChoice').addClass("dropdown-menu")
357
+ .attr('min-width', '250px').css('height','300px').css('overflow', 'auto')
358
+
359
+ var listOfLangItems = $('<li/>')
360
+ for (lang in langs) {
361
+ targetLangChoice.append($('<li/>').append($('<a/>')
362
+ .attr('id', 'targetItem_' + lang)
363
+ .data('lang', lang)
364
+ .text(langs[lang])
365
+ .css('width', '175px')
366
+ .attr('href', '#')
367
+ .attr('title', 'Select ' + langs[lang] +
368
+ ' as target language')
369
+ .on('click', function(data) {
370
+ targetLang = $(this).data('lang');
371
+ conf.targetLang = targetLang;
372
+ $('#targetLangConfig').text(langs[targetLang])
373
+ $('[id^=targetItem_]' + '>.fa').toggleClass('fa-check', false) //reset the old one if any
374
+ $('#targetItem_' + targetLang + ' > .fa').toggleClass('fa-check', true)
375
+ $('#displayItem_target').data('lang', targetLang).text(langs[targetLang]).prepend($('<i/>').addClass('fa menu-icon pull-right')).attr('title', 'Display ' + langs[targetLang])
376
+ // add targetLang to display menu
377
+ // out = $('#displayLangChoice > li a').map(function (idx, elt) {return $(elt).data('lang')})
378
+ if (listOfLangsInNotebook.indexOf(targetLang)==-1){
379
+ listOfLangsInNotebook.push(targetLang);
380
+ conf.displayLangs.push(targetLang);
381
+ $('#displayLangChoice').append(addLangToDisplayLangChoice(targetLang));
382
+ }
383
+ $('#displayItem_' + targetLang + ' > .fa').toggleClass('fa-check',true)
384
+ var index = conf.displayLangs.indexOf('*');
385
+ if (index > -1) {
386
+ conf.displayLangs.splice(index, 1);
387
+ $('#displayItem_all > .fa').toggleClass('fa-check', false)
388
+ }
389
+ show_mdcells(conf.displayLangs)
390
+ })
391
+ .prepend($('<i/>').addClass('fa menu-icon pull-right'))
392
+ ))
393
+ }
394
+
395
+ var targetLangMenu = $('<div/>').attr('id', 'cfgby').addClass('btn-group')
396
+ .attr('title', 'Select target language')
397
+ .append($('<a/>')
398
+ .attr('id', "targetLangConfig")
399
+ .addClass("btn btn-default")
400
+ .append($('<i/>')
401
+ .addClass("fa fa-wrench fa-fw"))
402
+ .text(langs[targetLang])
403
+ )
404
+ .append($('<a/>')
405
+ .addClass("btn btn-default dropdown-toggle")
406
+ .attr('data-toggle', "dropdown")
407
+ .attr('href', "#")
408
+ .append($('<span/>').addClass("fa fa-caret-down")))
409
+ .append(targetLangChoice
410
+ .append(listOfLangItems)
411
+ )
412
+
413
+
414
+ // Display language menu
415
+
416
+ function onClickedLangChoice(data) {
417
+ var lang = $(this).data('lang');
418
+ $('#displayItem_' + lang + ' > .fa').toggleClass('fa-check')
419
+ if ($('#displayItem_' + lang + ' > .fa').hasClass('fa-check')) {
420
+ if (conf.displayLangs.indexOf(lang) == -1) conf.displayLangs.push(lang);
421
+ $('#displayItem_all > .fa').toggleClass('fa-check', false)
422
+ var index = conf.displayLangs.indexOf('*');
423
+ if (index > -1) {
424
+ conf.displayLangs.splice(index, 1);
425
+ }
426
+ } else {
427
+ var index = conf.displayLangs.indexOf(lang);
428
+ if (index > -1) {
429
+ conf.displayLangs.splice(index, 1);
430
+ }
431
+ }
432
+ $('#displayLangConfig').text('Select')
433
+ // console.log("displayLangs", displayLangs)
434
+ show_mdcells(conf.displayLangs)
435
+ }
436
+
437
+
438
+ function addLangToDisplayLangChoice(lang){
439
+ if (typeof lang === 'string' || lang instanceof String)
440
+ return $('<li/>').append($('<a/>')
441
+ .attr('id', 'displayItem_'+lang)
442
+ .data('lang', lang)
443
+ .text(langs[lang])
444
+ .css('width', '175px')
445
+ .attr('href', '#')
446
+ .attr('title', 'Display ' + langs[lang])
447
+ .on('click', onClickedLangChoice)
448
+ .prepend($('<i/>').addClass('fa menu-icon pull-right'))
449
+ )
450
+ }
451
+
452
+ var displayLangChoice = $('<ul/>')
453
+ .attr('id', 'displayLangChoice')
454
+ .addClass("dropdown-menu")
455
+ .attr('min-width', '300px')
456
+
457
+ var allLangs = $('<li/>')
458
+ allLangs.attr('id', 'allLangs')
459
+ .append($('<a/>')
460
+ .attr('id', 'displayItem_all')
461
+ .data('lang', '*')
462
+ .text('All')
463
+ .css('width', '175px')
464
+ .attr('href', '#')
465
+ .attr('title', 'Display all languages')
466
+ .on('click', function(data) {
467
+ conf.displayLangs = ['*'];
468
+ $('#displayLangConfig').text('All')
469
+ $('[id^=displayItem_]' + '>.fa').toggleClass('fa-check', false) //reset the old one if any
470
+ $('#displayItem_all > .fa').toggleClass('fa-check', true)
471
+ show_mdcells(conf.displayLangs)
472
+ })
473
+ .prepend($('<i/>').addClass('fa menu-icon pull-right'))
474
+ )
475
+
476
+ //console.log("List of langs", listOfLangsInNotebook)
477
+ for (var langIndex in listOfLangsInNotebook) {
478
+ var lang = listOfLangsInNotebook[langIndex]
479
+ //console.log("lang",lang)
480
+ if (typeof lang === 'string' || lang instanceof String){
481
+ displayLangChoice.append(
482
+ addLangToDisplayLangChoice(listOfLangsInNotebook[langIndex]))}
483
+ }
484
+
485
+ var displayLangMenu = $('<div/>').attr('id', 'cfgby').addClass('btn-group')
486
+ .attr('title', 'Select language to display')
487
+ .append($('<a/>')
488
+ .attr('id', "displayLangConfig")
489
+ .addClass("btn btn-default")
490
+ .append($('<i/>')
491
+ .addClass("fa fa-wrench fa-fw"))
492
+ .text(inArray(conf.displayLangs, '*') ? 'All' : 'Select')
493
+ )
494
+ .append($('<a/>')
495
+ .addClass("btn btn-default dropdown-toggle")
496
+ .attr('data-toggle', "dropdown")
497
+ .attr('href', "#")
498
+ .append($('<span/>').addClass("fa fa-caret-down")))
499
+ .append(displayLangChoice.prepend(allLangs))
500
+
501
+
502
+ // Refresh display in notebook
503
+ var refresh_languages_display_button = $("<a/>")
504
+ .addClass("btn btn-default")
505
+ .attr('href', "#")
506
+ .attr('title', 'Refresh languages display in notebook')
507
+ .css('color', 'black')
508
+ .attr('id', 'refreshLanguagesDisplay')
509
+ .append($("<i/>").addClass('fa fa-refresh'))
510
+ .on('click', function() {
511
+ show_mdcells(conf.displayLangs)
512
+ })
513
+
514
+ // close button
515
+ var suicide_button = $("<a/>")
516
+ .addClass("btn btn-default")
517
+ .attr('href', "#")
518
+ .attr('title', 'Close nbTranslate toolbar')
519
+ .css('float', 'right')
520
+ .attr('id', 'suicide')
521
+ .attr('title', 'Close the nbTranslate configuration toolbar')
522
+ .append($("<i/>").addClass('fa fa-power-off'))
523
+ .on('click', function() {
524
+ translateToolbarToggle();
525
+ return
526
+ })
527
+
528
+
529
+ // translateButton
530
+ var translate_button = $("<a/>")
531
+ .addClass("btn btn-default")
532
+ .attr('href', "#")
533
+ .attr('title', 'Translate current cell')
534
+ .attr('id', 'translateButton')
535
+ .html('<b> nbTranslate&nbsp;</b>')
536
+ .on('click', function(data) {translateCurrentCell() })
537
+
538
+ // Enable Google Engine button
539
+ var useGoogleTranslateButton = $("<a/>")
540
+ .addClass("btn btn-default")
541
+ .attr('href', "#")
542
+ .attr('title', 'Use Google translate')
543
+ .attr('id', 'useGoogleTranslateButton')
544
+ .append($("<i/>").addClass(conf.useGoogleTranslate ? 'fa fa-check-square-o' : 'fa fa-square-o'))
545
+ .on('click', function() { $('#useGoogleTranslateButton > .fa').toggleClass("fa-square-o fa-check-square-o");
546
+ conf.useGoogleTranslate = !conf.useGoogleTranslate })
547
+
548
+ // Lang Menu
549
+ var langMenu = $('<a/>').attr('href', '#')
550
+ .addClass('dropdown-toggle')
551
+ .attr('data-toggle', "dropdown")
552
+ .attr('aria-expanded', "false")
553
+ .text("Langs")
554
+ .attr('title', 'Languages to display in notebook (nbTranslate extension)')
555
+
556
+ // Finally the toolbar itself
557
+ nbTranslate_toolbar.append(translate_button)
558
+ .append(vertical_separator)
559
+ .append(useGoogleTranslateButton)
560
+ .append(vertical_separator)
561
+ .append(primaryLangLabel)
562
+ .append(sourceLangMenu)
563
+ .append(vertical_separator)
564
+ .append(secondaryLangLabel)
565
+ .append(targetLangMenu)
566
+ if (conf.langInMainMenu) {
567
+ $('#kernel_menu').parent().after('<li id="LangSelectionMenu"/>')
568
+ $('#LangSelectionMenu').addClass('dropdown')
569
+ .append(langMenu).append(displayLangChoice)
570
+ }
571
+ else{
572
+ nbTranslate_toolbar.append(vertical_separator)
573
+ .append(displayLangLabel)
574
+ .append(displayLangMenu)
575
+ }
576
+ nbTranslate_toolbar.append('&nbsp;&nbsp;')
577
+ .append(refresh_languages_display_button)
578
+ .append(suicide_button)
579
+
580
+ // Appending the new toolbar to the main one
581
+ $('head').append('<style> input:focus {border-color: #66afe9;\
582
+ outline: 0; box-shadow: inset 0 1px 1px rgba(0,0,0,.075), 0 0 8px \
583
+ rgba(102, 175, 233, 0.6);}</style>')
584
+
585
+ $("#maintoolbar-container").append(nbTranslate_toolbar);
586
+ $("#nbTranslate_toolbar").css({ 'padding': '5px' });
587
+
588
+
589
+ // Initializing toggle checks
590
+ $('#sourceItem_' + langs[sourceLang] + ' > .fa').toggleClass('fa-check', true)
591
+ $('#targetItem_' + targetLang + ' > .fa').toggleClass('fa-check', true)
592
+
593
+ for (var langIndex in conf.displayLangs) {
594
+ var lang = conf.displayLangs[langIndex];
595
+ if (typeof lang === 'string' || lang instanceof String){
596
+ $('#displayItem_' + lang + ' .fa')
597
+ .toggleClass('fa-check', true)
598
+ }
599
+ if (conf.displayLangs.indexOf('*')> -1)
600
+ $('#displayItem_all > .fa').toggleClass('fa-check', true)
601
+ }
602
+ }
603
+
604
+ /*
605
+ function create_lang_menu(callback) {
606
+
607
+ if ($('#LangSelectionMenu').length > 0) {
608
+ return;
609
+ }
610
+ var displayLangChoiceClone = $('#displayLangChoice').clone()
611
+
612
+ $('#help_menu').parent().before('<li id="LangSelectionMenu"/>')
613
+ $('#LangSelectionMenu').addClass('dropdown')
614
+ .append($('<a/>').attr('href', '#')
615
+ .addClass('dropdown-toogle')
616
+ .attr('data-toggle', "dropdown")
617
+ .attr('aria-expanded', "false")
618
+ .text("Langs"))
619
+ .append(displayLangChoiceClone)
620
+ }
621
+
622
+ */
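
The settings this main.js manipulates all live in notebook and cell metadata rather than in a separate config file. The snippet below is a minimal sketch of that shape, inferred from the fields the code above reads and writes (conf.sourceLang, conf.targetLang, conf.displayLangs, conf.useGoogleTranslate, conf.langInMainMenu, and the per-cell metadata.lang); the concrete values are illustrative assumptions, not defaults set by the extension.

// Sketch (assumed values) of the metadata nbTranslate reads and writes.
// Notebook-level settings, accessed as Jupyter.notebook.metadata.nbTranslate:
var nbTranslateConf = {
    sourceLang: 'en',          // primary language of the notebook
    targetLang: 'fr',          // language inserted by translateCurrentCell()
    displayLangs: ['*'],       // '*' shows every language, otherwise a list of codes
    useGoogleTranslate: true,  // when false, the cell is only duplicated, not translated
    langInMainMenu: true       // put the "Langs" display menu in the main menu bar
};
// Per-cell tag, accessed as cell.metadata.lang and used by show_mdcells()
// to hide markdown cells whose language is not currently displayed:
var cellLang = { lang: 'fr' };
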
.local/share/jupyter/nbextensions/notify/notification.png ADDED
.local/share/jupyter/nbextensions/notify/notify.js ADDED
@@ -0,0 +1,203 @@
1
+ /*
2
+
3
+ *************************
4
+ Display Web Notifications
5
+ *************************
6
+
7
+ Add this file to $(ipython locate)/nbextensions/
8
+
9
+ */
10
+
11
+ define([
12
+ "jquery",
13
+ "base/js/namespace",
14
+ "require",
15
+ ], function ($, Jupyter, requirejs) {
16
+ "use strict";
17
+
18
+ var params = {
19
+ sticky: false,
20
+ play_sound: false
21
+ };
22
+ var audio_file = "./notify.mp3";
23
+
24
+ var current_time = function() {
25
+ return new Date().getTime() / 1000;
26
+ };
27
+
28
+ var start_time = current_time();
29
+ var min_time = 0;
30
+ var enabled = false;
31
+ var first_start = true;
32
+ var busy_kernel = true;
33
+
34
+ var add_permissions_list = function () {
35
+ var ipython_toolbar = $('#maintoolbar-container');
36
+ var label = $('<span/>').addClass("navbar-text permissions-list").text('Notify:');
37
+ var select = $('<select/>')
38
+ .attr('id', 'permissions-select')
39
+ .attr('class', 'permissions-list form-control select-xs')
40
+ .append($('<option/>')
41
+ .attr('value', 'Disabled')
42
+ .text('Disabled'));
43
+ ipython_toolbar.append(label).append(select);
44
+ select.change(function() {
45
+ var val = $(this).val();
46
+ if (val == 'Disabled') {
47
+ enabled = false;
48
+ } else {
49
+ enabled = true;
50
+ min_time = val;
51
+ }
52
+ save_state();
53
+ });
54
+ // Add options in addition to the default, 'Disabled'
55
+ // Options give the minimum kernel busy time in seconds after which a notification is displayed
56
+ var presets = [0, 5, 10, 30];
57
+ for (var i=0; i<presets.length; i++) {
58
+ var name = presets[i];
59
+ select.append($('<option/>').attr('value', name).text(name));
60
+ }
61
+ // Finally, restore the selected option if it was saved in notebook metadata
62
+ restore_state();
63
+ };
64
+
65
+ var add_permissions_button = function () {
66
+ if ($("#permissions-button").length === 0) {
67
+ $(Jupyter.toolbar.add_buttons_group([
68
+ Jupyter.keyboard_manager.actions.register ({
69
+ 'help' : 'Grant Notification Permissions',
70
+ 'icon' : 'fa-check',
71
+ 'handler': ask_permission,
72
+ },'grant-notifications-permission', 'notify')
73
+ ])).find('.btn').attr('id', 'permissions-button');
74
+ }
75
+ };
76
+
77
+ var ensure_permission = function () {
78
+ ask_permission(); // Asks for permission on notebook load, doesn't work in Chrome
79
+ // If don't have permission now, add a button to the toolbar to let user request permission
80
+ if (Notification && Notification.permission !== "granted") {
81
+ add_permissions_button();
82
+ add_permissions_list();
83
+ $(".permissions-list").hide();
84
+ } else if (Notification && Notification.permission === "granted") {
85
+ add_permissions_list();
86
+ }
87
+ };
88
+
89
+ var ask_permission = function () {
90
+ if (Notification && Notification.permission !== "granted") {
91
+ Notification.requestPermission(function (status) {
92
+ if (Notification.permission !== status) {
93
+ Notification.permission = status;
94
+ }
95
+ // Wait for permission to be granted, then remove the permissions-button and show permissions-list
96
+ if (Notification && Notification.permission === "granted" && $("#permissions-button").length > 0) {
97
+ $("#permissions-button").remove();
98
+ $(".permissions-list").show();
99
+ }
100
+ });
101
+ }
102
+ };
103
+
104
+ var play_notification_sound = function(opts) {
105
+ /**
106
+ * NB: the Web Notification API specifies a mechanism for playing sound
107
+ * with notifications. As of 2017-08-22, it is unsupported in all browsers.
108
+ * This is a workaround. It should be updated to an implementation like
109
+ * this when browser support is available:
110
+ *
111
+ * opts["sound"] = requirejs.toUrl(audio_file);
112
+ */
113
+ try {
114
+ var audio = new Audio(requirejs.toUrl(audio_file));
115
+ audio.play();
116
+ } catch(e) {
117
+ console.log('HTML5 Audio not supported in browser.');
118
+ }
119
+ };
120
+
121
+ var notify = function () {
122
+ var elapsed_time = current_time() - start_time;
123
+ if (enabled && !first_start && !busy_kernel && elapsed_time >= min_time) {
124
+ var opts = {
125
+ body: "Kernel is now idle\n(ran for " + Math.round(elapsed_time) + " secs)",
126
+ icon: Jupyter.notebook.base_url + "static/base/images/favicon.ico",
127
+ requireInteraction: params.sticky
128
+ };
129
+ if (params.play_sound) {
130
+ play_notification_sound(opts);
131
+ }
132
+ var n = new Notification(Jupyter.notebook.notebook_name, opts);
133
+ n.onclick = function(event){ window.focus(); }
134
+ }
135
+ if (first_start) {
136
+ first_start = false;
137
+ }
138
+ };
139
+
140
+ var load_state = function () {
141
+ if (!Jupyter.notebook) return;
142
+
143
+ if ("notify_time" in Jupyter.notebook.metadata) {
144
+ min_time = Jupyter.notebook.metadata.notify_time;
145
+ enabled = true;
146
+ }
147
+ };
148
+
149
+ var save_state = function () {
150
+ if (enabled) {
151
+ if (Jupyter.notebook.metadata.notify_time !== min_time) {
152
+ Jupyter.notebook.metadata.notify_time = min_time;
153
+ Jupyter.notebook.set_dirty();
154
+ }
155
+ } else {
156
+ if (Jupyter.notebook.metadata.hasOwnProperty('notify_time')) {
157
+ delete Jupyter.notebook.metadata.notify_time;
158
+ Jupyter.notebook.set_dirty();
159
+ }
160
+ }
161
+ };
162
+
163
+ var restore_state = function () {
164
+ load_state();
165
+ // Only proceed if the permissions selector is being shown
166
+ if ($("#permissions-select").length > 0) {
167
+ if (!enabled) {
168
+ $("#permissions-select").val("Disabled");
169
+ } else {
170
+ $("#permissions-select").val(min_time);
171
+ }
172
+ }
173
+ };
174
+
175
+ var setup_notifier = function () {
176
+ $([Jupyter.events]).on('kernel_starting.Kernel',function () {
177
+ first_start = true; // reset first_start status when restarting the kernel
178
+ });
179
+
180
+ $([Jupyter.events]).on('kernel_busy.Kernel',function () {
181
+ busy_kernel = true;
182
+ start_time = current_time(); // reset the timer
183
+ });
184
+
185
+ $([Jupyter.events]).on('kernel_idle.Kernel',function () {
186
+ busy_kernel = false; // Used to make sure that kernel doesn't go busy again within the timeout set below.
187
+ setTimeout(notify, 500);
188
+ });
189
+ };
190
+
191
+ var load_ipython_extension = function () {
192
+ return Jupyter.notebook.config.loaded.then(function() {
193
+ $.extend(true, params, Jupyter.notebook.config.data.notify);
194
+ ensure_permission();
195
+ setup_notifier();
196
+ });
197
+ };
198
+
199
+ return {
200
+ load_ipython_extension : load_ipython_extension
201
+ };
202
+
203
+ });
.local/share/jupyter/nbextensions/notify/notify.mp3 ADDED
Binary file (13.9 kB). View file
 
.local/share/jupyter/nbextensions/notify/notify.yaml ADDED
@@ -0,0 +1,18 @@
1
+ Type: IPython Notebook Extension
2
+ Name: Notify
3
+ Description: >
4
+ Show a browser notification when kernel becomes idle again after being busy
5
+ for some time - configurable after 0, 5, 10, or 30 seconds busy.
6
+ Link: readme.md
7
+ Icon: notification.png
8
+ Main: notify.js
9
+ Compatibility: 4.x, 5.x
10
+ Parameters:
11
+ - name: notify.sticky
12
+ description: Require interactions on notifications to dismiss them. (Chrome only)
13
+ input_type: checkbox
14
+ default: false
15
+ - name: notify.play_sound
16
+ description: Play notification sound.
17
+ input_type: checkbox
18
+ default: false
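
The two parameters declared above are read by notify.js through Jupyter.notebook.config.data.notify and merged into its params object. The snippet below is only an illustration of that configuration shape with user-chosen values; where the section is stored on disk (typically the notebook's nbconfig JSON) is an assumption, not something stated in these files.

// Illustrative shape of the "notify" config section consumed by
// $.extend(true, params, Jupyter.notebook.config.data.notify) in notify.js.
var notifyConfig = {
    notify: {
        sticky: true,      // keep the notification on screen until dismissed (Chrome only)
        play_sound: false  // do not play notify.mp3 when the notification fires
    }
};
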
.local/share/jupyter/nbextensions/notify/readme.md ADDED
@@ -0,0 +1,48 @@
1
+ # Notebook web notifications
2
+
3
+ Jupyter notebook extension to display a web notification to notify you when the
4
+ kernel becomes idle.
5
+ This can be useful when running tasks that take more than a couple of seconds
6
+ to complete.
7
+
8
+ The extension has been tested with the most recent versions of Firefox, Chrome
9
+ and Safari.
10
+
11
+ Initially, a button to request notification permissions is shown in the toolbar.
12
+ After notification permissions have been granted, this button is replaced by a
13
+ dropdown menu with five choices: Disabled, 0, 5, 10 and 30.
14
+ To activate notifications, select a minimum kernel busy time required to
15
+ trigger a notification (e.g. if selecting 5, a notification will only be shown
16
+ if the kernel was busy for more than 5 seconds). The selection is saved in the
17
+ notebook's metadata and restored when the notebook is re-opened.
18
+
19
+ You may configure the plugin so that notifications require manual dismissal
20
+ before disappearing. Browser support is limited, see
21
+ [here](https://developer.mozilla.org/en-US/docs/Web/API/notification/requireInteraction)
22
+ to check if your browser supports this. You may also configure the plugin so
23
+ that notifications play a sound.
24
+
25
+ ![notification](notification.png "notification")
26
+
27
+
28
+ ## Original Source
29
+ This extension originally comes from [@sjpfenninger](https://github.com/sjpfenninger)'s [GitHub repository](https://github.com/sjpfenninger/ipython-extensions).
30
+
31
+ ## Credits
32
+
33
+ This extension contains sounds created by RSilveira_88 on freesound.org, licensed
34
+ under the CC-BY 3.0 License. Modifications by morrisjm. You may find the
35
+ modified version [here](https://freesound.org/people/morrisjm/sounds/268756/) and
36
+ the original [here](https://freesound.org/people/RSilveira_88/sounds/216306/).
37
+
38
+ ## License
39
+
40
+ The MIT License (MIT)
41
+
42
+ Copyright (c) 2014 Stefan Pfenninger
43
+
44
+ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
45
+
46
+ The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
47
+
48
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
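
As the readme notes, the selected busy-time threshold is persisted in the notebook itself. Per load_state() and save_state() in notify.js, it is kept under a single notify_time key in the notebook metadata; the sketch below assumes a threshold of 5 seconds.

// What save_state() in notify.js effectively does when "5" is selected:
// notify only if the kernel was busy for at least 5 seconds.
Jupyter.notebook.metadata.notify_time = 5;  // removed again when "Disabled" is selected
Jupyter.notebook.set_dirty();               // mark the notebook as needing a save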