Dataset schema. Each record below lists its field values in this column order (an empty ast_errors string is simply omitted); for int64 columns the Min/Max are value ranges, for string columns they are length ranges.

    Column           Type            Min     Max
    ast_errors       stringlengths   0       3.2k
    d_id             int64           44      121k
    id               int64           70      338k
    n_whitespaces    int64           3       14k
    path             stringlengths   8       134
    n_words          int64           4       4.82k
    n_identifiers    int64           1       131
    random_cut       stringlengths   16      15.8k
    commit_message   stringlengths   2       15.3k
    fun_name         stringlengths   1       84
    commit_id        stringlengths   40      40
    repo             stringlengths   3       28
    file_name        stringlengths   5       79
    ast_levels       int64           6       31
    nloc             int64           1       548
    url              stringlengths   31      59
    complexity       int64           1       66
    token_counts     int64           6       2.13k
    n_ast_errors     int64           0       28
    vocab_size       int64           4       1.11k
    n_ast_nodes      int64           15      19.2k
    language         stringclasses   1 value
    documentation    dict
    code             stringlengths   101     62.2k
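For readers who want to work with this preview programmatically, here is a minimal, hedged sketch of loading one record and printing a few of the columns listed above. It assumes the preview comes from a Hugging Face-style dataset; the identifier "org/python-commit-functions" is a hypothetical placeholder, since the actual dataset name does not appear in this excerpt.

    # Minimal sketch only. "org/python-commit-functions" is a hypothetical
    # placeholder for the real dataset identifier, which is not given here.
    from datasets import load_dataset

    ds = load_dataset("org/python-commit-functions", split="train")
    row = ds[0]

    # Provenance columns
    print(row["repo"], row["path"], row["fun_name"], row["commit_id"])

    # Numeric code-metric columns from the schema above
    for key in ("nloc", "complexity", "token_counts", "vocab_size",
                "n_ast_nodes", "n_ast_errors"):
        print(key, row[key])

    # The docstring is stored as a nested dict, the function body as a string
    print(row["documentation"]["docstring"])
    print(row["code"][:200])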
d_id: 34,462 | id: 149,618 | n_whitespaces: 590 | path: freqtrade/freqtradebot.py | n_words: 170 | n_identifiers: 31
def enter_positions(self) -> int: trades_created = 0 whitelist = copy.deepcopy(self.active_pair_whitelist) if not whitelist: logger.info("Active pair whitelist is empty.") return trades_created # Remove pairs for currently opened trades from the whitelist for trade in Trade.get_open_trades(): if
Use "side" parameter when calling Pairlocks
enter_positions
737bdfe844e575bdbbc9cd9d2a84291fe2e58300
freqtrade
freqtradebot.py
ast_levels: 17 | nloc: 34 | url: https://github.com/freqtrade/freqtrade.git | complexity: 10 | token_counts: 172 | n_ast_errors: 0 | vocab_size: 111 | n_ast_nodes: 327 | language: Python
{ "docstring": "\n Tries to execute entry orders for new trades (positions)\n ", "language": "en", "n_whitespaces": 24, "n_words": 9, "vocab_size": 9 }
def enter_positions(self) -> int: trades_created = 0 whitelist = copy.deepcopy(self.active_pair_whitelist) if not whitelist: logger.info("Active pair whitelist is empty.") return trades_created # Remove pairs for currently opened trades from the whitelist for trade in Trade.get_open_trades(): if trade.pair in whitelist: whitelist.remove(trade.pair) logger.debug('Ignoring %s in pair whitelist', trade.pair) if not whitelist: logger.info("No currency pair in active pair whitelist, " "but checking to exit open trades.") return trades_created if PairLocks.is_global_lock(side='*'): # This only checks for total locks (both sides). # per-side locks will be evaluated by `is_pair_locked` within create_trade, # once the direction for the trade is clear. lock = PairLocks.get_pair_longest_lock('*') if lock: self.log_once(f"Global pairlock active until " f"{lock.lock_end_time.strftime(constants.DATETIME_PRINT_FORMAT)}. " f"Not creating new trades, reason: {lock.reason}.", logger.info) else: self.log_once("Global pairlock active. Not creating new trades.", logger.info) return trades_created # Create entity and execute trade for each pair from whitelist for pair in whitelist: try: trades_created += self.create_trade(pair) except DependencyException as exception: logger.warning('Unable to create trade for %s: %s', pair, exception) if not trades_created: logger.debug("Found no enter signals for whitelisted currencies. Trying again...") return trades_created
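The record above stores derived metrics (nloc 34, complexity 10, token_counts 172, vocab_size 111, n_ast_nodes 327) next to the function body. As a rough illustration of what columns like these typically measure, and not the pipeline that actually produced this dataset, the following sketch computes comparable counts for an arbitrary code string with Python's standard ast and tokenize modules.

    # Illustration only: approximate analogues of the nloc / token_counts /
    # vocab_size / n_ast_nodes columns, NOT the dataset's real extraction code.
    import ast
    import io
    import tokenize

    def rough_metrics(code: str) -> dict:
        tree = ast.parse(code)
        n_ast_nodes = sum(1 for _ in ast.walk(tree))  # every AST node
        tokens = [
            tok.string
            for tok in tokenize.generate_tokens(io.StringIO(code).readline)
            if tok.type not in (tokenize.NL, tokenize.NEWLINE, tokenize.INDENT,
                                tokenize.DEDENT, tokenize.ENDMARKER)
        ]
        return {
            "nloc": sum(1 for line in code.splitlines() if line.strip()),
            "token_counts": len(tokens),
            "vocab_size": len(set(tokens)),  # distinct token strings
            "n_ast_nodes": n_ast_nodes,
        }

    print(rough_metrics("def add(a, b):\n    return a + b\n"))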
d_id: 7,335 | id: 40,171 | n_whitespaces: 77 | path: dash/_validate.py | n_words: 22 | n_identifiers: 8
def validate_js_path(registered_paths, package_name, path_in_package_dist): if package_name not in registered_paths: raise exceptions.DependencyException( f ) if path_in_package_dist not in registered_paths[package_name]: raise exceptions.DependencyException( f )
f-strings everywhere! fffff
validate_js_path
c3c84b9ecf16bcc61ed80ec39d511af92fe07f2c
dash
_validate.py
ast_levels: 15 | nloc: 17 | url: https://github.com/plotly/dash.git | complexity: 3 | token_counts: 40 | n_ast_errors: 0 | vocab_size: 15 | n_ast_nodes: 93 | language: Python
{ "docstring": "\n Error loading dependency. \"{package_name}\" is not a registered library.\n Registered libraries are:\n {list(registered_paths.keys())}\n \n \"{package_name}\" is registered but the path requested is not valid.\n The path requested: \"{path_in_package_dist}\"\n List of registered paths: {registered_paths}\n ", "language": "en", "n_whitespaces": 122, "n_words": 32, "vocab_size": 25 }
def validate_js_path(registered_paths, package_name, path_in_package_dist): if package_name not in registered_paths: raise exceptions.DependencyException( f ) if path_in_package_dist not in registered_paths[package_name]: raise exceptions.DependencyException( f )
d_id: 27,706 | id: 124,874 | n_whitespaces: 76 | path: python/ray/serve/utils.py | n_words: 36 | n_identifiers: 12
def get_all_node_ids() -> List[Tuple[str, str]]: node_ids = [] # Sort on NodeID to ensure the ordering is deterministic across the cluster. for node in sorted(ray.nodes(), key=lambda entry: entry["NodeID"]): # print(node) if node["Alive"]: node_ids.append((node["NodeID"], node["NodeName"])) return node_ids
Revert "Revert "[serve] Use soft constraint for pinning controller on head node (#25091)" (#25857)" (#25858)
get_all_node_ids
0ecc7dad74d77a24705e44da2ba80892177377bc
ray
utils.py
ast_levels: 14 | nloc: 11 | url: https://github.com/ray-project/ray.git | complexity: 3 | token_counts: 65 | n_ast_errors: 0 | vocab_size: 33 | n_ast_nodes: 110 | language: Python
{ "docstring": "Get IDs for all live nodes in the cluster.\n\n Returns a list of (node_id: str, ip_address: str). The node_id can be\n passed into the Ray SchedulingPolicy API.\n ", "language": "en", "n_whitespaces": 36, "n_words": 27, "vocab_size": 26 }
def get_all_node_ids() -> List[Tuple[str, str]]: node_ids = [] # Sort on NodeID to ensure the ordering is deterministic across the cluster. for node in sorted(ray.nodes(), key=lambda entry: entry["NodeID"]): # print(node) if node["Alive"]: node_ids.append((node["NodeID"], node["NodeName"])) return node_ids
d_id: 69,876 | id: 242,540 | n_whitespaces: 267 | path: src/PIL/PpmImagePlugin.py | n_words: 70 | n_identifiers: 8
def _ignore_comments(self, block): comment_spans = False while True: comment_start = block.find(b"#") # look for next comment if comment_start == -1: # no comment found break comment_end = self._find_comment_end(block, comment_start) if comment_end != -1: # comment ends in this block block = ( block[:comment_start] + block[comment_end + 1 :] ) # delete comment else: # last comment continues to next block(s) block = block[:com
Implement bitonal decoder
_ignore_comments
ea7e108ca3c6fcd00014de370075ed0361a08138
Pillow
PpmImagePlugin.py
ast_levels: 15 | nloc: 16 | url: https://github.com/python-pillow/Pillow.git | complexity: 4 | token_counts: 80 | n_ast_errors: 0 | vocab_size: 45 | n_ast_nodes: 136 | language: Python
{ "docstring": "\n Deletes comments from block. If comment does not end in this\n block, raises a flag.\n ", "language": "en", "n_whitespaces": 37, "n_words": 15, "vocab_size": 15 }
def _ignore_comments(self, block): comment_spans = False while True: comment_start = block.find(b"#") # look for next comment if comment_start == -1: # no comment found break comment_end = self._find_comment_end(block, comment_start) if comment_end != -1: # comment ends in this block block = ( block[:comment_start] + block[comment_end + 1 :] ) # delete comment else: # last comment continues to next block(s) block = block[:comment_start] comment_spans = True break return block, comment_spans
d_id: 28,826 | id: 128,859 | n_whitespaces: 18 | path: python/ray/train/tests/test_gpu.py | n_words: 9 | n_identifiers: 5
def test_torch_auto_gpu_to_cpu(ray_start_4_cpus_2_gpus): num_workers = 2 assert os.environ["CUDA_VISIBLE_DEVICES"] == ""
[AIR] Hard deprecate old Trainer, old callbacks (#29015) Hard deprecations for ray.train.Trainer, ray.train.callbacks and ray.train.checkpoint.CheckpointStrategy. Restart-on-failure logic from BackendExecutor has also been removed as it is superseded by Tune. Some tests have been refactored to use the new API. Tests that are no longer applicable have been removed. Signed-off-by: Antoni Baum <[email protected]> Signed-off-by: Amog Kamsetty <[email protected]> Co-authored-by: Amog Kamsetty <[email protected]>
test_torch_auto_gpu_to_cpu
d99eff919bf785f911e4eebc87ddc4960344a139
ray
test_gpu.py
ast_levels: 8 | nloc: 23 | url: https://github.com/ray-project/ray.git | complexity: 3 | token_counts: 163 | n_ast_errors: 0 | vocab_size: 9 | n_ast_nodes: 35 | language: Python
{ "docstring": "Tests if GPU tensors are auto converted to CPU on driver.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
def test_torch_auto_gpu_to_cpu(ray_start_4_cpus_2_gpus): num_workers = 2 assert os.environ["CUDA_VISIBLE_DEVICES"] == ""
d_id: 56,301 | id: 221,262 | n_whitespaces: 250 | path: python3.10.4/Lib/calendar.py | n_words: 60 | n_identifiers: 19
def formatyear(self, theyear, width=3): v = [] a = v.append width = max(width, 1) a('<table border="0" cellpadding="0" cellspacing="0" class="%s">' % self.cssclass_year) a('\n') a('<tr><th colspan="%d" class="%s">%s</th></tr>' % ( width, self.cssclass_year_head, theyear)) for i in range(January, January+12, width): # months in this row months = range(i, min(i+width, 13)) a('<tr>') for m in months: a('<td>') a(self.formatmon
add python 3.10.4 for windows
formatyear
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
calendar.py
ast_levels: 14 | nloc: 19 | url: https://github.com/XX-net/XX-Net.git | complexity: 3 | token_counts: 131 | n_ast_errors: 0 | vocab_size: 52 | n_ast_nodes: 223 | language: Python
{ "docstring": "\n Return a formatted year as a table of tables.\n ", "language": "en", "n_whitespaces": 24, "n_words": 9, "vocab_size": 8 }
def formatyear(self, theyear, width=3): v = [] a = v.append width = max(width, 1) a('<table border="0" cellpadding="0" cellspacing="0" class="%s">' % self.cssclass_year) a('\n') a('<tr><th colspan="%d" class="%s">%s</th></tr>' % ( width, self.cssclass_year_head, theyear)) for i in range(January, January+12, width): # months in this row months = range(i, min(i+width, 13)) a('<tr>') for m in months: a('<td>') a(self.formatmonth(theyear, m, withyear=False)) a('</td>') a('</tr>') a('</table>') return ''.join(v)
d_id: 7,106 | id: 39,093 | n_whitespaces: 522 | path: recommenders/models/sasrec/ssept.py | n_words: 198 | n_identifiers: 40
def predict(self, inputs): training = False user = inputs["user"] input_seq = inputs["input_seq"] candidate = inputs["candidate"] mask = tf.expand_dims(tf.cast(tf.not_equal(input_seq, 0), tf.float32), -1) seq_embeddings, positional_embeddings = self.embedding(input_seq) # (1, s, h) u0_latent = self.user_embedding_layer(user) u0_latent = u0_latent * (self.user_embedding_dim ** 0.5) # (1, 1, h) u0_latent = tf.squeeze(u0_latent, axis=0) # (1, h) test_user_emb = tf.tile(u0_latent, [1 + self.num_neg_test, 1]) # (101, h) u_latent = self.user_embedding_layer(user) u_latent = u_latent * (self.user_embedding_dim ** 0.5) # (b, 1, h) u_latent = tf.tile(u_latent, [1, tf.shape(input_seq)[1], 1]) # (b, s, h) seq_embeddings = tf.reshape( tf.concat([seq_embeddings, u_latent], 2), [tf.shape(input_seq)[0], -1, self.hidden_units], ) seq_embeddings
cleanup-1
predict
f15d8b347b601069aba950a53f879e9659bd7c91
recommenders
ssept.py
ast_levels: 13 | nloc: 40 | url: https://github.com/microsoft/recommenders.git | complexity: 1 | token_counts: 378 | n_ast_errors: 0 | vocab_size: 95 | n_ast_nodes: 578 | language: Python
{ "docstring": "\n Model prediction for candidate (negative) items\n\n ", "language": "en", "n_whitespaces": 21, "n_words": 6, "vocab_size": 6 }
def predict(self, inputs): training = False user = inputs["user"] input_seq = inputs["input_seq"] candidate = inputs["candidate"] mask = tf.expand_dims(tf.cast(tf.not_equal(input_seq, 0), tf.float32), -1) seq_embeddings, positional_embeddings = self.embedding(input_seq) # (1, s, h) u0_latent = self.user_embedding_layer(user) u0_latent = u0_latent * (self.user_embedding_dim ** 0.5) # (1, 1, h) u0_latent = tf.squeeze(u0_latent, axis=0) # (1, h) test_user_emb = tf.tile(u0_latent, [1 + self.num_neg_test, 1]) # (101, h) u_latent = self.user_embedding_layer(user) u_latent = u_latent * (self.user_embedding_dim ** 0.5) # (b, 1, h) u_latent = tf.tile(u_latent, [1, tf.shape(input_seq)[1], 1]) # (b, s, h) seq_embeddings = tf.reshape( tf.concat([seq_embeddings, u_latent], 2), [tf.shape(input_seq)[0], -1, self.hidden_units], ) seq_embeddings += positional_embeddings # (b, s, h1 + h2) seq_embeddings *= mask seq_attention = seq_embeddings seq_attention = self.encoder(seq_attention, training, mask) seq_attention = self.layer_normalization(seq_attention) # (b, s, h1+h2) seq_emb = tf.reshape( seq_attention, [tf.shape(input_seq)[0] * self.seq_max_len, self.hidden_units], ) # (b*s1, h1+h2) candidate_emb = self.item_embedding_layer(candidate) # (b, s2, h2) candidate_emb = tf.squeeze(candidate_emb, axis=0) # (s2, h2) candidate_emb = tf.reshape( tf.concat([candidate_emb, test_user_emb], 1), [-1, self.hidden_units] ) # (b*s2, h1+h2) candidate_emb = tf.transpose(candidate_emb, perm=[1, 0]) # (h1+h2, b*s2) test_logits = tf.matmul(seq_emb, candidate_emb) # (b*s1, b*s2) test_logits = tf.reshape( test_logits, [tf.shape(input_seq)[0], self.seq_max_len, 1 + self.num_neg_test], ) # (1, s, 101) test_logits = test_logits[:, -1, :] # (1, 101) return test_logits
d_id: 70,075 | id: 243,702 | n_whitespaces: 197 | path: src/PIL/Image.py | n_words: 36 | n_identifiers: 12
def tobitmap(self, name="image"): self.load() if self.mode != "1": msg = "not a bitmap" raise ValueError(msg) data = self.tobytes("xbm") return b"".join( [ f"#define {name}_width {self.size[0]}\n".encode("ascii"), f"#define {name}_height {self.size[1]}\n".encode("ascii"), f"static char {name}_bits[] = {{\n".encode("ascii"), data, b"};", ] )
Improve exception traceback readability
tobitmap
2ae55ccbdad9c842929fb238ea1eb81d1f999024
Pillow
Image.py
ast_levels: 14 | nloc: 15 | url: https://github.com/python-pillow/Pillow.git | complexity: 2 | token_counts: 76 | n_ast_errors: 0 | vocab_size: 33 | n_ast_nodes: 173 | language: Python
{ "docstring": "\n Returns the image converted to an X11 bitmap.\n\n .. note:: This method only works for mode \"1\" images.\n\n :param name: The name prefix to use for the bitmap variables.\n :returns: A string containing an X11 bitmap.\n :raises ValueError: If the mode is not \"1\"\n ", "language": "en", "n_whitespaces": 87, "n_words": 44, "vocab_size": 35 }
def tobitmap(self, name="image"): self.load() if self.mode != "1": msg = "not a bitmap" raise ValueError(msg) data = self.tobytes("xbm") return b"".join( [ f"#define {name}_width {self.size[0]}\n".encode("ascii"), f"#define {name}_height {self.size[1]}\n".encode("ascii"), f"static char {name}_bits[] = {{\n".encode("ascii"), data, b"};", ] )
d_id: 42,422 | id: 177,528 | n_whitespaces: 455 | path: networkx/classes/digraph.py | n_words: 102 | n_identifiers: 22
def add_edges_from(self, ebunch_to_add, **attr): for e in ebunch_to_add: ne = len(e) if ne == 3: u, v, dd = e elif ne == 2: u, v = e dd = {} else: raise NetworkXError(f"Edge tuple {e} must be a 2-tuple or 3-tuple.") if u not in self._succ: if u is None: raise ValueError("None cannot be a node") self._succ[u] = self.adjlist_inner_dict_factory() self._pred[u] = self.adjlist_inner_dict_factory() self._node[u] = self.node_attr_dict_factory() if v not in self._succ: if v is None: raise ValueError("None cannot be a node") self._succ[v] = self.adjlist_inner_dict_factory() self._pred[v] = self.a
doc: update documentation when providing an iterator over current graph to add/remove_edges_from. (#6268) * doc for add_edges_from * doc for digraph * doc for multigraph * multigraph.add_nodes_from returns keylist * update docs for graph - edges * doc update: graph.add_nodes_from * doc update: graph.remove_nodes_from * doc update: graph.add_edges_from * doc update: rewording for graph.add_edges_from * doc update: graph.add_weighted_edges_from rewording * doc update: digraph updated as graph * doc update: digraph minor sync * doc update: multigraph same as graph * Update graph.py * Update digraph.py * Update multigraph.py
add_edges_from
979d54acba7c3d372c93d44c6c149700608ce8b0
networkx
digraph.py
ast_levels: 14 | nloc: 27 | url: https://github.com/networkx/networkx.git | complexity: 8 | token_counts: 217 | n_ast_errors: 0 | vocab_size: 55 | n_ast_nodes: 350 | language: Python
{ "docstring": "Add all the edges in ebunch_to_add.\n\n Parameters\n ----------\n ebunch_to_add : container of edges\n Each edge given in the container will be added to the\n graph. The edges must be given as 2-tuples (u, v) or\n 3-tuples (u, v, d) where d is a dictionary containing edge data.\n attr : keyword arguments, optional\n Edge data (or labels or objects) can be assigned using\n keyword arguments.\n\n See Also\n --------\n add_edge : add a single edge\n add_weighted_edges_from : convenient way to add weighted edges\n\n Notes\n -----\n Adding the same edge twice has no effect but any edge data\n will be updated when each duplicate edge is added.\n\n Edge attributes specified in an ebunch take precedence over\n attributes specified via keyword arguments.\n\n When adding edges from an iterator over the graph you are changing,\n a `RuntimeError` can be raised with message:\n `RuntimeError: dictionary changed size during iteration`. This\n happens when the graph's underlying dictionary is modified during\n iteration. To avoid this error, evaluate the iterator into a separate\n object, e.g. by using `list(iterator_of_edges)`, and pass this\n object to `G.add_edges_from`.\n\n Examples\n --------\n >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc\n >>> G.add_edges_from([(0, 1), (1, 2)]) # using a list of edge tuples\n >>> e = zip(range(0, 3), range(1, 4))\n >>> G.add_edges_from(e) # Add the path graph 0-1-2-3\n\n Associate data to edges\n\n >>> G.add_edges_from([(1, 2), (2, 3)], weight=3)\n >>> G.add_edges_from([(3, 4), (1, 4)], label=\"WN2898\")\n\n Evaluate an iterator over a graph if using it to modify the same graph\n\n >>> G = nx.DiGraph([(1, 2), (2, 3), (3, 4)])\n >>> # Grow graph by one new node, adding edges to all existing nodes.\n >>> # wrong way - will raise RuntimeError\n >>> # G.add_edges_from(((5, n) for n in G.nodes))\n >>> # right way - note that there will be no self-edge for node 5\n >>> G.add_edges_from(list((5, n) for n in G.nodes))\n ", "language": "en", "n_whitespaces": 629, "n_words": 305, "vocab_size": 185 }
def add_edges_from(self, ebunch_to_add, **attr): for e in ebunch_to_add: ne = len(e) if ne == 3: u, v, dd = e elif ne == 2: u, v = e dd = {} else: raise NetworkXError(f"Edge tuple {e} must be a 2-tuple or 3-tuple.") if u not in self._succ: if u is None: raise ValueError("None cannot be a node") self._succ[u] = self.adjlist_inner_dict_factory() self._pred[u] = self.adjlist_inner_dict_factory() self._node[u] = self.node_attr_dict_factory() if v not in self._succ: if v is None: raise ValueError("None cannot be a node") self._succ[v] = self.adjlist_inner_dict_factory() self._pred[v] = self.adjlist_inner_dict_factory() self._node[v] = self.node_attr_dict_factory() datadict = self._adj[u].get(v, self.edge_attr_dict_factory()) datadict.update(attr) datadict.update(dd) self._succ[u][v] = datadict self._pred[v][u] = datadict
d_id: 20,022 | id: 100,558 | n_whitespaces: 60 | path: lib/gpu_stats/amd.py | n_words: 16 | n_identifiers: 10
def _select_device(self) -> None: if os.path.exists(plaidml.settings.user_settings): # pylint:disable=no-member self._log("debug", "Setting PlaidML devices from user_setting
Refactor lib.gpu_stats (#1218) * inital gpu_stats refactor * Add dummy CPU Backend * Update Sphinx documentation
_select_device
bdbbad4d310fb606b6f412aa81e9f57ccd994e97
faceswap
amd.py
ast_levels: 10 | nloc: 8 | url: https://github.com/deepfakes/faceswap.git | complexity: 2 | token_counts: 37 | n_ast_errors: 0 | vocab_size: 16 | n_ast_nodes: 68 | language: Python
{ "docstring": "\n If the plaidml user configuration settings exist, then set the default GPU from the\n settings file, Otherwise set the GPU to be the one with most VRAM. ", "language": "en", "n_whitespaces": 42, "n_words": 27, "vocab_size": 20 }
def _select_device(self) -> None: if os.path.exists(plaidml.settings.user_settings): # pylint:disable=no-member self._log("debug", "Setting PlaidML devices from user_settings") else: self._select_largest_gpu()
d_id: 54,179 | id: 215,789 | n_whitespaces: 75 | path: tests/pytests/functional/modules/file/test_readlink.py | n_words: 26 | n_identifiers: 12
def test_readlink_non_canonical(file, source): int
Add some funtional tests Add functional tests for the following: - file.readlink - file.replace - file.symlink Remove unit tests for file.replace as they are duplicated in the added functional test
test_readlink_non_canonical
a35b29b2651bf33c5d5b45e64bc7765ffde4aff4
salt
test_readlink.py
ast_levels: 11 | nloc: 11 | url: https://github.com/saltstack/salt.git | complexity: 2 | token_counts: 65 | n_ast_errors: 0 | vocab_size: 21 | n_ast_nodes: 114 | language: Python
{ "docstring": "\n Test readlink where there are nested symlinks and canonicalize=False\n Should resolve to the first symlink\n ", "language": "en", "n_whitespaces": 25, "n_words": 15, "vocab_size": 15 }
def test_readlink_non_canonical(file, source): intermediate = source.parent / "intermediate.lnk" intermediate.symlink_to(source) target = source.parent / "symlink.lnk" target.symlink_to(intermediate) try: result = file.readlink(path=target) assert result == str(intermediate) finally: intermediate.unlink() target.unlink()
d_id: 4,229 | id: 22,159 | n_whitespaces: 140 | path: pipenv/patched/pip/_vendor/requests/utils.py | n_words: 49 | n_identifiers: 11
def select_proxy(url, proxies): proxies = proxies or {} urlparts = urlparse(url) if urlparts.hostname is None: return proxies.get(urlparts.scheme, proxies.get("al
Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.
select_proxy
cd5a9683be69c86c8f3adcd13385a9bc5db198ec
pipenv
utils.py
ast_levels: 12 | nloc: 17 | url: https://github.com/pypa/pipenv.git | complexity: 5 | token_counts: 91 | n_ast_errors: 0 | vocab_size: 35 | n_ast_nodes: 148 | language: Python
{ "docstring": "Select a proxy for the url, if applicable.\n\n :param url: The url being for the request\n :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs\n ", "language": "en", "n_whitespaces": 38, "n_words": 29, "vocab_size": 24 }
def select_proxy(url, proxies): proxies = proxies or {} urlparts = urlparse(url) if urlparts.hostname is None: return proxies.get(urlparts.scheme, proxies.get("all")) proxy_keys = [ urlparts.scheme + "://" + urlparts.hostname, urlparts.scheme, "all://" + urlparts.hostname, "all", ] proxy = None for proxy_key in proxy_keys: if proxy_key in proxies: proxy = proxies[proxy_key] break return proxy
d_id: 56,085 | id: 220,693 | n_whitespaces: 194 | path: python3.10.4/Lib/asyncio/sslproto.py | n_words: 31 | n_identifiers: 14
def eof_received(self): try:
add python 3.10.4 for windows
eof_received
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
sslproto.py
ast_levels: 15 | nloc: 12 | url: https://github.com/XX-net/XX-Net.git | complexity: 5 | token_counts: 65 | n_ast_errors: 0 | vocab_size: 29 | n_ast_nodes: 118 | language: Python
{ "docstring": "Called when the other end of the low-level stream\n is half-closed.\n\n If this returns a false value (including None), the transport\n will close itself. If it returns a true value, closing the\n transport is up to the protocol.\n ", "language": "en", "n_whitespaces": 74, "n_words": 38, "vocab_size": 29 }
def eof_received(self): try: if self._loop.get_debug(): logger.debug("%r received EOF", self) self._wakeup_waiter(ConnectionResetError) if not self._in_handshake: keep_open = self._app_protocol.eof_received() if keep_open: logger.warning('returning true from eof_received() ' 'has no effect when using ssl') finally: self._transport.close()
d_id: 39,190 | id: 162,332 | n_whitespaces: 157 | path: yt_dlp/extractor/common.py | n_words: 49 | n_identifiers: 10
def url_result(url, ie=None, video_id=None, video_title=None, *, url_transparent=False, **kwargs): if ie is not None: kwargs['ie_key'] = ie if isinstance(ie, str) else ie.ie_key() if video_id is not None: kwargs['id'] = video_id if video_title is not None: kwargs['title'] = video_title return { **kwargs, '_type': 'url_transparent' if url_transparent else 'url', 'url': url, }
[extractor] Improve `url_result` and related
url_result
311b6615d85d3530f2709c50e4223ff3b6b14361
yt-dlp
common.py
ast_levels: 11 | nloc: 12 | url: https://github.com/yt-dlp/yt-dlp.git | complexity: 6 | token_counts: 94 | n_ast_errors: 0 | vocab_size: 33 | n_ast_nodes: 151 | language: Python
{ "docstring": "Returns a URL that points to a page that should be processed", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 10 }
def url_result(url, ie=None, video_id=None, video_title=None, *, url_transparent=False, **kwargs): if ie is not None: kwargs['ie_key'] = ie if isinstance(ie, str) else ie.ie_key() if video_id is not None: kwargs['id'] = video_id if video_title is not None: kwargs['title'] = video_title return { **kwargs, '_type': 'url_transparent' if url_transparent else 'url', 'url': url, }
d_id: 23,904 | id: 110,064 | n_whitespaces: 121 | path: lib/mpl_toolkits/mplot3d/art3d.py | n_words: 48 | n_identifiers: 24
def _shade_colors(color, normals, lightsource=None): if lightsource is None: # chosen for backwards-compatibility lightsource = mcolors.LightSource(azdeg=225, altdeg=19.4712) with np.errstate(invalid="ignore"): shade = ((normals / np.linalg.norm(normals, axis=1, keepdims=True)) @ lightsource.direction) mask = ~np.isnan(shade)
Refactor shading
_shade_colors
d9d75f2bbf340034a93bdf8cd913fa83a71ece9c
matplotlib
art3d.py
ast_levels: 15 | nloc: 19 | url: https://github.com/matplotlib/matplotlib.git | complexity: 3 | token_counts: 176 | n_ast_errors: 0 | vocab_size: 41 | n_ast_nodes: 174 | language: Python
{ "docstring": "\n Shade *color* using normal vectors given by *normals*,\n assuming a *lightsource* (using default position if not given).\n *color* can also be an array of the same length as *normals*.\n ", "language": "en", "n_whitespaces": 42, "n_words": 29, "vocab_size": 28 }
def _shade_colors(color, normals, lightsource=None): if lightsource is None: # chosen for backwards-compatibility lightsource = mcolors.LightSource(azdeg=225, altdeg=19.4712) with np.errstate(invalid="ignore"): shade = ((normals / np.linalg.norm(normals, axis=1, keepdims=True)) @ lightsource.direction) mask = ~np.isnan(shade) if mask.any(): # convert dot product to allowed shading fractions in_norm = mcolors.Normalize(-1, 1) out_norm = mcolors.Normalize(0.3, 1).inverse
d_id: 76,391 | id: 260,641 | n_whitespaces: 31 | path: sklearn/feature_selection/_rfe.py | n_words: 10 | n_identifiers: 8
def score(self, X, y, **fit_params): check_is_fitted(self) return self.estimator_.score(self.transform(X), y, **fit_params)
MAINT solve long line reported by flake8 (#24065)
score
6e5ef2e9b8c64e6788428610ae884b9bf3d298a2
scikit-learn
_rfe.py
ast_levels: 9 | nloc: 3 | url: https://github.com/scikit-learn/scikit-learn.git | complexity: 1 | token_counts: 36 | n_ast_errors: 0 | vocab_size: 9 | n_ast_nodes: 56 | language: Python
{ "docstring": "Reduce X to the selected features and return the score of the estimator.\n\n Parameters\n ----------\n X : array of shape [n_samples, n_features]\n The input samples.\n\n y : array of shape [n_samples]\n The target values.\n\n **fit_params : dict\n Parameters to pass to the `score` method of the underlying\n estimator.\n\n .. versionadded:: 1.0\n\n Returns\n -------\n score : float\n Score of the underlying base estimator computed with the selected\n features returned by `rfe.transform(X)` and `y`.\n ", "language": "en", "n_whitespaces": 212, "n_words": 72, "vocab_size": 46 }
def score(self, X, y, **fit_params): check_is_fitted(self) return self.estimator_.score(self.transform(X), y, **fit_params)
d_id: 46,959 | id: 194,423 | n_whitespaces: 68 | path: kivy/effects/scroll.py | n_words: 18 | n_identifiers: 8
def reset(self, pos): self.value = pos self.velocity = 0 if self.history:
ScrollEffect: Fix layout when ScrollView gets resized
reset
b046b560ef3cebbe2573327017793bc2c348aecd
kivy
scroll.py
ast_levels: 12 | nloc: 6 | url: https://github.com/kivy/kivy.git | complexity: 2 | token_counts: 48 | n_ast_errors: 0 | vocab_size: 15 | n_ast_nodes: 77 | language: Python
{ "docstring": "(internal) Reset the value and the velocity to the `pos`.\n Mostly used when the bounds are checked.\n ", "language": "en", "n_whitespaces": 31, "n_words": 17, "vocab_size": 14 }
def reset(self, pos): self.value = pos self.velocity = 0 if self.history: val = self.history[-1][1] self.history = [(time(), val)]
ast_errors: @pytest.mark.parametrize( "percentage, expected_result", [ (1, 2), (100, 50), (50, 26), ], )
d_id: 108,182 | id: 309,482 | n_whitespaces: 53 | path: tests/components/tradfri/test_util.py | n_words: 19 | n_identifiers: 7
def test_from_fan_speed(fan_speed, expected_result): assert _from_fan_speed(fan_speed) == expected_result @pytes
Bump pytradfri to 8.0.1 and fix fan preset mode "Auto" bug (#63920) * Move util functions * Fix errors * Revert changes * Fix tests * Use self.async_set_percentage() * Fix calculation functions and associated tests * Handle case of 0 * Update tests/components/tradfri/test_util.py Co-authored-by: Martin Hjelmare <[email protected]> * Update tests/components/tradfri/test_util.py Co-authored-by: Martin Hjelmare <[email protected]> * Update tests/components/tradfri/test_util.py Co-authored-by: Martin Hjelmare <[email protected]> * Handle case of 0 * Update homeassistant/components/tradfri/fan.py Co-authored-by: Martin Hjelmare <[email protected]> Co-authored-by: Martin Hjelmare <[email protected]>
test_from_fan_speed
b52a8ba37a5e5e05b80beddff06b116371941d86
core
test_util.py
ast_levels: 8 | nloc: 2 | url: https://github.com/home-assistant/core.git | complexity: 1 | token_counts: 15 | n_ast_errors: 1 | vocab_size: 19 | n_ast_nodes: 69 | language: Python
{ "docstring": "Test that we can convert fan speed to percentage value.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
def test_from_fan_speed(fan_speed, expected_result): assert _from_fan_speed(fan_speed) == expected_result @pytest.mark.parametrize( "percentage, expected_result", [ (1, 2), (100, 50), (50, 26), ], )
ast_errors: @pytest.fixture
d_id: 18,735 | id: 91,199 | n_whitespaces: 17 | path: src/sentry/utils/pytest/fixtures.py | n_words: 9 | n_identifiers: 7
def task_runner(): from sentry.testutils.helpers.task_runner import TaskRunner return Task
ref(proj-config): Introduce new tasks (#35238)
task_runner
2058dd477767e47c9fce603766a45e1fbe29c33d
sentry
fixtures.py
ast_levels: 6 | nloc: 3 | url: https://github.com/getsentry/sentry.git | complexity: 1 | token_counts: 17 | n_ast_errors: 1 | vocab_size: 8 | n_ast_nodes: 35 | language: Python
{ "docstring": "Context manager that ensures Celery tasks run directly inline where invoked.\n\n While this context manager is active any Celery tasks created will run immediately at\n the callsite rather than being sent to RabbitMQ and handled by a worker.\n ", "language": "en", "n_whitespaces": 47, "n_words": 38, "vocab_size": 34 }
def task_runner(): from sentry.testutils.helpers.task_runner import TaskRunner return TaskRunner @pytest.fixture
d_id: 51,579 | id: 206,586 | n_whitespaces: 15 | path: django/utils/crypto.py | n_words: 9 | n_identifiers: 9
def get_random_string(length, allowed_chars=RANDOM_STRING_CHARS): return "".join(secrets.choice(allo
Refs #33476 -- Reformatted code with Black.
get_random_string
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
crypto.py
ast_levels: 10 | nloc: 2 | url: https://github.com/django/django.git | complexity: 2 | token_counts: 29 | n_ast_errors: 0 | vocab_size: 9 | n_ast_nodes: 49 | language: Python
{ "docstring": "\n Return a securely generated random string.\n\n The bit length of the returned value can be calculated with the formula:\n log_2(len(allowed_chars)^length)\n\n For example, with default `allowed_chars` (26+26+10), this gives:\n * length: 12, bit length =~ 71 bits\n * length: 22, bit length =~ 131 bits\n ", "language": "en", "n_whitespaces": 74, "n_words": 44, "vocab_size": 34 }
def get_random_string(length, allowed_chars=RANDOM_STRING_CHARS): return "".join(secrets.choice(allowed_chars) for i in range(length))
d_id: 73,901 | id: 251,953 | n_whitespaces: 35 | path: test/mitmproxy/proxy/test_tutils.py | n_words: 17 | n_identifiers: 8
def test_command_reply(tplaybook): tplaybook >> TEvent() tplaybook << TCommand() tplaybook >> tutils.reply() assert tplaybook assert tplaybook.actual[1] == tplaybook.actual[2].command
make it black!
test_command_reply
b3587b52b25077f68116b9852b041d33e7fc6601
mitmproxy
test_tutils.py
ast_levels: 9 | nloc: 6 | url: https://github.com/mitmproxy/mitmproxy.git | complexity: 1 | token_counts: 44 | n_ast_errors: 0 | vocab_size: 12 | n_ast_nodes: 69 | language: Python
{ "docstring": "CommandReplies can use relative offsets to point to the matching command.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 10 }
def test_command_reply(tplaybook): tplaybook >> TEvent() tplaybook << TCommand() tplaybook >> tutils.reply() assert tplaybook assert tplaybook.actual[1] == tplaybook.actual[2].command
d_id: 80,259 | id: 269,762 | n_whitespaces: 152 | path: keras/benchmarks/distribution_util.py | n_words: 47 | n_identifiers: 11
def _mirrored_cross_device_ops(all_reduce_alg, num_packs): if all_reduce_alg is None: return None mirrored_all_reduce_options = { "nccl": tf.distribute.NcclAllReduce, "hierarchical_copy": tf.distribute.HierarchicalCopyAllReduce, } if al
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
_mirrored_cross_device_ops
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
distribution_util.py
ast_levels: 13 | nloc: 16 | url: https://github.com/keras-team/keras.git | complexity: 3 | token_counts: 65 | n_ast_errors: 0 | vocab_size: 40 | n_ast_nodes: 110 | language: Python
{ "docstring": "Return a CrossDeviceOps based on all_reduce_alg and num_packs.\n\n Args:\n all_reduce_alg: a string specifying which cross device op to pick, or None.\n num_packs: an integer specifying number of packs for the cross device op.\n\n Returns:\n tf.distribute.CrossDeviceOps object or None.\n\n Raises:\n ValueError: if `all_reduce_alg` not in [None, \"nccl\", \"hierarchical_copy\"].\n ", "language": "en", "n_whitespaces": 79, "n_words": 47, "vocab_size": 41 }
def _mirrored_cross_device_ops(all_reduce_alg, num_packs): if all_reduce_alg is None: return None mirrored_all_reduce_options = { "nccl": tf.distribute.NcclAllReduce, "hierarchical_copy": tf.distribute.HierarchicalCopyAllReduce, } if all_reduce_alg not in mirrored_all_reduce_options: raise ValueError( "When used with `mirrored`, valid values for all_reduce_alg are " "[`nccl`, `hierarchical_copy`]. Supplied value: {}".format( all_reduce_alg ) ) cross_device_ops_class = mirrored_all_reduce_options[all_reduce_alg] return cross_device_ops_class(num_packs=num_packs)
d_id: 7,966 | id: 43,461 | n_whitespaces: 46 | path: tests/providers/microsoft/azure/hooks/test_asb.py | n_words: 11 | n_identifiers: 14
def test_delete_queue(self, mock_sb_admin_client): hook = AdminClientHook(azure_service_bus_conn_id=sel
Implement Azure Service Bus Queue Operators (#24038) Implemented Azure Service Bus Queue based Operator's to create queue, send message to the queue and receive message(list of message or batch message) and delete queue in azure service - Added `AzureServiceBusCreateQueueOperator` - Added `AzureServiceBusSendMessageOperator` - Added `AzureServiceBusReceiveMessageOperator` - Added `AzureServiceBusDeleteQueueOperator` - Added Example DAG - Added Documentation - Added hooks and connection type in - provider yaml file - Added unit Test case, doc strings
test_delete_queue
09f38ad3f6872bae5059a1de226362eb358c4a7a
airflow
test_asb.py
ast_levels: 13 | nloc: 5 | url: https://github.com/apache/airflow.git | complexity: 1 | token_counts: 52 | n_ast_errors: 0 | vocab_size: 10 | n_ast_nodes: 87 | language: Python
{ "docstring": "\n Test Delete queue functionality by passing queue name, assert the function with values,\n mock the azure service bus function `delete_queue`\n ", "language": "en", "n_whitespaces": 43, "n_words": 20, "vocab_size": 17 }
def test_delete_queue(self, mock_sb_admin_client): hook = AdminClientHook(azure_service_bus_conn_id=self.conn_id) hook.delete_queue(self.queue_name) expected_calls = [mock.call().__enter__().delete_queue(self.queue_name)] mock_sb_admin_client.assert_has_calls(expected_calls)
d_id: 57,056 | id: 223,772 | n_whitespaces: 77 | path: python3.10.4/Lib/email/message.py | n_words: 25 | n_identifiers: 10
def get_content_disposition(self): value = self.get('content-disposition') if value is None: return None c_d = _splitparam(value)[0].lower() retu
add python 3.10.4 for windows
get_content_disposition
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
message.py
ast_levels: 11 | nloc: 6 | url: https://github.com/XX-net/XX-Net.git | complexity: 2 | token_counts: 36 | n_ast_errors: 0 | vocab_size: 20 | n_ast_nodes: 73 | language: Python
{ "docstring": "Return the message's content-disposition if it exists, or None.\n\n The return values can be either 'inline', 'attachment' or None\n according to the rfc2183.\n ", "language": "en", "n_whitespaces": 44, "n_words": 23, "vocab_size": 21 }
def get_content_disposition(self): value = self.get('content-disposition') if value is None: return None c_d = _splitparam(value)[0].lower() return c_d # I.e. def walk(self): ... from email.iterators import walk
d_id: 49,056 | id: 198,878 | n_whitespaces: 165 | path: sympy/printing/aesaracode.py | n_words: 51 | n_identifiers: 13
def _get_or_create(self, s, name=None, dtype=None, broadcastable=None): # Defaults if name is None: name = s.name if dtype is None: dtype = 'floatX' if broadcastable is None: broadcastable = () key = self._get_key(s, name, dtype=dtype, broadcastable=broadcastable) if key in self.cache: return self.cache[key] value = aet.tensor(name=name, dtype=dtype, shape=broadcastable) self.cache[key] = value
fix(printing): change Aesara argument broadcastable to shape
_get_or_create
68bd82de645a61f4bbc0b6246e70959373c9cba2
sympy
aesaracode.py
ast_levels: 9 | nloc: 13 | url: https://github.com/sympy/sympy.git | complexity: 5 | token_counts: 107 | n_ast_errors: 0 | vocab_size: 30 | n_ast_nodes: 164 | language: Python
{ "docstring": "\n Get the Aesara variable for a SymPy symbol from the cache, or create it\n if it does not exist.\n ", "language": "en", "n_whitespaces": 41, "n_words": 19, "vocab_size": 17 }
def _get_or_create(self, s, name=None, dtype=None, broadcastable=None): # Defaults if name is None: name = s.name if dtype is None: dtype = 'floatX' if broadcastable is None: broadcastable = () key = self._get_key(s, name, dtype=dtype, broadcastable=broadcastable) if key in self.cache: return self.cache[key] value = aet.tensor(name=name, dtype=dtype, shape=broadcastable) self.cache[key] = value return value
d_id: 51,203 | id: 205,769 | n_whitespaces: 111 | path: django/db/models/query.py | n_words: 29 | n_identifiers: 11
def defer(self, *fields): self._not_support_combined_queries("defer") if self._fields is not None: raise TypeError("Cannot call defer() after .values() or .values_list()") clone = self._chain() if fields == (None,):
Refs #33476 -- Reformatted code with Black.
defer
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
query.py
ast_levels: 11 | nloc: 10 | url: https://github.com/django/django.git | complexity: 3 | token_counts: 62 | n_ast_errors: 0 | vocab_size: 27 | n_ast_nodes: 107 | language: Python
{ "docstring": "\n Defer the loading of data for certain fields until they are accessed.\n Add the set of deferred fields to any existing set of deferred fields.\n The only exception to this is if None is passed in as the only\n parameter, in which case removal all deferrals.\n ", "language": "en", "n_whitespaces": 82, "n_words": 46, "vocab_size": 35 }
def defer(self, *fields): self._not_support_combined_queries("defer") if self._fields is not None: raise TypeError("Cannot call defer() after .values() or .values_list()") clone = self._chain() if fields == (None,): clone.query.clear_deferred_loading() else: clone.query.add_deferred_loading(fields) return clone
d_id: 55,500 | id: 218,848 | n_whitespaces: 45 | path: python3.10.4/Lib/lib2to3/pytree.py | n_words: 13 | n_identifiers: 6
def match_seq(self, nodes, results=None): if len(nodes) != 1:
add python 3.10.4 for windows
match_seq
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
pytree.py
ast_levels: 8 | nloc: 4 | url: https://github.com/XX-net/XX-Net.git | complexity: 2 | token_counts: 34 | n_ast_errors: 0 | vocab_size: 12 | n_ast_nodes: 53 | language: Python
{ "docstring": "\n Does this pattern exactly match a sequence of nodes?\n\n Default implementation for non-wildcard patterns.\n ", "language": "en", "n_whitespaces": 36, "n_words": 14, "vocab_size": 14 }
def match_seq(self, nodes, results=None): if len(nodes) != 1: return False return self.match(nodes[0], results)
d_id: 18,336 | id: 87,837 | n_whitespaces: 24 | path: src/sentry/auth/access.py | n_words: 10 | n_identifiers: 9
def team_ids_with_membership(self) -> FrozenSet[int]: return frozenset(team.id for team in self._
ref(access): Remove models from Access fields (#40940) Anticipating changes for Hybrid Cloud silo boundaries, change the public interface of the `Access` class to not expose any ORM models as dataclass fields. As a first step, replace all such objects with their raw IDs. (Credit to @corps for the underlying idea. Future steps: replace models as method parameters; replace raw IDs with API object representations.)
team_ids_with_membership
b3ce25d7c3ce85a9b7195f97c6d3d76c764e1808
sentry
access.py
ast_levels: 11 | nloc: 11 | url: https://github.com/getsentry/sentry.git | complexity: 2 | token_counts: 28 | n_ast_errors: 0 | vocab_size: 10 | n_ast_nodes: 46 | language: Python
{ "docstring": "Return the IDs of teams in which the user has actual membership.\n\n This represents the set of all teams for which `has_team_membership` returns\n true. Use that method where possible and use this property only when you need\n to iterate or query for all such teams.\n\n Compare to accessible_team_ids, which is equal to this property in the\n typical case but represents a superset of IDs in case of superuser access.\n ", "language": "en", "n_whitespaces": 111, "n_words": 69, "vocab_size": 49 }
def team_ids_with_membership(self) -> FrozenSet[int]: return frozenset(team.id for team in self._team_memberships.keys())
d_id: 30,547 | id: 135,117 | n_whitespaces: 337 | path: rllib/models/tests/test_distributions.py | n_words: 99 | n_identifiers: 32
def test_gumbel_softmax(self): for fw, sess in framework_iterator(frameworks=("tf2", "tf"), session=True): batch_size = 1000 num_categories = 5 input_space = Box(-1.0, 1.0, shape=(batch_size, num_categories)) input_space.seed(42) # Batch of size=n and deterministic. inputs = input_space.sample() gumbel_softmax = GumbelSoftmax(inputs, {}, temperature=1.0) expected = softmax(inputs) # Sample n times, expect always mean value (deterministic draw). out = gumbel_softmax.deterministic_sample() check(out, expected)
[RLlib] Deprecate `AlgorithmConfig.framework("tfe")`: Use `tf2` instead. (#29755)
test_gumbel_softmax
432f023642731bf53aac9b6c778f9dd7b1d82a57
ray
test_distributions.py
ast_levels: 15 | nloc: 18 | url: https://github.com/ray-project/ray.git | complexity: 3 | token_counts: 188 | n_ast_errors: 0 | vocab_size: 71 | n_ast_nodes: 286 | language: Python
{ "docstring": "Tests the GumbelSoftmax ActionDistribution (tf + eager only).", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
def test_gumbel_softmax(self): for fw, sess in framework_iterator(frameworks=("tf2", "tf"), session=True): batch_size = 1000 num_categories = 5 input_space = Box(-1.0, 1.0, shape=(batch_size, num_categories)) input_space.seed(42) # Batch of size=n and deterministic. inputs = input_space.sample() gumbel_softmax = GumbelSoftmax(inputs, {}, temperature=1.0) expected = softmax(inputs) # Sample n times, expect always mean value (deterministic draw). out = gumbel_softmax.deterministic_sample() check(out, expected) # Batch of size=n and non-deterministic -> expect roughly that # the max-likelihood (argmax) ints are output (most of the time). inputs = input_space.sample() gumbel_softmax = GumbelSoftmax(inputs, {}, temperature=1.0) expected_mean = np.mean(np.argmax(inputs, -1)).astype(np.float32) outs = gumbel_softmax.sample() if sess: outs = sess.run(outs) check(np.mean(np.argmax(outs, -1)), expected_mean, rtol=0.08)
d_id: 1,562 | id: 9,140 | n_whitespaces: 105 | path: parsing/dml_csr/utils/miou.py | n_words: 43 | n_identifiers: 16
def get_confusion_matrix(gt_label, pred_label, num_classes): index = (gt_label * num_classes + pred_label).astype('int32') label_count = np.bincount(index) confusion_matrix = np.zeros((num_classes, num_classes)) for i_label in range(num_classes): for i_pred_label in range(num_classes): cur_index = i_label * num_classes + i_pred_label if cur_index < len(label_count): confusion_matrix[i_label, i_pred_label] =
Create miou.py
get_confusion_matrix
995b44897fe6158bb70ad03a3e79f517f65f9034
insightface
miou.py
ast_levels: 13 | nloc: 10 | url: https://github.com/deepinsight/insightface.git | complexity: 4 | token_counts: 88 | n_ast_errors: 0 | vocab_size: 29 | n_ast_nodes: 138 | language: Python
{ "docstring": "\n Calcute the confusion matrix by given label and pred\n :param gt_label: the ground truth label\n :param pred_label: the pred label\n :param num_classes: the nunber of class\n :return: the confusion matrix\n ", "language": "en", "n_whitespaces": 49, "n_words": 30, "vocab_size": 19 }
def get_confusion_matrix(gt_label, pred_label, num_classes): index = (gt_label * num_classes + pred_label).astype('int32') label_count = np.bincount(index) confusion_matrix = np.zeros((num_classes, num_classes)) for i_label in range(num_classes): for i_pred_label in range(num_classes): cur_index = i_label * num_classes + i_pred_label if cur_index < len(label_count): confusion_matrix[i_label, i_pred_label] = label_count[cur_index] return confusion_matrix
d_id: 38,875 | id: 161,059 | n_whitespaces: 232 | path: ppg2mel/utils/nets_utils.py | n_words: 103 | n_identifiers: 35
def make_pad_mask(lengths, xs=None, length_dim=-1): if length_dim == 0: raise ValueError('length_dim cannot be 0: {}'.format(length_dim)) if not isinstance(lengths, list): lengths = lengths.tolist() bs = int(len(lengths)) if xs is None: maxlen = int(max(lengths)) else: maxlen = xs.size(length_dim) seq_range = torch.arange(0, maxlen, dtype=torch.int64) seq_range_expand = seq_range.unsqueeze(0).expand(bs, maxlen) seq_length_expand = seq_range_expand.new(lengths).unsqueeze(-1) mask = seq_range_expand >= seq_length_expand if xs is not None: assert xs.size(0) == bs, (xs.size(0), bs) if length_dim < 0: length_dim = xs.dim() + length_dim # ind = (:, None, ..., None, :, , None, ..., None) ind =
Init ppg extractor and ppg2mel (#375) * Init ppg extractor and ppg2mel * add preprocess and training * FIx known issues * Update __init__.py Allow to gen audio * Fix length issue * Fix bug of preparing fid * Fix sample issues * Add UI usage of PPG-vc
make_pad_mask
b617a87ee40ab384767a27335313c2c65ee094ec
MockingBird
nets_utils.py
ast_levels: 15 | nloc: 22 | url: https://github.com/babysor/MockingBird.git | complexity: 8 | token_counts: 219 | n_ast_errors: 0 | vocab_size: 66 | n_ast_nodes: 347 | language: Python
{ "docstring": "Make mask tensor containing indices of padded part.\n\n Args:\n lengths (LongTensor or List): Batch of lengths (B,).\n xs (Tensor, optional): The reference tensor. If set, masks will be the same shape as this tensor.\n length_dim (int, optional): Dimension indicator of the above tensor. See the example.\n\n Returns:\n Tensor: Mask tensor containing indices of padded part.\n dtype=torch.uint8 in PyTorch 1.2-\n dtype=torch.bool in PyTorch 1.2+ (including 1.2)\n\n Examples:\n With only lengths.\n\n >>> lengths = [5, 3, 2]\n >>> make_non_pad_mask(lengths)\n masks = [[0, 0, 0, 0 ,0],\n [0, 0, 0, 1, 1],\n [0, 0, 1, 1, 1]]\n\n With the reference tensor.\n\n >>> xs = torch.zeros((3, 2, 4))\n >>> make_pad_mask(lengths, xs)\n tensor([[[0, 0, 0, 0],\n [0, 0, 0, 0]],\n [[0, 0, 0, 1],\n [0, 0, 0, 1]],\n [[0, 0, 1, 1],\n [0, 0, 1, 1]]], dtype=torch.uint8)\n >>> xs = torch.zeros((3, 2, 6))\n >>> make_pad_mask(lengths, xs)\n tensor([[[0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 1]],\n [[0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1]],\n [[0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1]]], dtype=torch.uint8)\n\n With the reference tensor and dimension indicator.\n\n >>> xs = torch.zeros((3, 6, 6))\n >>> make_pad_mask(lengths, xs, 1)\n tensor([[[0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1, 1]],\n [[0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1]],\n [[0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1]]], dtype=torch.uint8)\n >>> make_pad_mask(lengths, xs, 2)\n tensor([[[0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 1]],\n [[0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1]],\n [[0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1]]], dtype=torch.uint8)\n\n ", "language": "en", "n_whitespaces": 1334, "n_words": 417, "vocab_size": 87 }
def make_pad_mask(lengths, xs=None, length_dim=-1): if length_dim == 0: raise ValueError('length_dim cannot be 0: {}'.format(length_dim)) if not isinstance(lengths, list): lengths = lengths.tolist() bs = int(len(lengths)) if xs is None: maxlen = int(max(lengths)) else: maxlen = xs.size(length_dim) seq_range = torch.arange(0, maxlen, dtype=torch.int64) seq_range_expand = seq_range.unsqueeze(0).expand(bs, maxlen) seq_length_expand = seq_range_expand.new(lengths).unsqueeze(-1) mask = seq_range_expand >= seq_length_expand if xs is not None: assert xs.size(0) == bs, (xs.size(0), bs) if length_dim < 0: length_dim = xs.dim() + length_dim # ind = (:, None, ..., None, :, , None, ..., None) ind = tuple(slice(None) if i in (0, length_dim) else None for i in range(xs.dim())) mask = mask[ind].expand_as(xs).to(xs.device) return mask
d_id: 17,843 | id: 84,491 | n_whitespaces: 149 | path: zerver/tests/test_upload.py | n_words: 58 | n_identifiers: 9
def test_guess_content_type_from_filename(self) -> None: data, content_type = encode_multipart_formdata({"file": ("somefile"
upload: Remove `mimetype` url parameter in `get_file_info`. This `mimetype` parameter was introduced in c4fa29a and its last usage removed in 5bab2a3. This parameter was undocumented in the OpenAPI endpoint documentation for `/user_uploads`, therefore there shouldn't be client implementations that rely on it's presence. Removes the `request.GET` call for the `mimetype` parameter and replaces it by getting the `content_type` value from the file, which is an instance of Django's `UploadedFile` class and stores that file metadata as a property. If that returns `None` or an empty string, then we try to guess the `content_type` from the filename, which is the same as the previous behaviour when `mimetype` was `None` (which we assume has been true since it's usage was removed; see above). If unable to guess the `content_type` from the filename, we now fallback to "application/octet-stream", instead of an empty string or `None` value. Also, removes the specific test written for having `mimetype` as a url parameter in the request, and replaces it with a test that covers when we try to guess `content_type` from the filename.
test_guess_content_type_from_filename
aa796af0a8b665ee730a059bc2594ae21cb1e828
zulip
test_upload.py
ast_levels: 12 | nloc: 15 | url: https://github.com/zulip/zulip.git | complexity: 1 | token_counts: 100 | n_ast_errors: 0 | vocab_size: 40 | n_ast_nodes: 170 | language: Python
{ "docstring": "\n Test coverage for files without content-type in the metadata;\n in which case we try to guess the content-type from the filename.\n ", "language": "en", "n_whitespaces": 43, "n_words": 21, "vocab_size": 17 }
def test_guess_content_type_from_filename(self) -> None: data, content_type = encode_multipart_formdata({"file": ("somefile", b"zulip!", None)}) result = self.api_post( self.example_user("hamlet"), "/api/v1/user_uploads", data, content_type=content_type ) self.assert_json_success(result) data, content_type = encode_multipart_formdata({"file": ("somefile.txt", b"zulip!", None)}) result = self.api_post( self.example_user("hamlet"), "/api/v1/user_uploads", data, content_type=content_type ) self.assert_json_success(result) # This test will go through the code path for uploading files onto LOCAL storage # when Zulip is in DEVELOPMENT mode.
d_id: 56,518 | id: 221,801 | n_whitespaces: 166 | path: python3.10.4/Lib/ctypes/_aix.py | n_words: 85 | n_identifiers: 9
def get_member(name, members): # look first for a generic match - prepend lib and append .so expr = rf'lib{name}\.so' member = get_one_match(expr, members) if member: return member elif AIX_ABI == 64: expr = rf'lib{name
add python 3.10.4 for windows
get_member
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
_aix.py
ast_levels: 11 | nloc: 15 | url: https://github.com/XX-net/XX-Net.git | complexity: 5 | token_counts: 67 | n_ast_errors: 0 | vocab_size: 49 | n_ast_nodes: 121 | language: Python
{ "docstring": "\n Return an archive member matching the request in name.\n Name is the library name without any prefix like lib, suffix like .so,\n or version number.\n Given a list of members find and return the most appropriate result\n Priority is given to generic libXXX.so, then a versioned libXXX.so.a.b.c\n and finally, legacy AIX naming scheme.\n ", "language": "en", "n_whitespaces": 75, "n_words": 53, "vocab_size": 47 }
def get_member(name, members): # look first for a generic match - prepend lib and append .so expr = rf'lib{name}\.so' member = get_one_match(expr, members) if member: return member elif AIX_ABI == 64: expr = rf'lib{name}64\.so' member = get_one_match(expr, members) if member: return member # since an exact match with .so as suffix was not found # look for a versioned name # If a versioned name is not found, look for AIX legacy member name member = get_version(name, members) if member: return member else: return get_legacy(members)
d_id: 8,176 | id: 44,137 | n_whitespaces: 40 | path: tests/core/test_impersonation_tests.py | n_words: 12 | n_identifiers: 8
def check_original_docker_image(): if not os.path.isfile('/.dockerenv') or os.environ.get('PYTHON_BASE_IMAG
Fixed tests failing on Python 3.8 (#21022) The change #21003 broke TestDeprecation class tests by removing TestCase and leaving self.skipTest. This change replaces self.skipTest with pytest.skipTest everywhere.
check_original_docker_image
b96e4992b5df24898e169c01fe23e4cb7d32dd94
airflow
test_impersonation_tests.py
ast_levels: 10 | nloc: 9 | url: https://github.com/apache/airflow.git | complexity: 3 | token_counts: 33 | n_ast_errors: 0 | vocab_size: 12 | n_ast_nodes: 60 | language: Python
{ "docstring": "Adding/removing a user as part of a test is very bad for host os\n(especially if the user already existed to begin with on the OS), therefore we check if we run inside a\nthe official docker container and only allow to run the test there. This is done by checking /.dockerenv\nfile (always present inside container) and checking for PYTHON_BASE_IMAGE variable.\n", "language": "en", "n_whitespaces": 58, "n_words": 62, "vocab_size": 46 }
def check_original_docker_image(): if not os.path.isfile('/.dockerenv') or os.environ.get('PYTHON_BASE_IMAGE') is None: raise pytest.skip( )
d_id: 27,392 | id: 123,487 | n_whitespaces: 631 | path: lib/core/option.py | n_words: 253 | n_identifiers: 37
def _useWizardInterface(): if not conf.wizard: return logger.info("starting wizard interface") while not conf.url: message = "Please enter full target URL (-u): " conf.url = readInput(message, default=None) message = "%s data (--data) [Enter for None]: " % ((conf.method if conf.method != HTTPMETHOD.GET else None) or HTTPMETHOD.POST) conf.data = readInput(message, default=None) if not (any('=' in _ for _ in (conf.url, conf.data)) or '*' in conf.url): warnMsg = "no GET and/or %s parameter(s) found for testing " % ((conf.method if conf.method != HTTPMETHOD.GET else None) or HTTPMETHOD.POST) warnMsg += "(e.g. GET parameter 'id' in 'http://www.site.com/vuln.php?id=1'). " if not conf.crawlDepth and not conf.forms: warnMsg += "Will search for forms" conf.forms = True logger.warning(warnMsg) choice = None while choice is None or choice not in ("", "1", "2", "3"): message = "Injection difficulty (--level/--risk). Please choose:\n" message += "[1] Normal (default)\n[2] Medium\n[3] Hard" choice = readInput(message, default='1') if choice == '2': conf.risk = 2 conf.level = 3 elif choice == '3': conf.risk = 3 conf.level = 5 else: conf.risk = 1 conf.level = 1 if not conf.getAll: choice = None while choice is None or choice not in ("", "1", "2", "3"): message = "Enumeration (--banner/--current-user/etc). Please choose:\n" message += "[1] Basic (default)\n[2] Intermediate\n[3] All" choice = readInput(message, default='1') if choice == '2': options = WIZARD.INTERMEDIATE elif choice == '3': options = WIZARD.ALL else: options = WIZARD.BASIC for _ in options: conf.__setitem__(_, Tru
Fixing DeprecationWarning (logger.warn)
_useWizardInterface
df4293473d2fb6e887e31522cab5aff95e201581
sqlmap
option.py
ast_levels: 15 | nloc: 50 | url: https://github.com/sqlmapproject/sqlmap.git | complexity: 22 | token_counts: 350 | n_ast_errors: 0 | vocab_size: 128 | n_ast_nodes: 611 | language: Python
{ "docstring": "\n Presents simple wizard interface for beginner users\n ", "language": "en", "n_whitespaces": 14, "n_words": 7, "vocab_size": 7 }
def _useWizardInterface(): if not conf.wizard: return logger.info("starting wizard interface") while not conf.url: message = "Please enter full target URL (-u): " conf.url = readInput(message, default=None) message = "%s data (--data) [Enter for None]: " % ((conf.method if conf.method != HTTPMETHOD.GET else None) or HTTPMETHOD.POST) conf.data = readInput(message, default=None) if not (any('=' in _ for _ in (conf.url, conf.data)) or '*' in conf.url): warnMsg = "no GET and/or %s parameter(s) found for testing " % ((conf.method if conf.method != HTTPMETHOD.GET else None) or HTTPMETHOD.POST) warnMsg += "(e.g. GET parameter 'id' in 'http://www.site.com/vuln.php?id=1'). " if not conf.crawlDepth and not conf.forms: warnMsg += "Will search for forms" conf.forms = True logger.warning(warnMsg) choice = None while choice is None or choice not in ("", "1", "2", "3"): message = "Injection difficulty (--level/--risk). Please choose:\n" message += "[1] Normal (default)\n[2] Medium\n[3] Hard" choice = readInput(message, default='1') if choice == '2': conf.risk = 2 conf.level = 3 elif choice == '3': conf.risk = 3 conf.level = 5 else: conf.risk = 1 conf.level = 1 if not conf.getAll: choice = None while choice is None or choice not in ("", "1", "2", "3"): message = "Enumeration (--banner/--current-user/etc). Please choose:\n" message += "[1] Basic (default)\n[2] Intermediate\n[3] All" choice = readInput(message, default='1') if choice == '2': options = WIZARD.INTERMEDIATE elif choice == '3': options = WIZARD.ALL else: options = WIZARD.BASIC for _ in options: conf.__setitem__(_, True) logger.debug("muting sqlmap.. it will do the magic for you") conf.verbose = 0 conf.batch = True conf.threads = 4 dataToStdout("\nsqlmap is running, please wait..\n\n") kb.wizardMode = True
d_id: 10,737 | id: 53,209 | n_whitespaces: 146 | path: src/prefect/orion/alembic/env.py | n_words: 63 | n_identifiers: 16
async def run_migrations_online() -> None: engine = await db_interface.engine() versi
initial commit
run_migrations_online
88a2e91018e5efe2970ba86238d69d1031350593
prefect
env.py
ast_levels: 16 | nloc: 20 | url: https://github.com/PrefectHQ/prefect.git | complexity: 4 | token_counts: 117 | n_ast_errors: 0 | vocab_size: 44 | n_ast_nodes: 183 | language: Python
{ "docstring": "\n Run migrations in 'online' mode.\n\n In this scenario we need to create an Engine\n and associate a connection with the context.\n ", "language": "en", "n_whitespaces": 34, "n_words": 21, "vocab_size": 21 }
async def run_migrations_online() -> None: engine = await db_interface.engine() versions_dir = context.get_x_argument(as_dictionary=True).get("versions_dir", None) if versions_dir is None: # if version dir is not explicitly provided determine versions location from dialect dialect = get_dialect(engine=engine) if dialect.name == "postgresql": versions_dir = Path(context.script.dir / "postgresql") elif dialect.name == "sqlite": versions_dir = Path(context.script.dir / "sqlite") else: raise ValueError(f"No versions dir exists for dialect: {dialect.name}") context.script.version_locations = [versions_dir]
ast_errors: @frappe.whitelist() @frappe.validate_and_sanitize_search_inputs
d_id: 13,751 | id: 64,915 | n_whitespaces: 16 | path: erpnext/accounts/doctype/payment_order/payment_order.py | n_words: 23 | n_identifiers: 13
def get_mop_query(doctype, txt, searchfield, start, page_len, filters): return frappe.db.sql( , {"parent": filters.get("parent"), "start": start, "page_len": page_len, "txt": "%%%s%%" % txt}, ) @frappe.whitelist() @frappe.validate_and_sanitize_search_inputs
style: format code with black
get_mop_query
494bd9ef78313436f0424b918f200dab8fc7c20b
erpnext
payment_order.py
ast_levels: 12 | nloc: 7 | url: https://github.com/frappe/erpnext.git | complexity: 1 | token_counts: 50 | n_ast_errors: 1 | vocab_size: 21 | n_ast_nodes: 99 | language: Python
{ "docstring": " select mode_of_payment from `tabPayment Order Reference`\n\t\twhere parent = %(parent)s and mode_of_payment like %(txt)s\n\t\tlimit %(start)s, %(page_len)s", "language": "en", "n_whitespaces": 15, "n_words": 17, "vocab_size": 16 }
def get_mop_query(doctype, txt, searchfield, start, page_len, filters): return frappe.db.sql( , {"parent": filters.get("parent"), "start": start, "page_len": page_len, "txt": "%%%s%%" % txt}, ) @frappe.whitelist() @frappe.validate_and_sanitize_search_inputs
d_id: 23,246 | id: 108,535 | n_whitespaces: 208 | path: lib/matplotlib/tests/test_pyplot.py | n_words: 88 | n_identifiers: 27
def test_doc_pyplot_summary(): pyplot_docs = Path(__file__).parent / '../../../doc/api/pyplot_summary.rst' if not pyplot_docs.exists(): pytest.skip("Documentation sources not available") lines = pyplot_docs.read_text() m = re.search(r':nosignatures:\n\n(.*?)\n\n', lines, re.DOTALL) doc_functions = set(line.strip() for line in m.group(1).split('\n')) plot_commands = set(plt.get_pl
Cleanup documentation generation for pyplot - remove the awkward `pyplot.plotting()` function, which only served as a namespace to take up the docs for pyplot and output them via `.. autofunction` - Instead generate the same information using `.. autosummary::`. We have to list the desired methods here explicitly. I've added a test that these are the same as previously auto-generated in the `plotting()` docstring. If we change anything in pyplot, we'll be notified through the test failure that we have to adapt the autosummary list. - Removed the docstring generation logic `_setup_pyplot_info_docstrings()`. Apart from generating the `plotting()` docstring, this added docstrings to the pyplot colormap setters. Instead, we now add these docstrings directly via boilerplate.py Co-authored-by: Elliott Sales de Andrade <[email protected]>
test_doc_pyplot_summary
032316bc6c7798fca6c82de24167c975f237687f
matplotlib
test_pyplot.py
13
20
https://github.com/matplotlib/matplotlib.git
5
127
0
60
228
Python
{ "docstring": "Test that pyplot_summary lists all the plot functions.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
def test_doc_pyplot_summary(): pyplot_docs = Path(__file__).parent / '../../../doc/api/pyplot_summary.rst' if not pyplot_docs.exists(): pytest.skip("Documentation sources not available") lines = pyplot_docs.read_text() m = re.search(r':nosignatures:\n\n(.*?)\n\n', lines, re.DOTALL) doc_functions = set(line.strip() for line in m.group(1).split('\n')) plot_commands = set(plt.get_plot_commands()) missing = plot_commands.difference(doc_functions) if missing: raise AssertionError( f"The following pyplot functions are not listed in the " f"documentation. Please add them to doc/api/pyplot_summary.rst: " f"{missing!r}") extra = doc_functions.difference(plot_commands) if extra: raise AssertionError( f"The following functions are listed in the pyplot documentation, " f"but they do not exist in pyplot. " f"Please remove them from doc/api/pyplot_summary.rst: {extra!r}")
13,956
65,618
75
erpnext/controllers/accounts_controller.py
107
16
def validate_child_on_delete(row, parent): if parent.doctype == "Sales Order": if flt(row.delivered_qty): frappe.throw( _("Row #{0}: Cannot delete item {1} which has already been delivered").format( row.idx, row.item_code ) ) if flt(row.work_order_qty): frappe.throw( _("Row #{0}: Cannot delete item {1} which has work order assigned to it.").format( row.idx, row.item_code ) ) if flt(row.ordered_qty): frappe.throw( _("Row #{0}: Ca
style: format code with black
validate_child_on_delete
494bd9ef78313436f0424b918f200dab8fc7c20b
erpnext
accounts_controller.py
16
32
https://github.com/frappe/erpnext.git
8
161
0
42
269
Python
{ "docstring": "Check if partially transacted item (row) is being deleted.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
def validate_child_on_delete(row, parent): if parent.doctype == "Sales Order": if flt(row.delivered_qty): frappe.throw( _("Row #{0}: Cannot delete item {1} which has already been delivered").format( row.idx, row.item_code ) ) if flt(row.work_order_qty): frappe.throw( _("Row #{0}: Cannot delete item {1} which has work order assigned to it.").format( row.idx, row.item_code ) ) if flt(row.ordered_qty): frappe.throw( _("Row #{0}: Cannot delete item {1} which is assigned to customer's purchase order.").format( row.idx, row.item_code ) ) if parent.doctype == "Purchase Order" and flt(row.received_qty): frappe.throw( _("Row #{0}: Cannot delete item {1} which has already been received").format( row.idx, row.item_code ) ) if flt(row.billed_amt): frappe.throw( _("Row #{0}: Cannot delete item {1} which has already been billed.").format( row.idx, row.item_code ) )
20,730
101,311
496
scripts/fsmedia.py
108
23
def _get_items(self): postprocess_items = {} # Debug Landmarks if (hasattr(self._args, 'debug_landmarks') and self._args.debug_landmarks): postprocess_items["DebugLandmarks"] = None # Face Filter post processing if ((hasattr(self._args, "filter") and self._args.filter is not None) or (hasattr(self._args, "nfilter") and self._args.nfilter is not None)): if hasattr(self._args, "detector"): detector = self._args.detector.replace("-", "_").lower() else: detector = "cv2_dnn" if hasattr(self._args, "aligner"): aligner = self._args.aligner.replace("-", "_").lower() else: aligner = "cv2_dnn" face_filter = dict(detector=detector, aligner=aligner, multiprocess=not self._args.singleprocess) filter_lists = {} if hasattr(self._args, "ref_threshold"
bugfix: debug landmarks
_get_items
9e503bdaa2bfe2baaea50ad2e4bf742f309d9d10
faceswap
fsmedia.py
16
29
https://github.com/deepfakes/faceswap.git
12
249
0
67
422
Python
{ "docstring": " Check the passed in command line arguments for requested actions,\n\n For any requested actions, add the item to the actions list along with\n any relevant arguments and keyword arguments.\n\n Returns\n -------\n dict\n The name of the action to be performed as the key. Any action specific\n arguments and keyword arguments as the value.\n ", "language": "en", "n_whitespaces": 118, "n_words": 53, "vocab_size": 37 }
def _get_items(self): postprocess_items = {} # Debug Landmarks if (hasattr(self._args, 'debug_landmarks') and self._args.debug_landmarks): postprocess_items["DebugLandmarks"] = None # Face Filter post processing if ((hasattr(self._args, "filter") and self._args.filter is not None) or (hasattr(self._args, "nfilter") and self._args.nfilter is not None)): if hasattr(self._args, "detector"): detector = self._args.detector.replace("-", "_").lower() else: detector = "cv2_dnn" if hasattr(self._args, "aligner"): aligner = self._args.aligner.replace("-", "_").lower() else: aligner = "cv2_dnn" face_filter = dict(detector=detector, aligner=aligner, multiprocess=not self._args.singleprocess) filter_lists = {} if hasattr(self._args, "ref_threshold"): face_filter["ref_threshold"] = self._args.ref_threshold for filter_type in ('filter', 'nfilter'): filter_args = getattr(self._args, filter_type, None) filter_args = None if not filter_args else filter_args filter_lists[filter_type] = filter_args face_filter["filter_lists"] = filter_lists postprocess_items["FaceFilter"] = {"kwargs": face_filter} logger.debug("Postprocess Items: %s", postprocess_items) return postprocess_items
55,370
218,532
255
python3.10.4/Lib/ipaddress.py
83
15
def _collapse_addresses_internal(addresses): # First merge to_merge = list(addresses) subnets = {} while to_merge: net = to_merge.pop() supernet = net.supernet() existing = subnets.get(supernet) if existing is None: subnets[supernet] = net elif existing != net: # Merge consecutive subnets del subnets[supernet] to_merge.append(supernet) # Then iterate over resulting networks, skipping subsumed subnets last = None for net in sorted(subnets.values()): if last is not None: # Since they are
add python 3.10.4 for windows
_collapse_addresses_internal
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
ipaddress.py
12
19
https://github.com/XX-net/XX-Net.git
7
104
0
56
177
Python
{ "docstring": "Loops through the addresses, collapsing concurrent netblocks.\n\n Example:\n\n ip1 = IPv4Network('192.0.2.0/26')\n ip2 = IPv4Network('192.0.2.64/26')\n ip3 = IPv4Network('192.0.2.128/26')\n ip4 = IPv4Network('192.0.2.192/26')\n\n _collapse_addresses_internal([ip1, ip2, ip3, ip4]) ->\n [IPv4Network('192.0.2.0/24')]\n\n This shouldn't be called directly; it is called via\n collapse_addresses([]).\n\n Args:\n addresses: A list of IPv4Network's or IPv6Network's\n\n Returns:\n A list of IPv4Network's or IPv6Network's depending on what we were\n passed.\n\n ", "language": "en", "n_whitespaces": 150, "n_words": 57, "vocab_size": 47 }
def _collapse_addresses_internal(addresses): # First merge to_merge = list(addresses) subnets = {} while to_merge: net = to_merge.pop() supernet = net.supernet() existing = subnets.get(supernet) if existing is None: subnets[supernet] = net elif existing != net: # Merge consecutive subnets del subnets[supernet] to_merge.append(supernet) # Then iterate over resulting networks, skipping subsumed subnets last = None for net in sorted(subnets.values()): if last is not None: # Since they are sorted, last.network_address <= net.network_address # is a given. if last.broadcast_address >= net.broadcast_address: continue yield net last = net
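A minimal usage sketch of the public wrapper named in the docstring above — the standard library's ipaddress.collapse_addresses(), which is documented as the caller of this helper:

import ipaddress

# The four /26 netblocks from the docstring example collapse into a single /24.
nets = [ipaddress.ip_network(n) for n in
        ("192.0.2.0/26", "192.0.2.64/26", "192.0.2.128/26", "192.0.2.192/26")]
print(list(ipaddress.collapse_addresses(nets)))  # [IPv4Network('192.0.2.0/24')]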
22,780
107,491
102
lib/matplotlib/backend_bases.py
25
13
def inaxes(self, xy): axes_list = [a for a in self.figure.get_axes() if a.patch.contains_point(xy) and a.get_visible()] if axes_list: axes = cbook._topmost_artist(axes_list)
DOC: More cleanup axes -> Axes
inaxes
f156db08eee54d285ab0fb4e031e48d078ba6aa3
matplotlib
backend_bases.py
12
8
https://github.com/matplotlib/matplotlib.git
5
56
0
20
92
Python
{ "docstring": "\n Return the topmost visible `~.axes.Axes` containing the point *xy*.\n\n Parameters\n ----------\n xy : (float, float)\n (x, y) pixel positions from left/bottom of the canvas.\n\n Returns\n -------\n `~matplotlib.axes.Axes` or None\n The topmost visible Axes containing the point, or None if there\n is no Axes at the point.\n ", "language": "en", "n_whitespaces": 136, "n_words": 46, "vocab_size": 36 }
def inaxes(self, xy): axes_list = [a for a in self.figure.get_axes() if a.patch.contains_point(xy) and a.get_visible()] if axes_list: axes = cbook._topmost_artist(axes_list) else: axes = None return axes
8,611
45,484
300
airflow/migrations/versions/98271e7606e2_add_scheduling_decision_to_dagrun_and_.py
135
32
def upgrade(): conn = op.get_bind() is_sqlite = bool(conn.dialect.name == "sqlite") is_mssql = bool(conn.dialect.name == "mssql") if is_sqlite: op.execute("PRAGMA foreign_keys=off") with op.batch_alter_table('dag_run', schema=None) as batch_op: batch_op.add_column(sa.Column('last_scheduling_decision', TIMESTAMP, nullable=True)) batch_op.create_index('idx_last_scheduling_decision', ['last_scheduling_decision'], unique=False) batch_op.add_column(sa.Column('dag_hash', sa.String(32), nullable=True)) with op.batch_alter_table('dag', schema=None) as batch_op: batc
Autogenerate migration reference doc (#21601) * document airflow version in each alembic migration module and use this to autogen the doc * update each migration module to have the same description used in migration ref (so it can be used in autogen)
upgrade
69f6f9e01b6df76c3c8fa266d460324163957887
airflow
98271e7606e2_add_scheduling_decision_to_dagrun_and_.py
13
34
https://github.com/apache/airflow.git
4
309
0
94
553
Python
{ "docstring": "Apply Add ``scheduling_decision`` to ``DagRun`` and ``DAG``\n UPDATE dag SET\n concurrency={concurrency},\n has_task_concurrency_limits={1 if is_sqlite or is_mssql else sa.true()}\n where concurrency IS NULL\n ", "language": "en", "n_whitespaces": 65, "n_words": 22, "vocab_size": 22 }
def upgrade(): conn = op.get_bind() is_sqlite = bool(conn.dialect.name == "sqlite") is_mssql = bool(conn.dialect.name == "mssql") if is_sqlite: op.execute("PRAGMA foreign_keys=off") with op.batch_alter_table('dag_run', schema=None) as batch_op: batch_op.add_column(sa.Column('last_scheduling_decision', TIMESTAMP, nullable=True)) batch_op.create_index('idx_last_scheduling_decision', ['last_scheduling_decision'], unique=False) batch_op.add_column(sa.Column('dag_hash', sa.String(32), nullable=True)) with op.batch_alter_table('dag', schema=None) as batch_op: batch_op.add_column(sa.Column('next_dagrun', TIMESTAMP, nullable=True)) batch_op.add_column(sa.Column('next_dagrun_create_after', TIMESTAMP, nullable=True)) # Create with nullable and no default, then ALTER to set values, to avoid table level lock batch_op.add_column(sa.Column('concurrency', sa.Integer(), nullable=True)) batch_op.add_column(sa.Column('has_task_concurrency_limits', sa.Boolean(), nullable=True)) batch_op.create_index('idx_next_dagrun_create_after', ['next_dagrun_create_after'], unique=False) try: from airflow.configuration import conf concurrency = conf.getint('core', 'dag_concurrency', fallback=16) except: # noqa concurrency = 16 # Set it to true here as it makes us take the slow/more complete path, and when it's next parsed by the # DagParser it will get set to correct value. op.execute( f ) with op.batch_alter_table('dag', schema=None) as batch_op: batch_op.alter_column('concurrency', type_=sa.Integer(), nullable=False) batch_op.alter_column('has_task_concurrency_limits', type_=sa.Boolean(), nullable=False) if is_sqlite: op.execute("PRAGMA foreign_keys=on")
591
3,889
147
airbyte-integrations/connectors/source-orb/source_orb/source.py
50
10
def enrich_ledger_entries_with_event_data(self, ledger_entries): # Build up a list of the subset of ledger entries we are expected # to enrich with event metadata. event_id_to_ledger_entry = {} for entry in ledger_entries: maybe_event_id: Optional[str] = entry.get("event_id") if maybe_event_id: event_id_to_ledger_entry[maybe_event_id] = entry # Nothing to enrich; short-circuit if len(event_id_to_ledger_entry) == 0: return ledger_entries
🎉 New Source: Orb (#9985) * V1 of source_orb connector * add boostrap.md file * add clause on Pagination to bootstrap.md * add SUMMARY documentation * add lookback_window_days connector parameter * Add support for start_date parameter * Add ability to transform record in order to un-nest IDs * Add support for extracting event properties based on connector configuration
enrich_ledger_entries_with_event_data
1e0ac30ebdcfce55a5644bcd486044da45c93dd6
airbyte
source.py
11
35
https://github.com/airbytehq/airbyte.git
12
261
0
41
84
Python
{ "docstring": "\n Enriches a list of ledger entries with event metadata (applies only to decrements that\n have an event_id property set, i.e. automated decrements to the ledger applied by Orb).\n ", "language": "en", "n_whitespaces": 50, "n_words": 28, "vocab_size": 25 }
def enrich_ledger_entries_with_event_data(self, ledger_entries): # Build up a list of the subset of ledger entries we are expected # to enrich with event metadata. event_id_to_ledger_entry = {} for entry in ledger_entries: maybe_event_id: Optional[str] = entry.get("event_id") if maybe_event_id: event_id_to_ledger_entry[maybe_event_id] = entry # Nothing to enrich; short-circuit if len(event_id_to_ledger_entry) == 0: return ledger_entries
413
3,245
164
packages/syft/tests/syft/core/adp/data_subject_ledger_test.py
81
16
def test_cache() -> None: ledger_store = DictLedgerStore() user_key = b"1322" ledger = DataSubjectLedger.get_or_create(store=ledger_store, user_key=user_key) assert ( ledger._cache_constant2epsilon[0] == 0.05372712063485988 ), "The first value in the cache is incorrect" assert ( ledger._cache_constant2epsilon[1] == 0.07773597369831031 ), "Has the DP cache been changed?" rdp_700k = convert_constants_to_indices(np.array([700_000])) assert ( ledger._cache_constant2epsilon.take(rdp_700k)[0] == 706213.1816144075 ), "Has the DP cache been changed?" rdp_50 = convert_constants_to_indices(np.array([50])) assert ( ledger._cache_constant2epsilon.take(rdp_50)[0] == 100.68990516105825 ), "Has the DP cache bee
Add tests for ledger and cache
test_cache
61f4138eeb028287425f6007d692bf7faa808e75
PySyft
data_subject_ledger_test.py
11
22
https://github.com/OpenMined/PySyft.git
1
139
0
43
211
Python
{ "docstring": "Ensure the most up to date RDP-to-epsilon cache is being used.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
def test_cache() -> None: ledger_store = DictLedgerStore() user_key = b"1322" ledger = DataSubjectLedger.get_or_create(store=ledger_store, user_key=user_key) assert ( ledger._cache_constant2epsilon[0] == 0.05372712063485988 ), "The first value in the cache is incorrect" assert ( ledger._cache_constant2epsilon[1] == 0.07773597369831031 ), "Has the DP cache been changed?" rdp_700k = convert_constants_to_indices(np.array([700_000])) assert ( ledger._cache_constant2epsilon.take(rdp_700k)[0] == 706213.1816144075 ), "Has the DP cache been changed?" rdp_50 = convert_constants_to_indices(np.array([50])) assert ( ledger._cache_constant2epsilon.take(rdp_50)[0] == 100.68990516105825 ), "Has the DP cache been changed?" assert ( len(ledger._cache_constant2epsilon) >= 1_200_000 ), "Has the cache been changed?"
3,380
20,452
116
pipenv/patched/notpip/_vendor/pygments/lexers/__init__.py
42
14
def get_lexer_for_mimetype(_mime, **options): for modname, name, _, _, mimetypes in LEXERS.values(): if _mime in mimetypes: if name not in _lexer_cache: _load_lexers(modname) return _lexer_cache[name](**options) for cls in find_plugin_lexers():
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
get_lexer_for_mimetype
f3166e673fe8d40277b804d35d77dcdb760fc3b3
pipenv
__init__.py
13
10
https://github.com/pypa/pipenv.git
6
77
0
31
123
Python
{ "docstring": "Get a lexer for a mimetype.\n\n Raises ClassNotFound if not found.\n ", "language": "en", "n_whitespaces": 17, "n_words": 11, "vocab_size": 10 }
def get_lexer_for_mimetype(_mime, **options): for modname, name, _, _, mimetypes in LEXERS.values(): if _mime in mimetypes: if name not in _lexer_cache: _load_lexers(modname) return _lexer_cache[name](**options) for cls in find_plugin_lexers(): if _mime in cls.mimetypes: return cls(**options) raise ClassNotFound('no lexer for mimetype %r found' % _mime)
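A short usage sketch of this lookup via upstream Pygments (the record above is pipenv's vendored copy; the public API is assumed to behave the same way):

from pygments.lexers import get_lexer_for_mimetype
from pygments.util import ClassNotFound

try:
    lexer = get_lexer_for_mimetype("text/x-python")
    print(lexer.name)  # "Python"
except ClassNotFound:
    print("no lexer registered for this mimetype")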
@pytest.mark.parametrize("solver", SOLVERS) @pytest.mark.parametrize("fit_intercept", [True, False])
76,230
260,406
276
sklearn/linear_model/_glm/tests/test_glm.py
127
34
def test_glm_regression(solver, fit_intercept, glm_dataset): model, X, y, _, coef_with_intercept, coef_without_intercept, alpha = glm_dataset params = dict( alpha=alpha, fit_intercept=fit_intercept, # While _GeneralizedLinearRegressor exposes the solver parameter, public # estimators currently do not, and lbfgs is the only solver anyw
TST tight tests for GLMs (#23619) Co-authored-by: Olivier Grisel <[email protected]>
test_glm_regression
9d863aba2b6dab9c9cbbcf2f7c3b7a99b6ad168f
scikit-learn
test_glm.py
14
26
https://github.com/scikit-learn/scikit-learn.git
2
201
1
89
344
Python
{ "docstring": "Test that GLM converges for all solvers to correct solution.\n\n We work with a simple constructed data set with known solution.\n ", "language": "en", "n_whitespaces": 27, "n_words": 21, "vocab_size": 19 }
def test_glm_regression(solver, fit_intercept, glm_dataset): model, X, y, _, coef_with_intercept, coef_without_intercept, alpha = glm_dataset params = dict( alpha=alpha, fit_intercept=fit_intercept, # While _GeneralizedLinearRegressor exposes the solver parameter, public # estimators currently do not, and lbfgs is the only solver anyway. # TODO: Expose solver as soon as we have a second solver to choose from. # solver=solver, # only lbfgs available tol=1e-12, max_iter=1000, ) model = clone(model).set_params(**params) X = X[:, :-1] # remove intercept if fit_intercept: coef = coef_with_intercept intercept = coef[-1] coef = coef[:-1] else: coef = coef_without_intercept intercept = 0 model.fit(X, y) rtol = 5e-5 assert model.intercept_ == pytest.approx(intercept, rel=rtol) assert_allclose(model.coef_, coef, rtol=rtol) # Same with sample_weight. model = ( clone(model).set_params(**params).fit(X, y, sample_weight=np.ones(X.shape[0])) ) assert model.intercept_ == pytest.approx(intercept, rel=rtol) assert_allclose(model.coef_, coef, rtol=rtol) @pytest.mark.parametrize("solver", SOLVERS) @pytest.mark.parametrize("fit_intercept", [True, False])
19,908
100,425
332
setup.py
81
27
def _cuda_check(self): with Popen("nvcc -V", shell=True, stdout=PIPE, stderr=PIPE) as chk: stdout, stderr = chk.communicate() if not stderr: version = re.search(r".*release (?P<cuda>\d+\.\d+)", stdout.decode(locale.getpreferredencoding())) self.cuda_version = version.groupdict().get("cuda", None) locate = "where" if self._os == "windows" else "which" path = os.popen(f"{locate} nvcc").read() if path: path = path.split("
Update code to support Tensorflow versions up to 2.8 (#1213) * Update maximum tf version in setup + requirements * - bump max version of tf version in launcher - standardise tf version check * update keras get_custom_objects for tf>2.6 * bugfix: force black text in GUI file dialogs (linux) * dssim loss - Move to stock tf.ssim function * Update optimizer imports for compatibility * fix logging for tf2.8 * Fix GUI graphing for TF2.8 * update tests * bump requirements.txt versions * Remove limit on nvidia-ml-py * Graphing bugfixes - Prevent live graph from displaying if data not yet available * bugfix: Live graph. Collect loss labels correctly * fix: live graph - swallow inconsistent loss errors * Bugfix: Prevent live graph from clearing during training * Fix graphing for AMD
_cuda_check
c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf
faceswap
setup.py
15
18
https://github.com/deepfakes/faceswap.git
6
149
0
65
271
Python
{ "docstring": " Obtain the location and version of Cuda and populate :attr:`cuda_version` and\n :attr:`cuda_path`\n\n Initially just calls `nvcc -V` to get the installed version of Cuda currently in use.\n If this fails, drills down to more OS specific checking methods.\n ", "language": "en", "n_whitespaces": 67, "n_words": 38, "vocab_size": 31 }
def _cuda_check(self): with Popen("nvcc -V", shell=True, stdout=PIPE, stderr=PIPE) as chk: stdout, stderr = chk.communicate() if not stderr: version = re.search(r".*release (?P<cuda>\d+\.\d+)", stdout.decode(locale.getpreferredencoding())) self.cuda_version = version.groupdict().get("cuda", None) locate = "where" if self._os == "windows" else "which" path = os.popen(f"{locate} nvcc").read() if path: path = path.split("\n")[0] # Split multiple entries and take first found while True: # Get Cuda root folder path, split = os.path.split(path) if split == "bin": break self.cuda_path = path return # Failed to load nvcc, manual check getattr(self, f"_cuda_check_{self._os}")()
8,477
45,097
33
tests/models/test_taskinstance.py
12
7
def test_map_product_same(self, dag_maker, session): outputs =
Implement mapped value unpacking (#21641)
test_map_product_same
46a337c8cda6fcc515fffe9a4e4cc324edaefa0a
airflow
test_taskinstance.py
12
20
https://github.com/apache/airflow.git
2
177
0
12
50
Python
{ "docstring": "Test a mapped task can refer to the same source multiple times.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
def test_map_product_same(self, dag_maker, session): outputs = [] with dag_maker(dag_id="product_same", session=session) as dag:
35,196
152,834
56
modules/deepbooru.py
24
13
def get_deepbooru_tags(pil_image, threshold=0.5): from modules import shared # prevents circular reference create_deepbooru_process(threshold) shared.deepbooru_process_return["value"] = -1 shared.deepbooru_proces
refactored the deepbooru module to improve speed on running multiple interogations in a row. Added the option to generate deepbooru tags for textual inversion preproccessing.
get_deepbooru_tags
1f92336be768d235c18a82acb2195b7135101ae7
stable-diffusion-webui
deepbooru.py
9
9
https://github.com/AUTOMATIC1111/stable-diffusion-webui.git
2
61
0
23
100
Python
{ "docstring": "\n This method is for running only one image at a time for simple use. Used to the img2img interrogate.\n ", "language": "en", "n_whitespaces": 27, "n_words": 19, "vocab_size": 18 }
def get_deepbooru_tags(pil_image, threshold=0.5): from modules import shared # prevents circular reference create_deepbooru_process(threshold) shared.deepbooru_process_return["value"] = -1 shared.deepbooru_process_queue.put(pil_image) while shared.deepbooru_process_return["value"] == -1: time.sleep(0.2) release_process() return ret
25,796
116,615
360
tests/unit/test_executor.py
101
25
def test_update_from_select(self, mock_handler): self.set_handler(mock_handler, name='pg', tables={'tasks': self.df}) # --- use predictor --- predictor = { 'name': 'task_model', 'predict': 'p', 'dtypes': { 'p': dtype.float, 'a': dtype.integer, 'b': dtype.categorical, 'c': dtype.datetime
support of update command #2454
test_update_from_select
0dadd5cecec68f252a08637f695b0e4b573b316f
mindsdb
test_executor.py
15
37
https://github.com/mindsdb/mindsdb.git
1
135
0
71
247
Python
{ "docstring": "\n update \n pg.table2 \n set\n a1 = df.a,\n c1 = df.c\n from \n (\n SELECT model.a as a, model.b as b, model.p as c\n FROM pg.tasks as t\n JOIN mindsdb.task_model as model\n WHERE t.a=1 \n )\n as df\n where \n table2.a1 = df.a \n and table2.b1 = df.b \n \n # SELECT a, b FROM pg.tasks\n # UNION\n # SELECT b, a FROM pg.tasks\n # ", "language": "en", "n_whitespaces": 410, "n_words": 57, "vocab_size": 38 }
def test_update_from_select(self, mock_handler): self.set_handler(mock_handler, name='pg', tables={'tasks': self.df}) # --- use predictor --- predictor = { 'name': 'task_model', 'predict': 'p', 'dtypes': { 'p': dtype.float, 'a': dtype.integer, 'b': dtype.categorical, 'c': dtype.datetime }, 'predicted_value': 'ccc' } self.set_predictor(predictor) sql = ret = self.command_executor.execute_command( parse_sql(sql, dialect='mindsdb')) assert ret.error_code is None # 1 select and 2 updates assert mock_handler().query.call_count == 3 # second is update assert mock_handler().query.call_args_list[1][0][0].to_string() == "update table2 set a1=1, c1='ccc' where (a1 = 1) AND (b1 = 'ccc')" # @patch('mindsdb.integrations.handlers.postgres_handler.Handler') # def test_union_type_mismatch(self, mock_handler): # self.set_handler(mock_handler, name='pg', tables={'tasks': self.df}) # # sql = # from mindsdb.api.mysql.mysql_proxy.utilities import ErSqlWrongArguments # with pytest.raises(ErSqlWrongArguments): # self.command_executor.execute_command(parse_sql(sql, dialect='mindsdb'))
13,942
65,565
27
erpnext/buying/doctype/supplier_scorecard_variable/supplier_scorecard_variable.py
38
12
def get_total_shipments(scorecard): supplier = frappe.get_doc("Supplier", scorecard.supplier) # Loo
style: format code with black
get_total_shipments
494bd9ef78313436f0424b918f200dab8fc7c20b
erpnext
supplier_scorecard_variable.py
13
20
https://github.com/frappe/erpnext.git
2
68
0
33
114
Python
{ "docstring": "Gets the total number of ordered shipments to arrive in the period (based on Purchase Receipts)\n\t\t\tSELECT\n\t\t\t\tCOUNT(po_item.base_amount)\n\t\t\tFROM\n\t\t\t\t`tabPurchase Order Item` po_item,\n\t\t\t\t`tabPurchase Order` po\n\t\t\tWHERE\n\t\t\t\tpo.supplier = %(supplier)s\n\t\t\t\tAND po_item.schedule_date BETWEEN %(start_date)s AND %(end_date)s\n\t\t\t\tAND po_item.docstatus = 1\n\t\t\t\tAND po_item.parent = po.name", "language": "en", "n_whitespaces": 33, "n_words": 44, "vocab_size": 37 }
def get_total_shipments(scorecard): supplier = frappe.get_doc("Supplier", scorecard.supplier) # Look up all PO Items with delivery dates between our dates data = frappe.db.sql( , {"supplier": supplier.name, "start_date": scorecard.start_date, "end_date": scorecard.end_date}, as_dict=0, )[0][0] if not data: data = 0 return data
48,243
196,909
17
sympy/utilities/source.py
8
6
def source(object): print('In file: %s' % inspect.getsourcefile(
Update the deprecation for source()
source
3a56f9bb1642dda441f65b3713635a8e98150247
sympy
source.py
10
3
https://github.com/sympy/sympy.git
1
26
0
8
48
Python
{ "docstring": "\n Prints the source code of a given object.\n\n .. deprecated:: 1.3\n\n The ``source()`` function is deprecated. Use ``inspect.getsource()`` or\n ``??`` in IPython/Jupyter instead.\n\n ", "language": "en", "n_whitespaces": 45, "n_words": 23, "vocab_size": 23 }
def source(object): print('In file: %s' % inspect.getsourcefile(object)) print(inspect.getsource(object))
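A small sketch of the replacement the docstring recommends — calling inspect directly rather than the deprecated source() helper:

import inspect
from sympy import Add

print(inspect.getsourcefile(Add))    # path to sympy/core/add.py
print(inspect.getsource(Add)[:80])   # opening lines of the class definition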
81,856
277,080
272
keras/utils/tf_utils.py
98
16
def validate_axis(axis, input_shape): input_shape = tf.TensorShape(input_shape) rank = input_shape.rank if not rank: raise ValueError( f"Input has undefined rank. Received: input_shape={input_shape}" ) # Convert axis to list and resolve negatives if isinstance(axis, int): axis = [axis] else: axis = list(axis) for idx, x in enumerate(axis): if x < 0: axis[idx] = rank + x # Va
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
validate_axis
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
tf_utils.py
16
24
https://github.com/keras-team/keras.git
9
119
0
65
224
Python
{ "docstring": "Validate an axis value and returns its standardized form.\n\n Args:\n axis: Value to validate. Can be an integer or a list/tuple of integers.\n Integers may be negative.\n input_shape: Reference input shape that the axis/axes refer to.\n\n Returns:\n Normalized form of `axis`, i.e. a list with all-positive values.\n ", "language": "en", "n_whitespaces": 78, "n_words": 47, "vocab_size": 43 }
def validate_axis(axis, input_shape): input_shape = tf.TensorShape(input_shape) rank = input_shape.rank if not rank: raise ValueError( f"Input has undefined rank. Received: input_shape={input_shape}" ) # Convert axis to list and resolve negatives if isinstance(axis, int): axis = [axis] else: axis = list(axis) for idx, x in enumerate(axis): if x < 0: axis[idx] = rank + x # Validate axes for x in axis: if x < 0 or x >= rank: raise ValueError( "Invalid value for `axis` argument. " "Expected 0 <= axis < inputs.rank (with " f"inputs.rank={rank}). Received: axis={tuple(axis)}" ) if len(axis) != len(set(axis)): raise ValueError(f"Duplicate axis: {tuple(axis)}") return axis
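A quick check of the normalisation above — a sketch assuming TensorFlow is installed and validate_axis as defined in this record is in scope; negative axes resolve against the input rank:

import tensorflow as tf

shape = tf.TensorShape([8, 32, 32, 3])  # rank 4
print(validate_axis(-1, shape))         # [3]
print(validate_axis([1, -2], shape))    # [1, 2]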
3,351
20,375
232
pipenv/patched/notpip/_vendor/pygments/formatters/latex.py
47
9
def _filter_to(self, it, pred):
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
_filter_to
f3166e673fe8d40277b804d35d77dcdb760fc3b3
pipenv
latex.py
13
15
https://github.com/pypa/pipenv.git
6
70
0
25
116
Python
{ "docstring": " Keep only the tokens that match `pred`, merge the others together ", "language": "en", "n_whitespaces": 12, "n_words": 11, "vocab_size": 10 }
def _filter_to(self, it, pred): buf = '' idx = 0 for i, t, v in it: if pred(t): if buf: yield idx, None, buf buf = '' yield i, t, v else: if not buf: idx = i buf += v if buf: yield idx, None, buf
90,371
291,261
20
homeassistant/components/mqtt/mixins.py
6
5
def entity_registry_enabled_default(self) -> bool: return bool(self._config[CONF_ENABLED_BY_DEFAULT])
Strict type hints for MQTT integration (#82317) * Strict type hints for MQTT integration * Fix errors * Additional corrections * Use cv.template to avoid untyped calls * Enable strict typing policy for MQTT integration * Use ignore[no-untyped-call] * Use # type: ignore[unreachable] * Correct cast * Refactor getting discovery_payload * Remove unused type ignore comments
entity_registry_enabled_default
8a8732f0bc2a7cd891a3ddaff3edbe9c246d6ebf
core
mixins.py
9
3
https://github.com/home-assistant/core.git
1
18
0
6
31
Python
{ "docstring": "Return if the entity should be enabled when first added to the entity registry.", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 12 }
def entity_registry_enabled_default(self) -> bool: return bool(self._config[CONF_ENABLED_BY_DEFAULT])
47,800
196,300
139
sympy/geometry/polygon.py
62
15
def bisectors(self): # use lines containing sides so containment check during # intersection calculation can be avoided, thus reducing # the processing time for calculating the bisectors s = [Line(l) for l in self.sides] v = self.vertices c = self.incenter l1 = Segment(v[0], Line(v[0], c).intersection(s[1])[0]) l2 = Segme
Updated import locations
bisectors
498015021131af4dbb07eb110e5badaba8250c7b
sympy
polygon.py
14
8
https://github.com/sympy/sympy.git
2
143
0
53
213
Python
{ "docstring": "The angle bisectors of the triangle.\n\n An angle bisector of a triangle is a straight line through a vertex\n which cuts the corresponding angle in half.\n\n Returns\n =======\n\n bisectors : dict\n Each key is a vertex (Point) and each value is the corresponding\n bisector (Segment).\n\n See Also\n ========\n\n sympy.geometry.point.Point, sympy.geometry.line.Segment\n\n Examples\n ========\n\n >>> from sympy import Point, Triangle, Segment\n >>> p1, p2, p3 = Point(0, 0), Point(1, 0), Point(0, 1)\n >>> t = Triangle(p1, p2, p3)\n >>> from sympy import sqrt\n >>> t.bisectors()[p2] == Segment(Point(1, 0), Point(0, sqrt(2) - 1))\n True\n\n ", "language": "en", "n_whitespaces": 232, "n_words": 91, "vocab_size": 63 }
def bisectors(self): # use lines containing sides so containment check during # intersection calculation can be avoided, thus reducing # the processing time for calculating the bisectors s = [Line(l) for l in self.sides] v = self.vertices c = self.incenter l1 = Segment(v[0], Line(v[0], c).intersection(s[1])[0]) l2 = Segment(v[1], Line(v[1], c).intersection(s[2])[0]) l3 = Segment(v[2], Line(v[2], c).intersection(s[0])[0]) return {v[0]: l1, v[1]: l2, v[2]: l3}
74,890
256,545
523
ui/utils.py
124
30
def query(query, filters={}, top_k_reader=5, top_k_retriever=5) -> Tuple[List[Dict[str, Any]], Dict[str, str]]: url = f"{API_ENDPOINT}/{DOC_REQUEST}" params = {"filters": filters, "Retriever": {"top_k": top_k_retriever}, "Reader": {"top_k": top_k_reader}} req = {"query": query, "params": params} response_raw = requests.post(url, json=req) if response_raw.status_code >= 400 and response_raw.status_code != 503: raise Exception(f"{vars(response_raw)}") response =
Apply black formatting (#2115) * Testing black on ui/ * Applying black on docstores * Add latest docstring and tutorial changes * Create a single GH action for Black and docs to reduce commit noise to the minimum, slightly refactor the OpenAPI action too * Remove comments * Relax constraints on pydoc-markdown * Split temporary black from the docs. Pydoc-markdown was obsolete and needs a separate PR to upgrade * Fix a couple of bugs * Add a type: ignore that was missing somehow * Give path to black * Apply Black * Apply Black * Relocate a couple of type: ignore * Update documentation * Make Linux CI run after applying Black * Triggering Black * Apply Black * Remove dependency, does not work well * Remove manually double trailing commas * Update documentation Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
query
a59bca366174d9c692fa19750c24d65f47660ef7
haystack
utils.py
19
40
https://github.com/deepset-ai/haystack.git
8
297
0
89
521
Python
{ "docstring": "\n Send a query to the REST API and parse the answer.\n Returns both a ready-to-use representation of the results and the raw JSON.\n ", "language": "en", "n_whitespaces": 33, "n_words": 23, "vocab_size": 18 }
def query(query, filters={}, top_k_reader=5, top_k_retriever=5) -> Tuple[List[Dict[str, Any]], Dict[str, str]]: url = f"{API_ENDPOINT}/{DOC_REQUEST}" params = {"filters": filters, "Retriever": {"top_k": top_k_retriever}, "Reader": {"top_k": top_k_reader}} req = {"query": query, "params": params} response_raw = requests.post(url, json=req) if response_raw.status_code >= 400 and response_raw.status_code != 503: raise Exception(f"{vars(response_raw)}") response = response_raw.json() if "errors" in response: raise Exception(", ".join(response["errors"])) # Format response results = [] answers = response["answers"] for answer in answers: if answer.get("answer", None): results.append( { "context": "..." + answer["context"] + "...", "answer": answer.get("answer", None), "source": answer["meta"]["name"], "relevance": round(answer["score"] * 100, 2), "document": [doc for doc in response["documents"] if doc["id"] == answer["document_id"]][0], "offset_start_in_doc": answer["offsets_in_document"][0]["start"], "_raw": answer, } ) else: results.append( { "context": None, "answer": None, "document": None, "relevance": round(answer["score"] * 100, 2), "_raw": answer, } ) return results, response
97,491
298,548
15
homeassistant/components/daikin/climate.py
9
6
def format_target_temperature(target_temperature): return str(round(float(target_temperature) * 2, 0) / 2).r
Daikin AC : Round to nearest half degree (#70446) (#70452)
format_target_temperature
e2bbdb26be42d9b82538f5964819489e6f7aa656
core
climate.py
17
2
https://github.com/home-assistant/core.git
1
33
0
9
59
Python
{ "docstring": "Format target temperature to be sent to the Daikin unit, rounding to nearest half degree.", "language": "en", "n_whitespaces": 14, "n_words": 15, "vocab_size": 13 }
def format_target_temperature(target_temperature): return str(round(float(target_temperature) * 2, 0) / 2).rstrip("0").rstrip(".")
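A few worked values for the expression above (a sketch assuming the function is in scope): temperatures snap to the nearest half degree, and whole degrees lose the trailing ".0":

assert format_target_temperature("21.3") == "21.5"  # 21.3*2 = 42.6 -> 43 -> 21.5
assert format_target_temperature("21.8") == "22"    # 21.8*2 = 43.6 -> 44 -> 22.0 -> "22"
assert format_target_temperature("22.0") == "22"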
14,468
67,274
11
erpnext/regional/report/uae_vat_201/uae_vat_201.py
16
7
def get_data(filters=None): data = [] emirates, amounts_by_emirate = append_vat_on_sales(data, filters) append_vat_on_expenses(data, filters)
style: format code with black
get_data
494bd9ef78313436f0424b918f200dab8fc7c20b
erpnext
uae_vat_201.py
8
5
https://github.com/frappe/erpnext.git
1
34
0
12
55
Python
{ "docstring": "Returns the list of dictionaries. Each dictionary is a row in the datatable and chart data.", "language": "en", "n_whitespaces": 15, "n_words": 16, "vocab_size": 15 }
def get_data(filters=None): data = [] emirates, amounts_by_emirate = append_vat_on_sales(data, filters) append_vat_on_expenses(data, filters) return data, emirates, amounts_by_emirate
11,620
57,112
30
src/prefect/utilities/callables.py
9
6
def dict(self, *args, **kwargs): kwargs.setdefault("exclude_none", True) r
Move parameter schema utilities to prefect.utilites.callables
dict
b13e269bdebd6248023455e7f1ccb24669cbfe3e
prefect
callables.py
9
3
https://github.com/PrefectHQ/prefect.git
1
33
0
9
56
Python
{ "docstring": "Exclude `None` fields by default to comply with\n the OpenAPI spec.\n ", "language": "en", "n_whitespaces": 25, "n_words": 11, "vocab_size": 11 }
def dict(self, *args, **kwargs): kwargs.setdefault("exclude_none", True) return super().dict(*args, **kwargs)
443
3,326
353
airbyte-integrations/bases/base-normalization/normalization/transform_catalog/stream_processor.py
83
24
def extract_column_names(self) -> Dict[str, Tuple[str, str]]: fields = [] for field in self.properties.keys(): if not is_airbyte_column(field): fields.append(field) result = {} field_names = set() for field in fields: field_name = self.name_transformer.normalize_column_name(field, in_jinja=False) field_name_lookup = self.name_transformer.normalize_column_identifier_case_for_lookup(field_name) jinja_name = self.name_transformer.normalize_column_name(field, in_jinja=True) if field_name_lookup in field_names: # TODO handle column name duplicates or collisions deterministically in this stream for i in range(1, 1000): field_name = self.name_transformer.normalize_column_name(f"{field}_{i}", in_jinja=False) field_name_lookup = self.name_transformer.normalize_column_identifier_case_for_lookup(field_name) jinja_name = self.name_transformer.normalize_column_name(f"{field}_{i}", in_jinja=True) if field_name
🐛 Fix normalization issue with quoted & case sensitive columns (#9317)
extract_column_names
c5d4a973631ccae7918b9d7881f875a265f30619
airbyte
stream_processor.py
16
28
https://github.com/airbytehq/airbyte.git
7
178
0
51
294
Python
{ "docstring": "\n Generate a mapping of JSON properties to normalized SQL Column names, handling collisions and avoid duplicate names\n\n The mapped value to a field property is a tuple where:\n - the first value is the normalized \"raw\" column name\n - the second value is the normalized quoted column name to be used in jinja context\n ", "language": "en", "n_whitespaces": 92, "n_words": 54, "vocab_size": 38 }
def extract_column_names(self) -> Dict[str, Tuple[str, str]]: fields = [] for field in self.properties.keys(): if not is_airbyte_column(field): fields.append(field) result = {} field_names = set() for field in fields: field_name = self.name_transformer.normalize_column_name(field, in_jinja=False) field_name_lookup = self.name_transformer.normalize_column_identifier_case_for_lookup(field_name) jinja_name = self.name_transformer.normalize_column_name(field, in_jinja=True) if field_name_lookup in field_names: # TODO handle column name duplicates or collisions deterministically in this stream for i in range(1, 1000): field_name = self.name_transformer.normalize_column_name(f"{field}_{i}", in_jinja=False) field_name_lookup = self.name_transformer.normalize_column_identifier_case_for_lookup(field_name) jinja_name = self.name_transformer.normalize_column_name(f"{field}_{i}", in_jinja=True) if field_name_lookup not in field_names: break field_names.add(field_name_lookup) result[field] = (field_name, jinja_name) return result
41,976
176,574
311
networkx/algorithms/shortest_paths/generic.py
88
19
def _build_paths_from_predecessors(sources, target, pred): if
Add a space in an error (#5601) * Add a space in an error * Fix style errors
_build_paths_from_predecessors
db20f63bd3f16dedb6c660dbc6fbc89e89892c82
networkx
generic.py
17
25
https://github.com/networkx/networkx.git
8
170
0
62
271
Python
{ "docstring": "Compute all simple paths to target, given the predecessors found in\n pred, terminating when any source in sources is found.\n\n Parameters\n ----------\n sources : set\n Starting nodes for path.\n\n target : node\n Ending node for path.\n\n pred : dict\n A dictionary of predecessor lists, keyed by node\n\n Returns\n -------\n paths : generator of lists\n A generator of all paths between source and target.\n\n Raises\n ------\n NetworkXNoPath\n If `target` cannot be reached from `source`.\n\n Notes\n -----\n There may be many paths between the sources and target. If there are\n cycles among the predecessors, this function will not produce all\n possible paths because doing so would produce infinitely many paths\n of unbounded length -- instead, we only produce simple paths.\n\n See Also\n --------\n shortest_path\n single_source_shortest_path\n all_pairs_shortest_path\n all_shortest_paths\n bellman_ford_path\n ", "language": "en", "n_whitespaces": 237, "n_words": 126, "vocab_size": 92 }
def _build_paths_from_predecessors(sources, target, pred): if target not in pred: raise nx.NetworkXNoPath(f"Target {target} cannot be reached from given sources") seen = {target} stack = [[target, 0]] top = 0 while top >= 0: node, i = stack[top] if node in sources: yield [p for p, n in reversed(stack[: top + 1])] if len(pred[node]) > i: stack[top][1] = i + 1 next = pred[node][i] if next in seen: continue else: seen.add(next) top += 1 if top == len(stack): stack.append([next, 0]) else: stack[top][:] = [next, 0] else: seen.discard(node) top -= 1
86,935
287,747
25
homeassistant/components/bluetooth/models.py
11
4
def is_connected(self) -> bool: return self._backend is not None and self._backend.is_connected
Update to bleak 0.18.0 (#79008)
is_connected
1b144c0e4dd683e3b47668a89da5eb6da4ae5e08
core
models.py
8
3
https://github.com/home-assistant/core.git
2
21
0
11
35
Python
{ "docstring": "Return True if the client is connected to a device.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
def is_connected(self) -> bool: return self._backend is not None and self._backend.is_connected
3,823
21,422
120
pipenv/patched/notpip/_vendor/distlib/_backport/tarfile.py
42
14
def _create_gnu_long_header(cls, name, type, encoding, errors): name = name.encode(encoding, errors) + NUL info = {} info["name"] = "././@LongLink" info["type"] = type info["size"] = len(name) info["magic"] = GNU_MAGIC # create extended header + name blocks. return cls._create_he
Vendor in pip 22.1.2
_create_gnu_long_header
c69d55f7c82d5ae2cce542bcfb98d043ca4836a0
pipenv
tarfile.py
9
9
https://github.com/pypa/pipenv.git
1
78
0
32
126
Python
{ "docstring": "Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence\n for name.\n ", "language": "en", "n_whitespaces": 25, "n_words": 8, "vocab_size": 8 }
def _create_gnu_long_header(cls, name, type, encoding, errors): name = name.encode(encoding, errors) + NUL info = {} info["name"] = "././@LongLink" info["type"] = type info["size"] = len(name) info["magic"] = GNU_MAGIC # create extended header + name blocks. return cls._create_header(info, USTAR_FORMAT, encoding, errors) + \ cls._create_payload(name)
49,318
199,652
82
sympy/polys/appellseqs.py
43
15
def genocchi_poly(n, x=None, polys=False): if n < 0: raise ValueError("Cannot generate Genocchi polynomial of degree %s" % (n-1)) poly = DMP(dup_genocchi(int(n), ZZ),
Custom Appell sequence functions and a doctest
genocchi_poly
93e4d381d35cd4c21a3a8d713c157f8fb21f725b
sympy
appellseqs.py
14
9
https://github.com/sympy/sympy.git
4
87
0
36
139
Python
{ "docstring": "Generates the Genocchi polynomial `\\operatorname{G}_n(x)`.\n\n `\\operatorname{G}_n(x)` is twice the difference between the plain and\n central Bernoulli polynomials, so has degree `n-1`:\n\n .. math :: \\operatorname{G}_n(x) = 2 (\\operatorname{B}_n(x) -\n \\operatorname{B}_n^c(x))\n\n The factor of 2 in the definition endows `\\operatorname{G}_n(x)` with\n integer coefficients.\n\n Parameters\n ==========\n\n n : int\n Degree of the polynomial plus one.\n x : optional\n polys : bool, optional\n If True, return a Poly, otherwise (default) return an expression.\n ", "language": "en", "n_whitespaces": 128, "n_words": 70, "vocab_size": 58 }
def genocchi_poly(n, x=None, polys=False): if n < 0: raise ValueError("Cannot generate Genocchi polynomial of degree %s" % (n-1)) poly = DMP(dup_genocchi(int(n), ZZ), ZZ) if x is not None: poly = Poly.new(poly, x) else: poly = PurePoly.new(poly, Dummy('x')) return poly if polys else poly.as_expr()
26,585
119,314
237
jax/_src/third_party/scipy/signal_helper.py
118
18
def _triage_segments(window, nperseg, input_length): # parse window; if array like, then set nperseg = win.shape if isinstance(window, (str, tuple)): # if nperseg not specified if nperseg is None: nperseg = 256 # then change to default if nperseg > input_length: warnings.warn(f'nperseg = {nperseg} is greater than input length ' f' = {input_length}, using nperseg = {nperseg}') nperseg = input_length win = jnp.array(osp_signal.get_window(window, nperseg)) else: win = jnp.asarray(window) if len(win.shape) != 1: raise Va
Add some functions for spectral analysis. This commit adds "stft", "csd", and "welch" functions in scipy.signal.
_triage_segments
e085370ec4137cf0f73c5163cb664bc4e1c46082
jax
signal_helper.py
17
22
https://github.com/google/jax.git
9
142
0
69
248
Python
{ "docstring": "\n Parses window and nperseg arguments for spectrogram and _spectral_helper.\n This is a helper function, not meant to be called externally.\n Parameters\n ----------\n window : string, tuple, or ndarray\n If window is specified by a string or tuple and nperseg is not\n specified, nperseg is set to the default of 256 and returns a window of\n that length.\n If instead the window is array_like and nperseg is not specified, then\n nperseg is set to the length of the window. A ValueError is raised if\n the user supplies both an array_like window and a value for nperseg but\n nperseg does not equal the length of the window.\n nperseg : int\n Length of each segment\n input_length: int\n Length of input signal, i.e. x.shape[-1]. Used to test for errors.\n Returns\n -------\n win : ndarray\n window. If function was called with string or tuple than this will hold\n the actual array used as a window.\n nperseg : int\n Length of each segment. If window is str or tuple, nperseg is set to\n 256. If window is array_like, nperseg is set to the length of the\n 6\n window.\n ", "language": "en", "n_whitespaces": 270, "n_words": 182, "vocab_size": 88 }
def _triage_segments(window, nperseg, input_length): # parse window; if array like, then set nperseg = win.shape if isinstance(window, (str, tuple)): # if nperseg not specified if nperseg is None: nperseg = 256 # then change to default if nperseg > input_length: warnings.warn(f'nperseg = {nperseg} is greater than input length ' f' = {input_length}, using nperseg = {nperseg}') nperseg = input_length win = jnp.array(osp_signal.get_window(window, nperseg)) else: win = jnp.asarray(window) if len(win.shape) != 1: raise ValueError('window must be 1-D') if input_length < win.shape[-1]: raise ValueError('window is longer than input signal') if nperseg is None: nperseg = win.shape[0] elif nperseg is not None: if nperseg != win.shape[0]: raise ValueError("value specified for nperseg is different" " from length of window") return win, nperseg
6,345
34,811
113
src/transformers/modeling_utils.py
57
21
def register_for_auto_class(cls, auto_class="AutoModel"): if not isinstance(auto_class, str): auto_class = auto_class.__name__ import transformers.models.auto as auto_module if not hasattr(auto_module, auto_class): raise ValueError(f"{auto_class} is not a valid auto class.") cls._auto_class = auto_class # To update the docstring, we need to copy the method, otherwise we change the original docstring. PreTrainedModel.push_to_hub = copy_func(PreTrainedModel.push_to_hub) PreTrainedModel.push_to_hub.__doc__ = PreTra
Save code of registered custom models (#15379) * Allow dynamic modules to use relative imports * Work for configs * Fix last merge conflict * Save code of registered custom objects * Map strings to strings * Fix test * Add tokenizer * Rework tests * Tests * Ignore fixtures py files for tests * Tokenizer test + fix collection * With full path * Rework integration * Fix typo * Remove changes in conftest * Test for tokenizers * Add documentation * Update docs/source/custom_models.mdx Co-authored-by: Lysandre Debut <[email protected]> * Add file structure and file content * Add more doc * Style * Update docs/source/custom_models.mdx Co-authored-by: Suraj Patil <[email protected]> * Address review comments Co-authored-by: Lysandre Debut <[email protected]> Co-authored-by: Suraj Patil <[email protected]>
register_for_auto_class
44b21f117bcf71e3d88a11c3523c94b27949fdbf
transformers
modeling_utils.py
11
7
https://github.com/huggingface/transformers.git
3
52
0
47
150
Python
{ "docstring": "\n Register this class with a given auto class. This should only be used for custom models as the ones in the\n library are already mapped with an auto class.\n\n Args:\n auto_class (`str` or `type`, *optional*, defaults to `\"AutoModel\"`):\n The auto class to register this new model with.\n ", "language": "en", "n_whitespaces": 102, "n_words": 47, "vocab_size": 39 }
def register_for_auto_class(cls, auto_class="AutoModel"): if not isinstance(auto_class, str): auto_class = auto_class.__name__ import transformers.models.auto as auto_module if not hasattr(auto_module, auto_class): raise ValueError(f"{auto_class} is not a valid auto class.") cls._auto_class = auto_class # To update the docstring, we need to copy the method, otherwise we change the original docstring. PreTrainedModel.push_to_hub = copy_func(PreTrainedModel.push_to_hub) PreTrainedModel.push_to_hub.__doc__ = PreTrainedModel.push_to_hub.__doc__.format( object="model", object_class="AutoModel", object_files="model checkpoint" )
120,941
336,999
120
src/diffusers/utils/import_utils.py
66
20
def is_accelerate_available(): return _accelerate_available # docstyle-ignore FLAX_IMPORT_ERROR = # docstyle-ignore INFLECT_IMPORT_ERROR = # docstyle-ignore PYTORCH_IMPORT_ERROR = # docstyle-ignore ONNX_IMPORT_ERROR = # docstyle-ignore SCIPY_IMPORT_ERROR = # docstyle-ignore TENSORFLOW_IMPORT_ERROR = # docstyle-ignore TRANSFORMERS_IMPORT_ERROR = # docstyle-ignore UNIDECODE_IMP
add accelerate to load models with smaller memory footprint (#361) * add accelerate to load models with smaller memory footprint * remove low_cpu_mem_usage as it is reduntant * move accelerate init weights context to modelling utils * add test to ensure results are the same when loading with accelerate * add tests to ensure ram usage gets lower when using accelerate * move accelerate logic to single snippet under modelling utils and remove it from configuration utils * format code using to pass quality check * fix imports with isor * add accelerate to test extra deps * only import accelerate if device_map is set to auto * move accelerate availability check to diffusers import utils * format code Co-authored-by: Patrick von Platen <[email protected]>
is_accelerate_available
4d1cce2fd01056515f0f353322a231164a4a5c5d
diffusers
import_utils.py
9
2
https://github.com/huggingface/diffusers.git
1
6
0
44
199
Python
{ "docstring": "\n{0} requires the FLAX library but it was not found in your environment. Checkout the instructions on the\ninstallation page: https://github.com/google/flax and follow the ones that match your environment.\n\n{0} requires the inflect library but it was not found in your environment. You can install it with pip: `pip install\ninflect`\n\n{0} requires the PyTorch library but it was not found in your environment. Checkout the instructions on the\ninstallation page: https://pytorch.org/get-started/locally/ and follow the ones that match your environment.\n\n{0} requires the onnxruntime library but it was not found in your environment. You can install it with pip: `pip\ninstall onnxruntime`\n\n{0} requires the scipy library but it was not found in your environment. You can install it with pip: `pip install\nscipy`\n\n{0} requires the TensorFlow library but it was not found in your environment. Checkout the instructions on the\ninstallation page: https://www.tensorflow.org/install and follow the ones that match your environment.\n\n{0} requires the transformers library but it was not found in your environment. You can install it with pip: `pip\ninstall transformers`\n\n{0} requires the unidecode library but it was not found in your environment. You can install it with pip: `pip install\nUnidecode`\n", "language": "en", "n_whitespaces": 181, "n_words": 197, "vocab_size": 44 }
def is_accelerate_available(): return _accelerate_available # docstyle-ignore FLAX_IMPORT_ERROR = # docstyle-ignore INFLECT_IMPORT_ERROR = # docstyle-ignore PYTORCH_IMPORT_ERROR = # docstyle-ignore ONNX_IMPORT_ERROR = # docstyle-ignore SCIPY_IMPORT_ERROR = # docstyle-ignore TENSORFLOW_IMPORT_ERROR = # docstyle-ignore TRANSFORMERS_IMPORT_ERROR = # docstyle-ignore UNIDECODE_IMPORT_ERROR = BACKENDS_MAPPING = OrderedDict( [ ("flax", (is_flax_available, FLAX_IMPORT_ERROR)), ("inflect", (is_inflect_available, INFLECT_IMPORT_ERROR)), ("onnx", (is_onnx_available, ONNX_IMPORT_ERROR)), ("scipy", (is_scipy_available, SCIPY_IMPORT_ERROR)), ("tf", (is_tf_available, TENSORFLOW_IMPORT_ERROR)), ("torch", (is_torch_available, PYTORCH_IMPORT_ERROR)), ("transformers", (is_transformers_available, TRANSFORMERS_IMPORT_ERROR)), ("unidecode", (is_unidecode_available, UNIDECODE_IMPORT_ERROR)), ] )
23,143
108,330
41
lib/matplotlib/text.py
13
7
def set_horizontalalignment(self, align): _api.check_i
Document text alignment Closes #21571.
set_horizontalalignment
c0cb163c627fe52e38311954226e3349f34f6914
matplotlib
text.py
9
4
https://github.com/matplotlib/matplotlib.git
1
34
0
12
59
Python
{ "docstring": "\n Set the horizontal alignment relative to the anchor point.\n\n See also :doc:`/gallery/text_labels_and_annotations/text_alignment`.\n\n Parameters\n ----------\n align : {'left', 'center', 'right'}\n ", "language": "en", "n_whitespaces": 62, "n_words": 19, "vocab_size": 18 }
def set_horizontalalignment(self, align): _api.check_in_list(['center', 'right', 'left'], align=align) self._horizontalalignment = align self.stale = True
56,128
220,817
14
python3.10.4/Lib/asyncio/tasks.py
9
4
def _wrap_awaitable(awaitable): return (yield from awaitable.__await__()) _wrap_awaitable._is_coroutine = _is_coroutine
add python 3.10.4 for windows
_wrap_awaitable
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
tasks.py
9
2
https://github.com/XX-net/XX-Net.git
1
16
0
9
37
Python
{ "docstring": "Helper for asyncio.ensure_future().\n\n Wraps awaitable (an object with __await__) into a coroutine\n that will later be wrapped in a Task by ensure_future().\n ", "language": "en", "n_whitespaces": 31, "n_words": 22, "vocab_size": 21 }
def _wrap_awaitable(awaitable): return (yield from awaitable.__await__()) _wrap_awaitable._is_coroutine = _is_coroutine
34,216
148,280
31
python/ray/_private/thirdparty/pathspec/util.py
43
10
def normalize_file(file, separators=None): # Normalize path separators.
[Bugfix] fix invalid excluding of Black (#24042) - We should use `--force-exclude` when we pass code path explicitly https://black.readthedocs.io/en/stable/usage_and_configuration/the_basics.html?highlight=--force-exclude#command-line-options - Recover the files in `python/ray/_private/thirdparty` which has been formatted in the PR https://github.com/ray-project/ray/pull/21975 by mistake.
normalize_file
0e6c042e29cbbe429d81c9c1af3c75c261f00980
ray
util.py
11
9
https://github.com/ray-project/ray.git
4
58
0
32
98
Python
{ "docstring": "\n\tNormalizes the file path to use the POSIX path separator (i.e., ``'/'``).\n\n\t*file* (:class:`str` or :class:`pathlib.PurePath`) is the file path.\n\n\t*separators* (:class:`~collections.abc.Collection` of :class:`str`; or\n\t:data:`None`) optionally contains the path separators to normalize.\n\tThis does not need to include the POSIX path separator (``'/'``), but\n\tincluding it will not affect the results. Default is :data:`None` for\n\t:data:`NORMALIZE_PATH_SEPS`. To prevent normalization, pass an empty\n\tcontainer (e.g., an empty tuple ``()``).\n\n\tReturns the normalized file path (:class:`str`).\n\t", "language": "en", "n_whitespaces": 66, "n_words": 75, "vocab_size": 54 }
def normalize_file(file, separators=None): # Normalize path separators. if separators is None: separators = NORMALIZE_PATH_SEPS # Convert path object to string. norm_file = str(file) for sep in separators: norm_file = norm_file.replace(sep, posixpath.sep) # Remove current directory prefix. if norm_file.startswith('./'): norm_file = norm_file[2:] return norm_file
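Editor's note: the behaviour documented above can be shown with a small standalone sketch; the separator tuple is an assumption standing in for NORMALIZE_PATH_SEPS, and the helper name is hypothetical.

import posixpath

def normalize_file_demo(file, separators=("\\",)):
    # Convert a path-like object to a string, then force POSIX separators.
    norm_file = str(file)
    for sep in separators:
        norm_file = norm_file.replace(sep, posixpath.sep)
    # Drop a leading './' current-directory prefix, as the record's code does.
    if norm_file.startswith("./"):
        norm_file = norm_file[2:]
    return norm_file

assert normalize_file_demo(".\\src\\util.py") == "src/util.py"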
73,332
250,235
29
synapse/types/state.py
15
8
def wildcard_types(self) -> List[str]: return [t for t, state_keys in self.types.items() if state_keys is No
Allow selecting "prejoin" events by state keys (#14642) * Declare new config * Parse new config * Read new config * Don't use trial/our TestCase where it's not needed Before: ``` $ time trial tests/events/test_utils.py > /dev/null real 0m2.277s user 0m2.186s sys 0m0.083s ``` After: ``` $ time trial tests/events/test_utils.py > /dev/null real 0m0.566s user 0m0.508s sys 0m0.056s ``` * Helper to upsert to event fields without exceeding size limits. * Use helper when adding invite/knock state Now that we allow admins to include events in prejoin room state with arbitrary state keys, be a good Matrix citizen and ensure they don't accidentally create an oversized event. * Changelog * Move StateFilter tests should have done this in #14668 * Add extra methods to StateFilter * Use StateFilter * Ensure test file enforces typed defs; alphabetise * Workaround surprising get_current_state_ids * Whoops, fix mypy
wildcard_types
e2a1adbf5d11288f2134ced1f84c6ffdd91a9357
synapse
state.py
10
8
https://github.com/matrix-org/synapse.git
3
31
0
14
50
Python
{ "docstring": "Returns a list of event types which require us to fetch all state keys.\n This will be empty unless `has_wildcards` returns True.\n\n Returns:\n A list of event types.\n ", "language": "en", "n_whitespaces": 60, "n_words": 28, "vocab_size": 25 }
def wildcard_types(self) -> List[str]: return [t for t, state_keys in self.types.items() if state_keys is None]
25,438
115,389
45
mindsdb/integrations/handlers/snowflake_handler/snowflake_handler.py
17
7
def get_columns(self, table_name) -> Response:
Add snowflake connector
get_columns
0e22eac78f7dd836a0e16b343d1bd02d039a3b6b
mindsdb
snowflake_handler.py
8
7
https://github.com/mindsdb/mindsdb.git
1
24
0
15
45
Python
{ "docstring": "\n List the columns in the tabels for which the user have access\n ", "language": "en", "n_whitespaces": 27, "n_words": 12, "vocab_size": 10 }
def get_columns(self, table_name) -> Response: q = f"SHOW COLUMNS IN TABLE {table_name};" result = self.native_query(q) return result
13,939
65,562
27
erpnext/buying/doctype/supplier_scorecard_variable/supplier_scorecard_variable.py
38
12
def get_on_time_shipments(scorecard): supplier = frappe.get_d
style: format code with black
get_on_time_shipments
494bd9ef78313436f0424b918f200dab8fc7c20b
erpnext
supplier_scorecard_variable.py
13
26
https://github.com/frappe/erpnext.git
2
68
0
33
114
Python
{ "docstring": "Gets the number of late shipments (counting each item) in the period (based on Purchase Receipts vs POs)\n\t\t\tSELECT\n\t\t\t\tCOUNT(pr_item.qty)\n\t\t\tFROM\n\t\t\t\t`tabPurchase Order Item` po_item,\n\t\t\t\t`tabPurchase Receipt Item` pr_item,\n\t\t\t\t`tabPurchase Order` po,\n\t\t\t\t`tabPurchase Receipt` pr\n\t\t\tWHERE\n\t\t\t\tpo.supplier = %(supplier)s\n\t\t\t\tAND po_item.schedule_date BETWEEN %(start_date)s AND %(end_date)s\n\t\t\t\tAND po_item.schedule_date <= pr.posting_date\n\t\t\t\tAND po_item.qty = pr_item.qty\n\t\t\t\tAND pr_item.docstatus = 1\n\t\t\t\tAND pr_item.purchase_order_item = po_item.name\n\t\t\t\tAND po_item.parent = po.name\n\t\t\t\tAND pr_item.parent = pr.name", "language": "en", "n_whitespaces": 52, "n_words": 69, "vocab_size": 51 }
def get_on_time_shipments(scorecard): supplier = frappe.get_doc("Supplier", scorecard.supplier) # Look up all PO Items with delivery dates between our dates total_items_delivered_on_time = frappe.db.sql( , {"supplier": supplier.name, "start_date": scorecard.start_date, "end_date": scorecard.end_date}, as_dict=0, )[0][0] if not total_items_delivered_on_time: total_items_delivered_on_time = 0 return total_items_delivered_on_time
17,931
85,099
23
zerver/webhooks/bitbucket3/tests.py
9
5
def test_commit_comment_deleted(self) -> None: expected_message = self.check_webhook("commit_comment_deleted", TOPIC, expected_message)
webhooks: Pick a more reasonable length for short sha. 7 characters are not enough for large projects, so we change it to reasonably longer. As an example, The Linux kernel needs at least 11 characters of sha in its shortened form to identify a revision. We pick 11 so it should work for most of the projects. Signed-off-by: Zixuan James Li <[email protected]>
test_commit_comment_deleted
4e4689949438735622bdf669f05d218c671e7e01
zulip
tests.py
8
3
https://github.com/zulip/zulip.git
1
20
0
9
38
Python
{ "docstring": "[hypro999](http://139.59.64.214:7990/users/hypro999) deleted their comment on [508d1b67f1f](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/commits/508d1b67f1f8f3a25f543a030a7a178894aa9907):\\n~~~ quote\\n~~Just an arbitrary comment on a commit. Nothing to see here...~~\\n~~~", "language": "en", "n_whitespaces": 16, "n_words": 17, "vocab_size": 15 }
def test_commit_comment_deleted(self) -> None: expected_message = self.check_webhook("commit_comment_deleted", TOPIC, expected_message)
11,760
58,372
235
src/prefect/agent.py
42
16
async def get_work_queues(self) -> Optional[UUID]: work_queues = [] for name in self.work_queues: try: # support IDs and names if isinstance(name, UUID): work_queue = await self.client.read_work_queue(id=name) else: work_queue = await self.client.read_work_queue_by_name(name) except ObjectNotFound: work_queue = await self.client.create_work_queue( name=name, return_id=False
Agents support multiple queues
get_work_queues
8a4560e237b90a7b64c6bb77b6cb3ee9a6648e33
prefect
agent.py
17
17
https://github.com/PrefectHQ/prefect.git
4
86
0
34
142
Python
{ "docstring": "\n Loads the work queue objects corresponding to the agent's target work queues. If any of them don't exist, they are created.\n ", "language": "en", "n_whitespaces": 36, "n_words": 21, "vocab_size": 19 }
async def get_work_queues(self) -> Optional[UUID]: work_queues = [] for name in self.work_queues: try: # support IDs and names if isinstance(name, UUID): work_queue = await self.client.read_work_queue(id=name) else: work_queue = await self.client.read_work_queue_by_name(name) except ObjectNotFound: work_queue = await self.client.create_work_queue( name=name, return_id=False ) work_queues.append(work_queue) return work_queues
458
3,356
53
airbyte-cdk/python/unit_tests/sources/test_abstract_source.py
26
24
def test_read_nonexistent_stream_raises_exception(mocker): s1 = MockStream(name="s1") s2 = MockStream(name="this_stream_doesnt_exist_in_the_source") mocker.patch.object(MockStream, "get_json_schema", return_value={})
CDK: Fix typing errors (#9037) * fix typing, drop AirbyteLogger * format * bump the version * use logger instead of fixture logger Co-authored-by: Eugene Kulak <[email protected]> Co-authored-by: auganbay <[email protected]>
test_read_nonexistent_stream_raises_exception
f83eca58eaf2129d21b5796a301732ab22675130
airbyte
test_abstract_source.py
13
8
https://github.com/airbytehq/airbyte.git
1
86
0
22
150
Python
{ "docstring": "Tests that attempting to sync a stream which the source does not return from the `streams` method raises an exception", "language": "en", "n_whitespaces": 19, "n_words": 20, "vocab_size": 19 }
def test_read_nonexistent_stream_raises_exception(mocker): s1 = MockStream(name="s1") s2 = MockStream(name="this_stream_doesnt_exist_in_the_source") mocker.patch.object(MockStream, "get_json_schema", return_value={}) src = MockSource(streams=[s1]) catalog = ConfiguredAirbyteCatalog(streams=[_configured_stream(s2, SyncMode.full_refresh)]) with pytest.raises(KeyError): list(src.read(logger, {}, catalog)) GLOBAL_EMITTED_AT = 1
12,831
62,021
339
.venv/lib/python3.8/site-packages/pip/_vendor/distlib/locators.py
89
24
def _should_queue(self, link, referrer, rel): scheme, netloc, path, _, _, _ = urlparse(link) if path.endswith(self.source_extensions + self.binary_extensions +
upd; format
_should_queue
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
transferlearning
locators.py
13
24
https://github.com/jindongwang/transferlearning.git
9
168
0
51
272
Python
{ "docstring": "\n Determine whether a link URL from a referring page and with a\n particular \"rel\" attribute should be queued for scraping.\n ", "language": "en", "n_whitespaces": 42, "n_words": 20, "vocab_size": 18 }
def _should_queue(self, link, referrer, rel): scheme, netloc, path, _, _, _ = urlparse(link) if path.endswith(self.source_extensions + self.binary_extensions + self.excluded_extensions): result = False elif self.skip_externals and not link.startswith(self.base_url): result = False elif not referrer.startswith(self.base_url): result = False elif rel not in ('homepage', 'download'): result = False elif scheme not in ('http', 'https', 'ftp'): result = False elif self._is_platform_dependent(link): result = False else: host = netloc.split(':', 1)[0] if host.lower() == 'localhost': result = False else: result = True logger.debug('should_queue: %s (%s) from %s -> %s', link, rel, referrer, result) return result
24,762
112,790
99
nni/runtime/tuner_command_channel/legacy.py
48
13
def receive(): header = _in_file.read(16) _logger.debug('Received command, header: [%s]', header) if header is None or len(header) < 16: # Pipe EOF encountered _logger.
WebSocket (step 1) - Python client (#4806)
receive
f60d3d5e294510d99c65ba3292822cbb922adbf8
nni
legacy.py
10
12
https://github.com/microsoft/nni.git
3
91
0
34
157
Python
{ "docstring": "Receive a command from Training Service.\n Returns a tuple of command (CommandType) and payload (str)\n ", "language": "en", "n_whitespaces": 21, "n_words": 15, "vocab_size": 13 }
def receive(): header = _in_file.read(16) _logger.debug('Received command, header: [%s]', header) if header is None or len(header) < 16: # Pipe EOF encountered _logger.debug('Pipe EOF encountered') return None, None length = int(header[2:]) data = _in_file.read(length) command = CommandType(header[:2]) data = data.decode('utf8') _logger.debug('Received command, data: [%s]', data) return command, data
@register.filter()
77,732
264,446
17
netbox/utilities/templatetags/builtins/filters.py
12
8
def bettertitle(value): return ' '.join([w[0].upper() + w[1:] for w in value.s
Closes #8600: Document built-in template tags & filters
bettertitle
7c105019d8ae9205051c302e7499b33a455f9176
netbox
filters.py
12
2
https://github.com/netbox-community/netbox.git
2
36
1
12
72
Python
{ "docstring": "\n Alternative to the builtin title(). Ensures that the first letter of each word is uppercase but retains the\n original case of all others.\n ", "language": "en", "n_whitespaces": 33, "n_words": 23, "vocab_size": 20 }
def bettertitle(value): return ' '.join([w[0].upper() + w[1:] for w in value.split()]) @register.filter()
12,009
60,201
161
code/deep/BJMMD/caffe/examples/pycaffe/layers/pascal_multilabel_datalayers.py
45
11
def load_pascal_annotation(index, pascal_root): classes = ('__background__', # always index 0 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'trai
Balanced joint maximum mean discrepancy for deep transfer learning
load_pascal_annotation
cc4d0564756ca067516f71718a3d135996525909
transferlearning
pascal_multilabel_datalayers.py
12
33
https://github.com/jindongwang/transferlearning.git
2
317
0
41
153
Python
{ "docstring": "\n This code is borrowed from Ross Girshick's FAST-RCNN code\n (https://github.com/rbgirshick/fast-rcnn).\n It parses the PASCAL .xml metadata files.\n See publication for further details: (http://arxiv.org/abs/1504.08083).\n\n Thanks Ross!\n\n ", "language": "en", "n_whitespaces": 44, "n_words": 25, "vocab_size": 24 }
def load_pascal_annotation(index, pascal_root): classes = ('__background__', # always index 0 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor') class_to_ind = dict(zip(classes, xrange(21))) filename = osp.join(pascal_root, 'Annotations', index + '.xml') # print 'Loading: {}'.format(filename)
36,280
155,189
75
modin/core/execution/unidist/implementations/pandas_on_unidist/partitioning/partition.py
15
13
def get(self): logger = get_logger() logger.debug(f"ENTER::Partition.get::{self._identity}") if len(self.call_queue):
FEAT-#5053: Add pandas on unidist execution with MPI backend (#5059) Signed-off-by: Igoshev, Iaroslav <[email protected]>
get
193505fdf0c984743397ba3df56262f30aee13a8
modin
partition.py
10
8
https://github.com/modin-project/modin.git
2
50
0
13
101
Python
{ "docstring": "\n Get the object wrapped by this partition out of the object store.\n\n Returns\n -------\n pandas.DataFrame\n The object from the object store.\n ", "language": "en", "n_whitespaces": 68, "n_words": 21, "vocab_size": 15 }
def get(self): logger = get_logger() logger.debug(f"ENTER::Partition.get::{self._identity}") if len(self.call_queue): self.drain_call_queue() result = UnidistWrapper.materialize(self._data) logger.debug(f"EXIT::Partition.get::{self._identity}") return result
51,981
207,491
120
tests/admin_views/test_actions.py
27
13
def test_custom_function_action_no_perm_response(self): action_data = { ACTION_CHECKBOX_NAME: [self.s1.pk], "action": "no_perm", "index": 0, } response = self.client.post( reverse("admin:admin_views_externalsubscriber_changelist
Refs #33476 -- Reformatted code with Black.
test_custom_function_action_no_perm_response
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
test_actions.py
11
11
https://github.com/django/django.git
1
64
0
25
107
Python
{ "docstring": "A custom action may returns an HttpResponse with a 403 code.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
def test_custom_function_action_no_perm_response(self): action_data = { ACTION_CHECKBOX_NAME: [self.s1.pk], "action": "no_perm", "index": 0, } response = self.client.post( reverse("admin:admin_views_externalsubscriber_changelist"), action_data ) self.assertEqual(response.status_code, 403) self.assertEqual(response.content, b"No permission to perform this action")
47,891
196,391
22
sympy/matrices/matrices.py
9
5
def limit(self, *args):
Moved imports to higher level
limit
59d22b6bb7287613d598611027f640d068ca5748
sympy
matrices.py
11
2
https://github.com/sympy/sympy.git
1
25
0
9
44
Python
{ "docstring": "Calculate the limit of each element in the matrix.\n ``args`` will be passed to the ``limit`` function.\n\n Examples\n ========\n\n >>> from sympy import Matrix\n >>> from sympy.abc import x, y\n >>> M = Matrix([[x, y], [1, 0]])\n >>> M.limit(x, 2)\n Matrix([\n [2, y],\n [1, 0]])\n\n See Also\n ========\n\n integrate\n diff\n ", "language": "en", "n_whitespaces": 155, "n_words": 50, "vocab_size": 39 }
def limit(self, *args): return self.applyfunc(lambda x: x.limit(*args)) # https://github.com/sympy/sympy/pull/12854
89,988
290,875
91
tests/components/number/test_init.py
26
12
def test_device_classes_aligned(): non_numeric_device_classes = { SensorDeviceClass.DATE, SensorDeviceClass.DURATION, SensorDeviceClass.TIMESTAMP, } for
Align number and sensor device classes (#81909) * Align number and sensor device classes * Add tests * Tweak tests
test_device_classes_aligned
b6586d5c34bf7ea5c30fbb1b62c438078ea14f39
core
test_init.py
12
11
https://github.com/home-assistant/core.git
3
56
0
23
86
Python
{ "docstring": "Make sure all sensor device classes are also available in NumberDeviceClass.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
def test_device_classes_aligned(): non_numeric_device_classes = { SensorDeviceClass.DATE, SensorDeviceClass.DURATION, SensorDeviceClass.TIMESTAMP, } for device_class in SensorDeviceClass: if device_class in non_numeric_device_classes: continue assert hasattr(NumberDeviceClass, device_class.name) assert getattr(NumberDeviceClass, device_class.name).value == device_class.value
5,628
30,539
49
tests/test_main.py
15
6
def valid_tess_config(outdir): cfg_file = outdir / 'test.cfg'
tests: Extract some test fixtures for better clarity
valid_tess_config
5d0cc0a092f93640e1d83baaf1c738768481d208
OCRmyPDF
test_main.py
11
11
https://github.com/ocrmypdf/OCRmyPDF.git
1
28
0
14
57
Python
{ "docstring": "\\\nload_system_dawg 0\nlanguage_model_penalty_non_dict_word 0\nlanguage_model_penalty_non_freq_dict_word 0\n", "language": "en", "n_whitespaces": 3, "n_words": 7, "vocab_size": 5 }
def valid_tess_config(outdir): cfg_file = outdir / 'test.cfg' with cfg_file.open('w') as f: f.write( ) yield cfg_file
50,955
204,883
211
django/db/backends/base/operations.py
59
17
def year_lookup_bounds_for_datetime_field(self, value, iso_year=False): if iso_year: first = datetime.datetime.fromisocalendar(value, 1, 1) second = datetime.datetime.fromisocalendar(
Refs #33476 -- Reformatted code with Black.
year_lookup_bounds_for_datetime_field
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
operations.py
12
16
https://github.com/django/django.git
3
142
0
37
212
Python
{ "docstring": "\n Return a two-elements list with the lower and upper bound to be used\n with a BETWEEN operator to query a DateTimeField value using a year\n lookup.\n\n `value` is an int, containing the looked-up year.\n If `iso_year` is True, return bounds for ISO-8601 week-numbering years.\n ", "language": "en", "n_whitespaces": 87, "n_words": 44, "vocab_size": 37 }
def year_lookup_bounds_for_datetime_field(self, value, iso_year=False): if iso_year: first = datetime.datetime.fromisocalendar(value, 1, 1) second = datetime.datetime.fromisocalendar( value + 1, 1, 1 ) - datetime.timedelta(microseconds=1) else: first = datetime.datetime(value, 1, 1) second = datetime.datetime(value, 12, 31, 23, 59, 59, 999999) if settings.USE_TZ: tz = timezone.get_current_timezone() first = timezone.make_aware(first, tz) second = timezone.make_aware(second, tz) first = self.adapt_datetimefield_value(first) second = self.adapt_datetimefield_value(second) return [first, second]
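Editor's note: the same bounds computation can be sketched without Django (timezone handling omitted; the function name is invented for the example).

import datetime

def year_bounds(value, iso_year=False):
    if iso_year:
        # First instant of the ISO year ...
        first = datetime.datetime.fromisocalendar(value, 1, 1)
        # ... up to one microsecond before the next ISO year starts.
        second = datetime.datetime.fromisocalendar(value + 1, 1, 1) - datetime.timedelta(microseconds=1)
    else:
        first = datetime.datetime(value, 1, 1)
        second = datetime.datetime(value, 12, 31, 23, 59, 59, 999999)
    return [first, second]

print(year_bounds(2020, iso_year=True))  # ISO year 2020 starts on 2019-12-30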
49,350
199,694
20
sympy/polys/orthopolys.py
15
7
def gegenbauer_poly(n, a, x=None, polys=False): r return named_poly(n, dup_ge
Run orthopolys and appellseqs through a common interface Including unifying the two Chebyshev generators into one function. There are also two kinds of Hermite polynomials, and they too share the same recurrence, but the second type He_n(x) (aka the probabilist, reduced or small polynomials) will not be added here.
gegenbauer_poly
d1d46df73ebaad94089847558d00a8b7269f554d
sympy
orthopolys.py
8
15
https://github.com/sympy/sympy.git
1
36
0
15
50
Python
{ "docstring": "Generates the Gegenbauer polynomial `C_n^{(a)}(x)`.\n\n Parameters\n ==========\n\n n : int\n Degree of the polynomial.\n x : optional\n a\n Decides minimal domain for the list of coefficients.\n polys : bool, optional\n If True, return a Poly, otherwise (default) return an expression.\n ", "language": "en", "n_whitespaces": 82, "n_words": 40, "vocab_size": 32 }
def gegenbauer_poly(n, a, x=None, polys=False): r return named_poly(n, dup_gegenbauer, None, "Gegenbauer polynomial", (x, a), polys)
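Editor's note: a hedged usage sketch of the generator above, imported from the module path given in the record; the expected value relies on the standard identity that C_n^(1/2) equals the Legendre polynomial P_n.

from sympy import Rational
from sympy.abc import x
from sympy.polys.orthopolys import gegenbauer_poly

# With a = 1/2 the Gegenbauer polynomial reduces to the Legendre polynomial,
# so degree 2 should come out as 3*x**2/2 - 1/2 (up to SymPy's normalization).
expr = gegenbauer_poly(2, Rational(1, 2), x)
print(expr)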
96,818
297,864
441
homeassistant/components/homekit_controller/connection.py
68
13
async def async_update(self, now=None): if not self.pollable_characteristics: self.async_update_available_state() _LOGGER.debug( "HomeKit connection not polling any characteristics: %s", self.unique_id ) return if self._polling_lock.locked(): if not self._polling_lock_warned: _LOGGER.warning( ( "HomeKit controller update skipped as previous poll still in" " flight: %s" ), self.unique_id, ) self._polling_lock_warned = True return if self._polling_lock_warned: _LOGGER.info( ( "HomeKit controller no longer detecting back pressure - not" "
String formatting and max line length - Part 2 (#84393)
async_update
cb13418babd21a1e9584978b0c523f1b1e4e1cb0
core
connection.py
14
44
https://github.com/home-assistant/core.git
8
177
0
49
158
Python
{ "docstring": "Poll state of all entities attached to this bridge/accessory.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
async def async_update(self, now=None): if not self.pollable_characteristics: self.async_update_available_state() _LOGGER.debug( "HomeKit connection not polling any characteristics: %s", self.unique_id ) return if self._polling_lock.locked(): if not self._polling_lock_warned: _LOGGER.warning( ( "HomeKit controller update skipped as previous poll still in" " flight: %s" ), self.unique_id, ) self._polling_lock_warned = True return if self._polling_lock_warned: _LOGGER.info( ( "HomeKit controller no longer detecting back pressure - not" " skipping poll: %s" ), self.unique_id, ) self._polling_lock_warned = False
12,324
60,892
162
.venv/lib/python3.8/site-packages/pip/_internal/network/lazy_wheel.py
58
16
def _merge(self, start, end, left, right): # type: (int, int, int, int) -> Iterator[Tuple[int, int]] lslice, rslice = self._left[left:right], self._right[left:right] i = start = min([start]+lslice[:1]) end = max([end]+rslice[-1:]) for j, k in zip(lslice, rslice): if j > i: yield i, j-1
upd; format
_merge
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
transferlearning
lazy_wheel.py
12
11
https://github.com/jindongwang/transferlearning.git
4
128
0
43
197
Python
{ "docstring": "Return an iterator of intervals to be fetched.\n\n Args:\n start (int): Start of needed interval\n end (int): End of needed interval\n left (int): Index of first overlapping downloaded data\n right (int): Index after last overlapping downloaded data\n ", "language": "en", "n_whitespaces": 95, "n_words": 37, "vocab_size": 25 }
def _merge(self, start, end, left, right): # type: (int, int, int, int) -> Iterator[Tuple[int, int]] lslice, rslice = self._left[left:right], self._right[left:right] i = start = min([start]+lslice[:1]) end = max([end]+rslice[-1:]) for j, k in zip(lslice, rslice): if j > i: yield i, j-1 i = k + 1 if i <= end: yield i, end self._left[left:right], self._right[left:right] = [start], [end]
78,643
266,891
38
lib/ansible/utils/collection_loader/_collection_finder.py
17
7
def is_python_identifier(self): # type: (str) -> bool # Ref: https://stackoverflow.com/a/55802320/5
Code cleanup for type hinting issues.
is_python_identifier
4867ac217ba0164b433d0927488d153e116d175d
ansible
_collection_finder.py
9
2
https://github.com/ansible/ansible.git
1
18
0
16
47
Python
{ "docstring": "Determine whether the given string is a Python identifier.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
def is_python_identifier(self): # type: (str) -> bool # Ref: https://stackoverflow.com/a/55802320/595220 return bool(re.match(_VALID_IDENTIFIER_STRING_REGEX, self)) PB_EXTENSIONS = ('.yml', '.yaml')
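Editor's note: the documented check can be illustrated with the standard library alone; the helper is hypothetical, and the explicit keyword exclusion is an assumption that the record's regex may or may not make.

import keyword

def is_python_identifier_demo(name: str) -> bool:
    # str.isidentifier() accepts keywords such as 'class', so they are
    # excluded here explicitly; the record's regex may handle them differently.
    return name.isidentifier() and not keyword.iskeyword(name)

assert is_python_identifier_demo("my_collection") is True
assert is_python_identifier_demo("2fast") is False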
12,333
60,901
84
.venv/lib/python3.8/site-packages/pip/_internal/network/lazy_wheel.py
35
12
def _stream_response(self, start, end, base_headers=HEADERS): # type: (int, int, Dict[str, str]) -> Response headers = base_headers.copy() headers['Range'] = f'bytes={start}-{end}'
upd; format
_stream_response
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
transferlearning
lazy_wheel.py
8
5
https://github.com/jindongwang/transferlearning.git
1
53
0
32
97
Python
{ "docstring": "Return HTTP response to a range request from start to end.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 10 }
def _stream_response(self, start, end, base_headers=HEADERS): # type: (int, int, Dict[str, str]) -> Response headers = base_headers.copy() headers['Range'] = f'bytes={start}-{end}' # TODO: Get range requests to be correctly cached headers['Cache-Control'] = 'no-cache' return self._session.get(self._url, headers=headers, stream=True)
117,562
321,142
856
qutebrowser/browser/webengine/webenginetab.py
203
44
def _inject_greasemonkey_scripts(self, scripts): if sip.isdeleted(self._widget): return # Since we are inserting scripts into a per-tab collection, # rather than just injecting scripts on page load, we need to # make sure we replace existing scripts, not just add new ones. # While, taking care not to remove any other scripts that might # have been added elsewhere, like the one for stylesheets. page_scripts = self._widget.page().scripts() self._remove_all_greasemonkey_scripts() seen_names = set() for script in scripts: while script.full_name() in seen_names: script.dedup_suffix += 1 seen_names.add(script.full_name()) new_script = QWebEngineScript() try: world = int(script.jsworld) if not 0 <= world <= qtutils.MAX_WORLD_ID: log.greasemonkey.error( f"script {script.name} has invalid value for '@qute-js-world'" f": {script.jsworld}, should be between 0 and " f"{qtutils.MAX_WORLD_ID}") continue except ValueError: try: world = _JS_WORLD_MAP[usertypes.JsWorld[script.jsworld.lower()]] except KeyError: log.greasemonkey.error( f"script {script.name} has invalid value for '@qute-js-world'" f": {script.jsworld}") continue new_script.setWorldId(world) # Corresponds to "@run-at document-end" which is the default according to # https://wiki.greasespot.net/Metadata_Block#.40run-at - however, # QtWebEngine uses QWebEngineScript.InjectionPoint.Deferred (@run-at document-idle) as # default. # # NOTE that this needs to be done before setSourceCode, so that # QtWebEngine's pars
Run scripts/dev/rewrite_enums.py
_inject_greasemonkey_scripts
0877fb0d78635692e481c8bde224fac5ad0dd430
qutebrowser
webenginetab.py
19
38
https://github.com/qutebrowser/qutebrowser.git
8
232
0
145
453
Python
{ "docstring": "Register user JavaScript files with the current tab.\n\n Args:\n scripts: A list of GreasemonkeyScripts.\n ", "language": "en", "n_whitespaces": 39, "n_words": 14, "vocab_size": 14 }
def _inject_greasemonkey_scripts(self, scripts): if sip.isdeleted(self._widget): return # Since we are inserting scripts into a per-tab collection, # rather than just injecting scripts on page load, we need to # make sure we replace existing scripts, not just add new ones. # While, taking care not to remove any other scripts that might # have been added elsewhere, like the one for stylesheets. page_scripts = self._widget.page().scripts() self._remove_all_greasemonkey_scripts() seen_names = set() for script in scripts: while script.full_name() in seen_names: script.dedup_suffix += 1 seen_names.add(script.full_name()) new_script = QWebEngineScript() try: world = int(script.jsworld) if not 0 <= world <= qtutils.MAX_WORLD_ID: log.greasemonkey.error( f"script {script.name} has invalid value for '@qute-js-world'" f": {script.jsworld}, should be between 0 and " f"{qtutils.MAX_WORLD_ID}") continue except ValueError: try: world = _JS_WORLD_MAP[usertypes.JsWorld[script.jsworld.lower()]] except KeyError: log.greasemonkey.error( f"script {script.name} has invalid value for '@qute-js-world'" f": {script.jsworld}") continue new_script.setWorldId(world) # Corresponds to "@run-at document-end" which is the default according to # https://wiki.greasespot.net/Metadata_Block#.40run-at - however, # QtWebEngine uses QWebEngineScript.InjectionPoint.Deferred (@run-at document-idle) as # default. # # NOTE that this needs to be done before setSourceCode, so that # QtWebEngine's parsing of GreaseMonkey tags will override it if there is a # @run-at comment. new_script.setInjectionPoint(QWebEngineScript.InjectionPoint.DocumentReady) new_script.setSourceCode(script.code()) new_script.setName(script.full_name()) new_script.setRunsOnSubFrames(script.runs_on_sub_frames) if script.needs_document_end_workaround(): log.greasemonkey.debug( f"Forcing @run-at document-end for {script.name}") new_script.setInjectionPoint(QWebEngineScript.InjectionPoint.DocumentReady) log.greasemonkey.debug(f'adding script: {new_script.name()}') page_scripts.insert(new_script)
20,264
100,813
88
plugins/train/model/_base/model.py
26
12
def config(self) -> dict: global _CONFIG # pylint: disable=global-statement if not _CONFIG: model_name = self._config_secti
Refactoring and TravisCI to Github Actions (#1239) * refactor training * travis to actions
config
ff6b0209dd5ad57b81b0aca570df7f39a7119bfb
faceswap
model.py
13
9
https://github.com/deepfakes/faceswap.git
2
43
0
23
73
Python
{ "docstring": " dict: The configuration dictionary for current plugin, as set by the user's\n configuration settings. ", "language": "en", "n_whitespaces": 22, "n_words": 14, "vocab_size": 13 }
def config(self) -> dict: global _CONFIG # pylint: disable=global-statement if not _CONFIG: model_name = self._config_section logger.debug("Loading config for: %s", model_name) _CONFIG = Config(model_name, configfile=self._configfile).config_dict return _CONFIG
14,342
66,806
29
erpnext/patches/v13_0/stock_entry_enhancements.py
44
12
def execute(): frappe.relo
style: format code with black
execute
494bd9ef78313436f0424b918f200dab8fc7c20b
erpnext
stock_entry_enhancements.py
13
26
https://github.com/frappe/erpnext.git
3
109
0
31
210
Python
{ "docstring": "\n UPDATE `tabStock Entry` SET\n stock_entry_type = 'Material Transfer',\n purpose = 'Material Transfer',\n add_to_transit = 1 WHERE stock_entry_type = 'Send to Warehouse'\n UPDATE `tabStock Entry` SET\n stock_entry_type = 'Material Transfer',\n purpose = 'Material Transfer'\n WHERE stock_entry_type = 'Receive at Warehouse'\n ", "language": "en", "n_whitespaces": 139, "n_words": 39, "vocab_size": 18 }
def execute(): frappe.reload_doc("stock", "doctype", "stock_entry") if frappe.db.has_column("Stock Entry", "add_to_transit"): frappe.db.sql( ) frappe.db.sql( ) frappe.reload_doc("stock", "doctype", "warehouse_type") if not frappe.db.exists("Warehouse Type", "Transit"): doc = frappe.new_doc("Warehouse Type") doc.name = "Transit" doc.insert() frappe.reload_doc("stock", "doctype", "stock_entry_type") frappe.delete_doc_if_exists("Stock Entry Type", "Send to Warehouse") frappe.delete_doc_if_exists("Stock Entry Type", "Receive at Warehouse")
14,415
67,038
43
erpnext/projects/utils.py
53
16
def query_task(doctype, txt, searchfield, start, page_len, filters): from frappe.desk.reportview import build_match_conditions search_string = "%%%s%%" % txt order_by_string = "%s%%" % txt match_conditions = build_match_conditions("Task") match_conditions = ("and" + match_conditions) if match_conditions else "" return frappe.db.sql( % (searchfield, "%s", "%s", match_condi
style: format code with black
query_task
494bd9ef78313436f0424b918f200dab8fc7c20b
erpnext
utils.py
10
18
https://github.com/frappe/erpnext.git
2
96
0
37
150
Python
{ "docstring": "select name, subject from `tabTask`\n\t\twhere (`%s` like %s or `subject` like %s) %s\n\t\torder by\n\t\t\tcase when `subject` like %s then 0 else 1 end,\n\t\t\tcase when `%s` like %s then 0 else 1 end,\n\t\t\t`%s`,\n\t\t\tsubject\n\t\tlimit %s, %s", "language": "en", "n_whitespaces": 33, "n_words": 41, "vocab_size": 25 }
def query_task(doctype, txt, searchfield, start, page_len, filters): from frappe.desk.reportview import build_match_conditions search_string = "%%%s%%" % txt order_by_string = "%s%%" % txt match_conditions = build_match_conditions("Task") match_conditions = ("and" + match_conditions) if match_conditions else "" return frappe.db.sql( % (searchfield, "%s", "%s", match_conditions, "%s", searchfield, "%s", searchfield, "%s", "%s"), (search_string, search_string, order_by_string, order_by_string, start, page_len), )
81,318
275,145
735
keras/mixed_precision/policy.py
242
11
def _parse_name(self, name): if name.endswith("_float32_vars"): error_msg = ( "Policies ending in '_float32_vars' have been removed " "from TensorFlow." ) if name in ("infer_float32_vars", "infer_with_float32_vars"): error_msg += ( " Please use the 'mixed_float16' or 'mixed_bfloat16' " "policy instead."
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
_parse_name
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
policy.py
13
33
https://github.com/keras-team/keras.git
9
126
0
132
256
Python
{ "docstring": "Parses a Policy name into a compute and variable dtype.\n\n Args:\n name: The name of the policy:\n\n Returns:\n The (compute_dtype, variable_dtype) pair.\n ", "language": "en", "n_whitespaces": 61, "n_words": 22, "vocab_size": 19 }
def _parse_name(self, name): if name.endswith("_float32_vars"): error_msg = ( "Policies ending in '_float32_vars' have been removed " "from TensorFlow." ) if name in ("infer_float32_vars", "infer_with_float32_vars"): error_msg += ( " Please use the 'mixed_float16' or 'mixed_bfloat16' " "policy instead." ) elif name == "float16_with_float32_vars": error_msg += " Please use the 'mixed_float16' policy instead." elif name == "bfloat16_with_float32_vars": error_msg += " Please use the 'mixed_bfloat16' policy instead." error_msg += " Got policy name: '%s'" % name raise ValueError(error_msg) if name == "mixed_float16": return "float16", "float32" elif name == "mixed_bfloat16": return "bfloat16", "float32" elif name == "_infer": # The "_infer" policy exists only for compatibility with TF 1, where # "_infer" is the default. The behavior matches the behavior of TF 1's # behavior before policies were introduced. With "_infer", the computation # and variable dtype are inferred from the first input the first time the # layer is called. Once the layer is called for the first time, the # layer's policy will change to the dtype of the first input, and it will # no longer have the "_infer" policy. # # The infer policy should be considered an implementation detail and may # be removed in the future. return None, None try: dtype = tf.as_dtype(name).name except TypeError: error = ( "Cannot convert value %s to a mixed precision Policy. " "Valid policies include 'mixed_float16', 'mixed_bfloat16', " "and the name of any dtype such as 'float32'." % (name,) ) raise ValueError(error) return dtype, dtype
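Editor's note: the (compute, variable) dtype pair produced by _parse_name is visible through the public Keras policy object; a short sketch, assuming TensorFlow 2.x with Keras bundled.

import tensorflow as tf

policy = tf.keras.mixed_precision.Policy("mixed_float16")
# _parse_name maps the policy name to this compute/variable dtype pair.
print(policy.compute_dtype, policy.variable_dtype)  # float16 float32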
11,047
54,365
47
src/prefect/engine.py
17
8
def reraise_exceptions_as_crashes(): try: yield except
Move state utilities to `prefect.states`
reraise_exceptions_as_crashes
1a3defa3a4ee74fcea9ae5fa4edf6e5eed134930
prefect
engine.py
12
6
https://github.com/PrefectHQ/prefect.git
2
38
0
17
64
Python
{ "docstring": "\n Detect crashes during this context, wrapping unexpected exceptions into `Crash`\n signals.\n ", "language": "en", "n_whitespaces": 21, "n_words": 11, "vocab_size": 11 }
def reraise_exceptions_as_crashes(): try: yield except BaseException as exc: state = exception_to_crashed_state(exc) raise Crash(message=state.message, cause=exc, state=state) from exc
8,135
44,069
132
scripts/in_container/run_resource_check.py
60
27
def resoure_check(): MINIMUM_ALLOWED_MEMORY = 4 MINIMUM_ALLOWED_CPUS = 2 MINIMUM_ALLOWED_DISK = 20 print("\nChecking r
Verify enough resources for breeze (#20763) Verify resources, memory, cpus and disk for Docker in Python.
resoure_check
75755d7f65fb06c6e2e74f805b877774bfa7fcda
airflow
run_resource_check.py
11
17
https://github.com/apache/airflow.git
1
123
0
45
206
Python
{ "docstring": "\n Use gsutil to get resources in bytes for memory and disk\n ", "language": "en", "n_whitespaces": 18, "n_words": 11, "vocab_size": 11 }
def resoure_check(): MINIMUM_ALLOWED_MEMORY = 4 MINIMUM_ALLOWED_CPUS = 2 MINIMUM_ALLOWED_DISK = 20 print("\nChecking resources.\n") # Memory current available svmem = psutil.virtual_memory() mem_available = get_size(svmem.available) # Cpus current available cpus_available = psutil.cpu_count(logical=True) # Disk current available partitions = psutil.disk_partitions() partition_usage = psutil.disk_usage(partitions[0].mountpoint) disk_available = get_size(partition_usage.free) resources: Dict[str, Resource] = { 'Memory': Resource(current=mem_available, minimumAllowed=MINIMUM_ALLOWED_MEMORY), 'Cpus': Resource(current=cpus_available, minimumAllowed=MINIMUM_ALLOWED_CPUS), 'Disk': Resource(current=disk_available, minimumAllowed=MINIMUM_ALLOWED_DISK), } return resources
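Editor's note: an illustrative psutil-based probe in the same spirit as the record's script; the thresholds, function name, and POSIX mount point are placeholders, not the script's real values.

import shutil
import psutil

def probe_resources(min_mem_gb=4, min_cpus=2, min_disk_gb=20):
    # Current availability: free memory and disk in GiB, logical CPU count.
    mem_gb = psutil.virtual_memory().available / 1024**3
    cpus = psutil.cpu_count(logical=True)
    disk_gb = shutil.disk_usage("/").free / 1024**3
    # Each entry: (current value, minimum allowed, check passed?).
    return {
        "memory_gb": (round(mem_gb, 1), min_mem_gb, mem_gb >= min_mem_gb),
        "cpus": (cpus, min_cpus, cpus >= min_cpus),
        "disk_gb": (round(disk_gb, 1), min_disk_gb, disk_gb >= min_disk_gb),
    }

print(probe_resources())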