Dataset schema (column name, dtype, and observed value or length range):

complexity: int64, 1 to 139
fun_name: string, lengths 1 to 80
code: string, lengths 101 to 62.2k
commit_id: string, lengths 40 to 40
ast_errors: string, lengths 0 to 3.11k
ast_levels: int64, 6 to 36
file_name: string, lengths 5 to 79
n_ast_nodes: int64, 17 to 19.2k
commit_message: string, lengths 3 to 15.3k
d_id: int64, 12 to 121k
n_ast_errors: int64, 0 to 9
n_whitespaces: int64, 4 to 10.8k
token_counts: int64, 5 to 3.06k
vocab_size: int64, 4 to 1.11k
id: int64, 20 to 338k
n_words: int64, 4 to 4.82k
repo: string, lengths 3 to 22
n_identifiers: int64, 2 to 176
path: string, lengths 7 to 134
language: string, 1 distinct value
nloc: int64, 1 to 413
documentation: dict
url: string, lengths 31 to 59
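Each row below is one function-level record with the columns listed above. As a minimal sketch of how such a dump could be inspected (assuming it is published as a Hugging Face dataset; the path "owner/code-metrics" is a hypothetical placeholder, since the actual dataset name is not stated here):

from datasets import load_dataset

# Hypothetical dataset path -- the real name is not given in this dump.
ds = load_dataset("owner/code-metrics", split="train")

row = ds[0]
print(row["fun_name"], row["repo"], row["path"])             # e.g. get_word_index keras keras/datasets/reuters.py
print(row["complexity"], row["nloc"], row["token_counts"])   # per-function metrics
print(row["documentation"]["docstring"][:80])                # docstring is stored as a dict field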
1
get_word_index
def get_word_index(path="reuters_word_index.json"):
    origin_folder = (
        "https://storage.googleapis.com/tensorflow/tf-keras-datasets/"
    )
    path = get_file(
        path,
        origin=origin_folder + "reuters_word_index.json",
        file_hash="4d44cc38712099c9e383dc6e5f11a921",
    )
    with open(path) as f:
        return json.load(f)
84afc5193d38057e2e2badf9c889ea87d80d8fbf
11
reuters.py
83
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
80,402
0
75
45
20
270,139
22
keras
10
keras/datasets/reuters.py
Python
11
{ "docstring": "Retrieves a dict mapping words to their index in the Reuters dataset.\n\n Args:\n path: where to cache the data (relative to `~/.keras/dataset`).\n\n Returns:\n The word index dictionary. Keys are word strings, values are their index.\n ", "language": "en", "n_whitespaces": 58, "n_words": 35, "vocab_size": 28 }
https://github.com/keras-team/keras.git
3
test_flow_run_respects_extra_loggers
async def test_flow_run_respects_extra_loggers(orion_client, logger_test_deployment):
    flow_run = await orion_client.create_flow_run_from_deployment(
        logger_test_deployment
    )
    await SubprocessFlowRunner(
        env={"PREFECT_LOGGING_EXTRA_LOGGERS": "foo"}
    ).submit_flow_run(flow_run, MagicMock(spec=anyio.abc.TaskStatus))

    state = (await orion_client.read_flow_run(flow_run.id)).state
    settings = await orion_client.resolve_datadoc(state.result())
    api_logs = await orion_client.read_logs()
    api_log_messages = [log.message for log in api_logs]

    prefect_logger = logging.getLogger("prefect")

    # Configures 'foo' to match 'prefect'
    assert settings["foo"]["handlers"] == [
        handler.name for handler in prefect_logger.handlers
    ]
    assert settings["foo"]["level"] == prefect_logger.level
    assert "Hello from foo" in api_log_messages

    # Does not configure 'bar'
    assert settings["bar"]["handlers"] == []
    assert settings["bar"]["level"] == logging.NOTSET
    assert "Hello from bar" not in api_log_messages


@pytest.mark.parametrize("name", ["default", None, ""])
b110baccdbfde300f410b069c873e8b2a2c98e00
@pytest.mark.parametrize("name", ["default", None, ""])
14
test_logging.py
311
Add test
10,716
1
165
163
62
53,099
88
prefect
35
tests/test_logging.py
Python
20
{ "docstring": "\n Runs a flow in a subprocess to check that PREFECT_LOGGING_EXTRA_LOGGERS works as\n intended. This avoids side-effects of modifying the loggers in this test run without\n confusing mocking.\n ", "language": "en", "n_whitespaces": 40, "n_words": 27, "vocab_size": 25 }
https://github.com/PrefectHQ/prefect.git
5
save
def save(self):
    s = self._read_from_storage()  # type: _Settings
    for k, v in self.__dict__.items():
        if k[0] == '_':
            continue
        if hasattr(s, k):
            setattr(s, k, v)

    log.debug("_ConfigSQL updating storage")
    self._session.merge(s)
    try:
        self._session.commit()
    except OperationalError as e:
        log.error('Database error: %s', e)
        self._session.rollback()
    self.load()
4ea80e9810a14ca3617f08a4ae5cfa6b50482e9a
11
config_sql.py
170
Code cosmetics
40,839
0
182
99
38
173,342
40
calibre-web
20
cps/config_sql.py
Python
15
{ "docstring": "Apply all configuration values to the underlying storage.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/janeczku/calibre-web.git
3
try_all_gpus
def try_all_gpus():
    num_gpus = len(tf.config.experimental.list_physical_devices('GPU'))
    devices = [tf.device(f'/GPU:{i}') for i in range(num_gpus)]
    return devices if devices else [tf.device('/CPU:0')]
b64b41d8c1ac23c43f7a4e3f9f6339d6f0012ab2
12
tensorflow.py
91
[PaddlePaddle] Merge master into Paddle branch (#1186) * change 15.2 title in chinese version (#1109) change title ’15.2. 情感分析:使用递归神经网络‘ to ’15.2. 情感分析:使用循环神经网络‘ * 修改部分语义表述 (#1105) * Update r0.17.5 (#1120) * Bump versions in installation * 94行typo: (“bert.mall”)->(“bert.small”) (#1129) * line 313: "bert.mall" -> "bert.small" (#1130) * fix: update language as native reader (#1114) * Fix the translation of "stride" (#1115) * Update index.md (#1118) 修改部分语义表述 * Update self-attention-and-positional-encoding.md (#1133) 依照本书的翻译习惯,将pooling翻译成汇聚 * maybe a comment false (#1149) * maybe a little false * maybe a little false * A minor bug in the rcnn section (Chinese edition) (#1148) * Update bert.md (#1137) 一个笔误 # 假设batch_size=2,num_pred_positions=3 # 那么batch_idx应该是np.repeat( [0,1], 3 ) = [0,0,0,1,1,1] * Update calculus.md (#1135) * fix typo in git documentation (#1106) * fix: Update the Chinese translation in lr-scheduler.md (#1136) * Update lr-scheduler.md * Update chapter_optimization/lr-scheduler.md Co-authored-by: goldmermaid <[email protected]> Co-authored-by: goldmermaid <[email protected]> * fix translation for kaggle-house-price.md (#1107) * fix translation for kaggle-house-price.md * fix translation for kaggle-house-price.md Signed-off-by: sunhaizhou <[email protected]> * Update weight-decay.md (#1150) * Update weight-decay.md 关于“k多选d”这一部分,中文读者使用排列组合的方式可能更容易理解 关于“给定k个变量,阶数的个数为...”这句话是有歧义的,不是很像中国话,应该是说“阶数为d的项的个数为...”。 并增加了一句对“因此即使是阶数上的微小变化,比如从$2$到$3$,也会显著增加我们模型的复杂性。”的解释 解释为何会增加复杂性以及为何需要细粒度工具。 * Update chapter_multilayer-perceptrons/weight-decay.md yep Co-authored-by: goldmermaid <[email protected]> * Update chapter_multilayer-perceptrons/weight-decay.md yep Co-authored-by: goldmermaid <[email protected]> Co-authored-by: goldmermaid <[email protected]> * Fix a spelling error (#1161) * Update gru.md (#1152) The key distinction between vanilla RNNs and GRUs is that the latter support gating of the hidden state. 翻译错误 * Unify the function naming (#1113) Unify naming of the function 'init_xavier()'. * Update mlp-concise.md (#1166) * Update mlp-concise.md 语句不通顺 * Update environment.md 语序异常 * Update config.ini * fix the imprecise description (#1168) Co-authored-by: yuande <yuande> * fix typo in chapter_natural-language-processing-pretraining/glove.md (#1175) * Fix some typos. (#1163) * Update batch-norm.md (#1170) fixing typos u->x in article * Update linear-regression.md (#1090) We invoke Stuart Russell and Peter Norvig who, in their classic AI text book Artificial Intelligence: A Modern Approach :cite:Russell.Norvig.2016, pointed out that 原译文把who也直接翻译出来了。 * Update mlp.md (#1117) * Update mlp.md 修改部分语义表述 * Update chapter_multilayer-perceptrons/mlp.md Co-authored-by: goldmermaid <[email protected]> * Update chapter_multilayer-perceptrons/mlp.md Co-authored-by: Aston Zhang <[email protected]> Co-authored-by: goldmermaid <[email protected]> * Correct a translation error. (#1091) * Correct a translation error. 
* Update chapter_computer-vision/image-augmentation.md Co-authored-by: Aston Zhang <[email protected]> * Update aws.md (#1121) * Update aws.md * Update chapter_appendix-tools-for-deep-learning/aws.md Co-authored-by: Aston Zhang <[email protected]> * Update image-augmentation.md (#1093) * Update anchor.md (#1088) fix a minor issue in code * Update anchor.md * Update image-augmentation.md * fix typo and improve translation in chapter_linear-networks\softmax-regression.md (#1087) * Avoid `torch.meshgrid` user warning (#1174) Avoids the following user warning: ```python ~/anaconda3/envs/torch/lib/python3.10/site-packages/torch/functional.py:568: UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at ../aten/src/ATen/native/TensorShape.cpp:2228.) return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined] ``` * bump to 2.0.0-beta1 * Update sequence.md * bump beta1 on readme * Add latex code block background to config * BLD: Bump python support version 3.9 (#1183) * BLD: Bump python support version 3.9 * Remove clear and manually downgrade protobuf 4.21.4 to 3.19.4 * BLD: Bump torch and tensorflow * Update Jenkinsfile * Update chapter_installation/index.md * Update chapter_installation/index.md Co-authored-by: Aston Zhang <[email protected]> * Update config.ini * Update INFO.md * Update INFO.md * Drop mint to show code in pdf, use Inconsolata font, apply code cell color (#1187) * resolve the conflicts * revise from publisher (#1089) * revise from publisher * d2l api * post_latex * revise from publisher * revise ch11 * Delete d2l-Copy1.bib * clear cache * rm d2lbook clear * debug anchor * keep original d2l doc Co-authored-by: Ubuntu <[email protected]> Co-authored-by: Aston Zhang <[email protected]> Co-authored-by: Aston Zhang <[email protected]> * 重复语句 (#1188) Co-authored-by: Aston Zhang <[email protected]> * Improve expression for chapter_preliminaries/pandas.md (#1184) * Update pandas.md * Improve expression * Improve expression * Update chapter_preliminaries/pandas.md Co-authored-by: Aston Zhang <[email protected]> * Improce expression for chapter_preliminaries/linear-algebra.md (#1185) * Improce expression * Improve code comments * Update chapter_preliminaries/linear-algebra.md * Update chapter_preliminaries/linear-algebra.md * Update chapter_preliminaries/linear-algebra.md * Update chapter_preliminaries/linear-algebra.md Co-authored-by: Aston Zhang <[email protected]> * Fix multibox_detection bugs * Update d2l to 0.17.5 version * restore older version * Upgrade pandas * change to python3.8 * Test warning log * relocate warning log * test logs filtering * Update gru.md * Add DeprecationWarning filter * Test warning log * Update attention mechanisms & computational performance * Update multilayer perceptron& linear & convolution networks & computer vision * Update recurrent&optimition&nlp pretraining & nlp applications * ignore warnings * Update index.md * Update linear networks * Update multilayer perceptrons&deep learning computation * Update preliminaries * Check and Add warning filter * Update kaggle-cifar10.md * Update object-detection-dataset.md * Update ssd.md fcn.md * Update hybridize.md * Update hybridize.md Signed-off-by: sunhaizhou <[email protected]> Co-authored-by: zhou201505013 <[email protected]> Co-authored-by: Xinwei Liu <[email protected]> Co-authored-by: Anirudh Dagar <[email protected]> Co-authored-by: Aston Zhang <[email protected]> Co-authored-by: hugo_han <[email protected]> 
Co-authored-by: gyro永不抽风 <[email protected]> Co-authored-by: CanChengZheng <[email protected]> Co-authored-by: linlin <[email protected]> Co-authored-by: iuk <[email protected]> Co-authored-by: yoos <[email protected]> Co-authored-by: Mr. Justice Lawrence John Wargrave <[email protected]> Co-authored-by: Chiyuan Fu <[email protected]> Co-authored-by: Sunhuashan <[email protected]> Co-authored-by: Haiker Sun <[email protected]> Co-authored-by: Ming Liu <[email protected]> Co-authored-by: goldmermaid <[email protected]> Co-authored-by: silenceZheng66 <[email protected]> Co-authored-by: Wenchao Yan <[email protected]> Co-authored-by: Kiki2049 <[email protected]> Co-authored-by: Krahets <[email protected]> Co-authored-by: friedmainfunction <[email protected]> Co-authored-by: Jameson <[email protected]> Co-authored-by: P. Yao <[email protected]> Co-authored-by: Yulv-git <[email protected]> Co-authored-by: Liu,Xiao <[email protected]> Co-authored-by: YIN, Gang <[email protected]> Co-authored-by: Joe-HZ <[email protected]> Co-authored-by: lybloveyou <[email protected]> Co-authored-by: VigourJiang <[email protected]> Co-authored-by: zxhd863943427 <[email protected]> Co-authored-by: LYF <[email protected]> Co-authored-by: Aston Zhang <[email protected]> Co-authored-by: xiaotinghe <[email protected]> Co-authored-by: Ubuntu <[email protected]> Co-authored-by: Holly-Max <[email protected]> Co-authored-by: HinGwenWoong <[email protected]> Co-authored-by: Shuai Zhang <[email protected]>
37,441
0
30
51
15
158,288
18
d2l-zh
11
d2l/tensorflow.py
Python
4
{ "docstring": "Return all available GPUs, or [cpu(),] if no GPU exists.\n\n Defined in :numref:`sec_use_gpu`", "language": "en", "n_whitespaces": 15, "n_words": 13, "vocab_size": 13 }
https://github.com/d2l-ai/d2l-zh.git
1
gen_fake_dataset
def gen_fake_dataset():
    # For fake dataset, since the dataset is randomized, we create it once on the
    # driver, and then send the same dataset to all the training workers.
    # Use 10% of nodes for validation and 10% for testing.
    fake_dataset = FakeDataset(transform=RandomNodeSplit(num_val=0.1, num_test=0.1))
732175e2458ec671c80484bcd35150dcdc7e600b
12
distributed_sage_example.py
41
[AIR] Add distributed `torch_geometric` example (#23580) Add example for distributed pytorch geometric (graph learning) with Ray AIR This only showcases distributed training, but with data small enough that it can be loaded in by each training worker individually. Distributed data ingest is out of scope for this PR. Co-authored-by: matthewdeng <[email protected]>
34,236
0
60
30
36
148,314
45
ray
7
python/ray/ml/examples/pytorch_geometric/distributed_sage_example.py
Python
4
{ "docstring": "Returns a function to be called on each worker that returns a Fake Dataset.", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 13 }
https://github.com/ray-project/ray.git
9
read_dict
def read_dict(self, dictionary, source='<dict>'):
    elements_added = set()
    for section, keys in dictionary.items():
        section = str(section)
        try:
            self.add_section(section)
        except (DuplicateSectionError, ValueError):
            if self._strict and section in elements_added:
                raise
        elements_added.add(section)
        for key, value in keys.items():
            key = self.optionxform(str(key))
            if value is not None:
                value = str(value)
            if self._strict and (section, key) in elements_added:
                raise DuplicateOptionError(section, key, source)
            elements_added.add((section, key))
            self.set(section, key, value)
8198943edd73a363c266633e1aa5b2a9e9c9f526
14
configparser.py
222
add python 3.10.4 for windows
56,469
0
298
141
42
221,674
60
XX-Net
19
python3.10.4/Lib/configparser.py
Python
18
{ "docstring": "Read configuration from a dictionary.\n\n Keys are section names, values are dictionaries with keys and values\n that should be present in the section. If the used dictionary type\n preserves order, sections and their keys will be added in order.\n\n All types held in the dictionary are converted to strings during\n reading, including section names, option names and keys.\n\n Optional second argument is the `source' specifying the name of the\n dictionary being read.\n ", "language": "en", "n_whitespaces": 128, "n_words": 72, "vocab_size": 54 }
https://github.com/XX-net/XX-Net.git
1
get_deployment_statuses
def get_deployment_statuses() -> Dict[str, DeploymentStatusInfo]: return internal_get_global_client().get_deployment_statuses()
1100c982223757f697a410a0d0c3d8bf3ff9c805
9
api.py
35
[serve] Implement Serve Application object (#22917) The concept of a Serve Application, a data structure containing all information needed to deploy Serve on a Ray cluster, has surfaced during recent design discussions. This change introduces a formal Application data structure and refactors existing code to use it.
33,628
0
13
20
7
146,198
7
ray
5
python/ray/serve/api.py
Python
18
{ "docstring": "Returns a dictionary of deployment statuses.\n\n A deployment's status is one of {UPDATING, UNHEALTHY, and HEALTHY}.\n\n Example:\n\n >>> statuses = get_deployment_statuses()\n >>> status_info = statuses[\"deployment_name\"]\n >>> status = status_info.status\n >>> message = status_info.message\n\n Returns:\n Dict[str, DeploymentStatus]: This dictionary maps the running\n deployment's name to a DeploymentStatus object containing its\n status and a message explaining the status.\n ", "language": "en", "n_whitespaces": 121, "n_words": 56, "vocab_size": 40 }
https://github.com/ray-project/ray.git
1
test_remove_config_entry_from_device_fails
async def test_remove_config_entry_from_device_fails(hass, hass_ws_client):
    assert await async_setup_component(hass, "config", {})
    ws_client = await hass_ws_client(hass)
    device_registry = mock_device_registry(hass)
c496748125b811ef5437ad666d21c09025e0967f
9
test_device_registry.py
56
Add WS API for removing a config entry from a device (#66188) * Add WS API for removing a config entry from a device * Address review comments * Address review comments * Remove entity cleanup from ConfigEntries * Update + add tests * Improve comments in test * Add negative test * Refactor according to review comments * Add back async_remove_config_entry_device * Remove unnecessary error handling * Simplify error handling
91,454
0
28
617
14
292,362
16
core
7
tests/components/config/test_device_registry.py
Python
132
{ "docstring": "Test removing config entry from device failing cases.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/home-assistant/core.git
6
geometric_edges
def geometric_edges(G, radius, p=2):
    nodes_pos = G.nodes(data="pos")
    try:
        import scipy as sp
        import scipy.spatial  # call as sp.spatial
    except ImportError:
        # no scipy KDTree so compute by for-loop
        radius_p = radius**p
        edges = [
            (u, v)
            for (u, pu), (v, pv) in combinations(nodes_pos, 2)
            if sum(abs(a - b) ** p for a, b in zip(pu, pv)) <= radius_p
        ]
        return edges
    # scipy KDTree is available
    nodes, coords = list(zip(*nodes_pos))
    kdtree = sp.spatial.cKDTree(coords)  # Cannot provide generator.
    edge_indexes = kdtree.query_pairs(radius, p)
    edges = [(nodes[u], nodes[v]) for u, v in sorted(edge_indexes)]
    return edges


@py_random_state(5)
ad99a5862264355d5eaaa309136475bfb9a66c12
@py_random_state(5)
18
geometric.py
247
Add default value p=2 for minkowski distance metric. (#5700)
42,122
1
206
153
69
176,826
93
networkx
31
networkx/generators/geometric.py
Python
18
{ "docstring": "Returns edge list of node pairs within `radius` of each other.\n\n Parameters\n ----------\n G : networkx graph\n The graph from which to generate the edge list. The nodes in `G` should\n have an attribute ``pos`` corresponding to the node position, which is\n used to compute the distance to other nodes.\n radius : scalar\n The distance threshold. Edges are included in the edge list if the\n distance between the two nodes is less than `radius`.\n p : scalar, default=2\n The `Minkowski distance metric\n <https://en.wikipedia.org/wiki/Minkowski_distance>`_ used to compute\n distances. The default value is 2, i.e. Euclidean distance.\n\n Returns\n -------\n edges : list\n List of edges whose distances are less than `radius`\n\n Notes\n -----\n Radius uses Minkowski distance metric `p`.\n If scipy is available, `scipy.spatial.cKDTree` is used to speed computation.\n\n Examples\n --------\n Create a graph with nodes that have a \"pos\" attribute representing 2D\n coordinates.\n\n >>> G = nx.Graph()\n >>> G.add_nodes_from([\n ... (0, {\"pos\": (0, 0)}),\n ... (1, {\"pos\": (3, 0)}),\n ... (2, {\"pos\": (8, 0)}),\n ... ])\n >>> nx.geometric_edges(G, radius=1)\n []\n >>> nx.geometric_edges(G, radius=4)\n [(0, 1)]\n >>> nx.geometric_edges(G, radius=6)\n [(0, 1), (1, 2)]\n >>> nx.geometric_edges(G, radius=9)\n [(0, 1), (0, 2), (1, 2)]\n ", "language": "en", "n_whitespaces": 358, "n_words": 190, "vocab_size": 115 }
https://github.com/networkx/networkx.git
4
fields
def fields(class_or_instance):
    # Might it be worth caching this, per class?
    try:
        fields = getattr(class_or_instance, _FIELDS)
    except AttributeError:
        raise TypeError('must be called with a dataclass type or instance')

    # Exclude pseudo-fields.  Note that fields is sorted by insertion
    # order, so the order of the tuple is as the fields were defined.
    return tuple(f for f in fields.values() if f._field_type is _FIELD)
8198943edd73a363c266633e1aa5b2a9e9c9f526
11
dataclasses.py
76
add python 3.10.4 for windows
56,538
0
98
43
53
222,330
62
XX-Net
11
python3.10.4/Lib/dataclasses.py
Python
6
{ "docstring": "Return a tuple describing the fields of this dataclass.\n\n Accepts a dataclass or an instance of one. Tuple elements are of\n type Field.\n ", "language": "en", "n_whitespaces": 32, "n_words": 23, "vocab_size": 20 }
https://github.com/XX-net/XX-Net.git
2
_invert_monoms
def _invert_monoms(p1):
    terms = list(p1.items())
    terms.sort()
    deg = p1.degree()
    R = p1.ring
    p = R.zero
    cv = p1.listcoeffs()
    mv = p1.listmonoms()
    for mvi, cvi in zip(mv, cv):
        p[(deg - mvi[0],)] = cvi
    return p
7d773eb18daaef3c54f34d1ac6cbc5b83a5bb16c
12
ring_series.py
131
Cleanup loops and ranges
48,908
0
71
80
26
198,397
34
sympy
19
sympy/polys/ring_series.py
Python
11
{ "docstring": "\n Compute ``x**n * p1(1/x)`` for a univariate polynomial ``p1`` in ``x``.\n\n Examples\n ========\n\n >>> from sympy.polys.domains import ZZ\n >>> from sympy.polys.rings import ring\n >>> from sympy.polys.ring_series import _invert_monoms\n >>> R, x = ring('x', ZZ)\n >>> p = x**2 + 2*x + 3\n >>> _invert_monoms(p)\n 3*x**2 + 2*x + 1\n\n See Also\n ========\n\n sympy.polys.densebasic.dup_reverse\n ", "language": "en", "n_whitespaces": 96, "n_words": 53, "vocab_size": 38 }
https://github.com/sympy/sympy.git
4
to_python
def to_python(self, value):
    value = super().to_python(value)
    if value in self.empty_values:
        return None
    if self.localize:
        value = formats.sanitize_separators(value)
    # Strip trailing decimal and zeros.
    try:
        value = int(self.re_decimal.sub("", str(value)))
    except (ValueError, TypeError):
        raise ValidationError(self.error_messages["invalid"], code="invalid")
    return value
9c19aff7c7561e3a82978a272ecdaad40dda5c00
14
fields.py
139
Refs #33476 -- Reformatted code with Black.
51,297
0
136
83
28
205,955
36
django
17
django/forms/fields.py
Python
11
{ "docstring": "\n Validate that int() can be called on the input. Return the result\n of int() or None for empty values.\n ", "language": "en", "n_whitespaces": 41, "n_words": 19, "vocab_size": 17 }
https://github.com/django/django.git
3
_zoom_data_limits
def _zoom_data_limits(self, scale_u, scale_v, scale_w):
    scale = np.array([scale_u, scale_v, scale_w])

    # Only perform frame conversion if unequal scale factors
    if not np.allclose(scale, scale_u):
        # Convert the scale factors from the view frame to the data frame
        R = np.array([self._view_u, self._view_v, self._view_w])
        S = scale * np.eye(3)
        scale = np.linalg.norm(R.T @ S, axis=1)

        # Set the constrained scale factors to the factor closest to 1
        if self._aspect in ('equal', 'equalxy', 'equalxz', 'equalyz'):
            ax_idxs = self._equal_aspect_axis_indices(self._aspect)
            min_ax_idxs = np.argmin(np.abs(scale[ax_idxs] - 1))
            scale[ax_idxs] = scale[ax_idxs][min_ax_idxs]

    self._scale_axis_limits(scale[0], scale[1], scale[2])
4896ec1a2cfb8c454e385632d8df213c915ced52
16
axes3d.py
246
Add pan and zoom toolbar handling to 3D Axes (Replaces PR#22614) (#23449) * ENH: Add pan and zoom toolbar handling to 3D Axes 1) This moves the pan logic that was already in the mouse move handler into the "drag_pan" method to make it available from the toolbar. 2) This expands upon the panning logic to enable a zoom-to-box feature. The zoom-to-box is done relative to the Axes, so it shrinks/expands the box as a fraction of each delta, from lower-left Axes to lower-left zoom-box. Thus, it tries to handle non-centered zooms, which adds more cases to handle versus the current right-click zoom only scaling from the center of the projection. * Rewrite zooming with bounding box * Rewrite 3d panning to work with a roll angle * Whats new for zoom and pan buttons * Make pan button configurable * Do not jump when zooming and mouse goes over other subplot * Rework zooming for 3d plots * Handle x/y lock when zooming and panning * Update tests * Docstrings * Dont assume a scale_z * Limit zoom box * Test zoom pan key modifiers * Save some calculation by saving view axes * Deprecation warnings for Axes3D.eye, .vvec * Remove Axes3D._prepare_view_from_bbox for now * Comments and docstrings * Switch from uvn to uvw * Save aspect to axes * Constrain zooming with mouse when one of the equal aspect ratios is set * Cleanup * Cleanup * Consolidate finding equal aspect axis indices * linting * More intuitive scaling * Box zoom keeps existing aspect ratios * Linting * Code review comments * Revert parameters for view_transformation * Fix new 3d pan/zoom view going on view stack twice * Better clipping * Test 3d toolbar navigation * Privatize helper functions * Deprecations * Code review changes * Deprecation note * Undeprecate proj3d.view_transformation * Undeprecate proj3d.view_transformation * Update doc/api/next_api_changes/deprecations/23449-SS.rst Co-authored-by: Greg Lucas <[email protected]> Co-authored-by: Scott Shambaugh <[email protected]> Co-authored-by: Oscar Gustafsson <[email protected]>
23,730
0
231
159
59
109,746
85
matplotlib
26
lib/mpl_toolkits/mplot3d/axes3d.py
Python
11
{ "docstring": "\n Zoom in or out of a 3D plot.\n Will scale the data limits by the scale factors. These will be\n transformed to the x, y, z data axes based on the current view angles.\n A scale factor > 1 zooms out and a scale factor < 1 zooms in.\n\n For an axes that has had its aspect ratio set to 'equal', 'equalxy',\n 'equalyz', or 'equalxz', the relevant axes are constrained to zoom\n equally.\n\n Parameters\n ----------\n scale_u : float\n Scale factor for the u view axis (view screen horizontal).\n scale_v : float\n Scale factor for the v view axis (view screen vertical).\n scale_w : float\n Scale factor for the w view axis (view screen depth).\n ", "language": "en", "n_whitespaces": 239, "n_words": 114, "vocab_size": 73 }
https://github.com/matplotlib/matplotlib.git
2
start
def start(self):
    self.worker = _get_worker(
        args=self.args,
        target=run,
        kwargs={
            'args': self.args,
            'name': self.name,
            'container_name': self.container_name,
            'net_mode': self.net_mode,
            'runtime_ctrl_address': self.runtime_ctrl_address,
            'envs': self._envs,
            'is_started': self.is_started,
            'is_shutdown': self.is_shutdown,
            'is_ready': self.is_ready,
        },
    )
    self.worker.start()
    if not self.args.noblock_on_start:
        self.wait_start_success()
    return self
933415bfa1f9eb89f935037014dfed816eb9815d
12
container.py
172
feat: star routing (#3900) * feat(proto): adjust proto for star routing (#3844) * feat(proto): adjust proto for star routing * feat(proto): generate proto files * feat(grpc): refactor grpclet interface (#3846) * feat: refactor connection pool for star routing (#3872) * feat(k8s): add more labels to k8s deployments * feat(network): refactor connection pool * feat(network): refactor k8s pool * feat: star routing graph gateway (#3877) * feat: star routing - refactor grpc data runtime (#3887) * feat(runtimes): refactor grpc dataruntime * fix(tests): adapt worker runtime tests * fix(import): fix import * feat(proto): enable sending multiple lists (#3891) * feat: star routing gateway (#3893) * feat: star routing gateway all protocols (#3897) * test: add streaming and prefetch tests (#3901) * feat(head): new head runtime for star routing (#3899) * feat(head): new head runtime * feat(head): new head runtime * style: fix overload and cli autocomplete * feat(network): improve proto comments Co-authored-by: Jina Dev Bot <[email protected]> * feat(worker): merge docs in worker runtime (#3905) * feat(worker): merge docs in worker runtime * feat(tests): assert after clean up * feat(tests): star routing runtime integration tests (#3908) * fix(tests): fix integration tests * test: test runtimes fast slow request (#3910) * feat(zmq): purge zmq, zed, routing_table (#3915) * feat(zmq): purge zmq, zed, routing_table * style: fix overload and cli autocomplete * feat(zmq): adapt comment in dependency list * style: fix overload and cli autocomplete * fix(tests): fix type tests Co-authored-by: Jina Dev Bot <[email protected]> * test: add test gateway to worker connection (#3921) * feat(pea): adapt peas for star routing (#3918) * feat(pea): adapt peas for star routing * style: fix overload and cli autocomplete * feat(pea): add tests * feat(tests): add failing head pea test Co-authored-by: Jina Dev Bot <[email protected]> * feat(tests): integration tests for peas (#3923) * feat(tests): integration tests for peas * feat(pea): remove _inner_pea function * feat: star routing container pea (#3922) * test: rescue tests (#3942) * fix: fix streaming tests (#3945) * refactor: move docker run to run (#3948) * feat: star routing pods (#3940) * feat(pod): adapt pods for star routing * feat(pods): adapt basepod to star routing * feat(pod): merge pod and compound pod * feat(tests): fix tests * style: fix overload and cli autocomplete * feat(test): add container pea int test * feat(ci): remove more unnecessary tests * fix(tests): remove jinad runtime * feat(ci): remove latency tracking * fix(ci): fix ci def * fix(runtime): enable runtime to be exited * fix(tests): wrap runtime test in process * fix(runtimes): remove unused runtimes * feat(runtimes): improve cancel wait * fix(ci): build test pip again in ci * fix(tests): fix a test * fix(test): run async in its own process * feat(pod): include shard in activate msg * fix(pea): dont join * feat(pod): more debug out * feat(grpc): manage channels properly * feat(pods): remove exitfifo * feat(network): add simple send retry mechanism * fix(network): await pool close * fix(test): always close grpc server in worker * fix(tests): remove container pea from tests * fix(tests): reorder tests * fix(ci): split tests * fix(ci): allow alias setting * fix(test): skip a test * feat(pods): address comments Co-authored-by: Jina Dev Bot <[email protected]> * test: unblock skipped test (#3957) * feat: jinad pea (#3949) * feat: jinad pea * feat: jinad pea * test: remote peas * test: toplogy tests 
with jinad * ci: parallel jobs * feat(tests): add pod integration tests (#3958) * feat(tests): add pod integration tests * fix(tests): make tests less flaky * fix(test): fix test * test(pea): remote pea topologies (#3961) * test(pea): remote pea simple topology * test: remote pea topologies * refactor: refactor streamer result handling (#3960) * feat(k8s): adapt K8s Pod for StarRouting (#3964) * test: optimize k8s test * test: increase timeout and use different namespace * test: optimize k8s test * test: build and load image when needed * test: refactor k8s test * test: fix image name error * test: fix k8s image load * test: fix typoe port expose * test: update tests in connection pool and handling * test: remove unused fixture * test: parameterize docker images * test: parameterize docker images * test: parameterize docker images * feat(k8s): adapt k8s pod for star routing * fix(k8s): dont overwrite add/remove function in pool * fix(k8s): some fixes * fix(k8s): some more fixes * fix(k8s): linting * fix(tests): fix tests * fix(tests): fix k8s unit tests * feat(k8s): complete k8s integration test * feat(k8s): finish k8s tests * feat(k8s): fix test * fix(tests): fix test with no name * feat(k8s): unify create/replace interface * feat(k8s): extract k8s port constants * fix(tests): fix tests * fix(tests): wait for runtime being ready in tests * feat(k8s): address comments Co-authored-by: bwanglzu <[email protected]> * feat(flow): adapt Flow for StarRouting (#3986) * feat(flow): add routes * feat(flow): adapt flow to star routing * style: fix overload and cli autocomplete * feat(flow): handle empty topologies * feat(k8s): allow k8s pool disabling * style: fix overload and cli autocomplete * fix(test): fix test with mock * fix(tests): fix more tests * feat(flow): clean up tests * style: fix overload and cli autocomplete * fix(tests): fix more tests * feat: add plot function (#3994) * fix(tests): avoid hanging tests * feat(flow): add type hinting * fix(test): fix duplicate exec name in test * fix(tests): fix more tests * fix(tests): enable jinad test again * fix(tests): random port fixture * fix(style): replace quotes Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Joan Fontanals <[email protected]> * feat(ci): bring back ci (#3997) * feat(ci): enable ci again * style: fix overload and cli autocomplete * feat(ci): add latency tracking * feat(ci): bring back some tests * fix(tests): remove invalid port test * feat(ci): disable daemon and distributed tests * fix(tests): fix entrypoint in hub test * fix(tests): wait for gateway to be ready * fix(test): fix more tests * feat(flow): do rolling update and scale sequentially * fix(tests): fix more tests * style: fix overload and cli autocomplete * feat: star routing hanging pods (#4011) * fix: try to handle hanging pods better * test: hanging pods test work * fix: fix topology graph problem * test: add unit test to graph * fix(tests): fix k8s tests * fix(test): fix k8s test * fix(test): fix k8s pool test * fix(test): fix k8s test * fix(test): fix k8s connection pool setting * fix(tests): make runtime test more reliable * fix(test): fix routes test * fix(tests): make rolling update test less flaky * feat(network): gurantee unique ports * feat(network): do round robin for shards * fix(ci): increase pytest timeout to 10 min Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Joan Fontanals <[email protected]> * fix(ci): fix ci file * feat(daemon): jinad pod for star routing * Revert "feat(daemon): jinad pod for star routing" 
This reverts commit ed9b37ac862af2e2e8d52df1ee51c0c331d76f92. * feat(daemon): remote jinad pod support (#4042) * feat(daemon): add pod tests for star routing * feat(daemon): add remote pod test * test(daemon): add remote pod arguments test * test(daemon): add async scale test * test(daemon): add rolling update test * test(daemon): fix host * feat(proto): remove message proto (#4051) * feat(proto): remove message proto * fix(tests): fix tests * fix(tests): fix some more tests * fix(tests): fix more tests * fix(tests): fix more tests * fix(tests): fix more tests * fix(tests): fix more tests * feat(proto): put docs back in data * fix(proto): clean up * feat(proto): clean up * fix(tests): skip latency tracking * fix(test): fix hub test * fix(tests): fix k8s test * fix(test): some test clean up * fix(style): clean up style issues * feat(proto): adjust for rebase * fix(tests): bring back latency tracking * fix(tests): fix merge accident * feat(proto): skip request serialization (#4074) * feat: add reduce to star routing (#4070) * feat: add reduce on shards to head runtime * test: add reduce integration tests with fixed order * feat: add reduce on needs * chore: get_docs_matrix_from_request becomes public * style: fix overload and cli autocomplete * docs: remove undeterministic results warning * fix: fix uses_after * test: assert correct num docs after reducing in test_external_pod * test: correct asserts after reduce in test_rolling_update * fix: no reduce if uses_after_address is set * fix: get_docs_from_request only if needed * fix: fix tests after merge * refactor: move reduce from data_request_handler to head * style: fix overload and cli autocomplete * chore: apply suggestions * fix: fix asserts * chore: minor test fix * chore: apply suggestions * test: remove flow tests with external executor (pea) * fix: fix test_expected_messages_routing * fix: fix test_func_joiner * test: adapt k8s test Co-authored-by: Jina Dev Bot <[email protected]> * fix(k8s): fix static pool config * fix: use custom protoc doc generator image (#4088) * fix: use custom protoc doc generator image * fix(docs): minor doc improvement * fix(docs): use custom image * fix(docs): copy docarray * fix: doc building local only * fix: timeout doc building * fix: use updated args when building ContainerPea * test: add container PeaFactory test * fix: force pea close on windows (#4098) * fix: dont reduce if uses exist (#4099) * fix: dont use reduce if uses exist * fix: adjust reduce tests * fix: adjust more reduce tests * fix: fix more tests * fix: adjust more tests * fix: ignore non jina resources (#4101) * feat(executor): enable async executors (#4102) * feat(daemon): daemon flow on star routing (#4096) * test(daemon): add remote flow test * feat(daemon): call scale in daemon * feat(daemon): remove tail args and identity * test(daemon): rename scalable executor * test(daemon): add a small delay in async test * feat(daemon): scale partial flow only * feat(daemon): call scale directly in partial flow store * test(daemon): use asyncio sleep * feat(daemon): enable flow level distributed tests * test(daemon): fix jinad env workspace config * test(daemon): fix pod test use new port rolling update * feat(daemon): enable distribuetd tests * test(daemon): remove duplicate tests and zed runtime test * test(daemon): fix stores unit test * feat(daemon): enable part of distributed tests * feat(daemon): enable part of distributed tests * test: correct test paths * test(daemon): add client test for remote flows * test(daemon): send a request 
with jina client * test(daemon): assert async generator * test(daemon): small interval between tests * test(daemon): add flow test for container runtime * test(daemon): add flow test for container runtime * test(daemon): fix executor name * test(daemon): fix executor name * test(daemon): use async client fetch result * test(daemon): finish container flow test * test(daemon): enable distributed in ci * test(daemon): enable distributed in ci * test(daemon): decare flows and pods * test(daemon): debug ci if else * test(daemon): debug ci if else * test(daemon): decare flows and pods * test(daemon): correct test paths * test(daemon): add small delay for async tests * fix: star routing fixes (#4100) * docs: update docs * fix: fix Request.__repr__ * docs: update flow remarks * docs: fix typo * test: add non_empty_fields test * chore: remove non_empty_fields test * feat: polling per endpoint (#4111) * feat(polling): polling per endpoint configurable * fix: adjust tests * feat(polling): extend documentation * style: fix overload and cli autocomplete * fix: clean up * fix: adjust more tests * fix: remove repeat from flaky test * fix: k8s test * feat(polling): address pr feedback * feat: improve docs Co-authored-by: Jina Dev Bot <[email protected]> * feat(grpc): support connect grpc server via ssl tunnel (#4092) * feat(grpc): support ssl grpc connect if port is 443 * fix(grpc): use https option instead of detect port automatically * chore: fix typo * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * test(networking): add test for peapods networking * fix: address comments Co-authored-by: Joan Fontanals <[email protected]> * feat(polling): unify polling args (#4113) * fix: several issues for jinad pods (#4119) * fix: activate for jinad pods * fix: dont expose worker pod in partial daemon * fix: workspace setting * fix: containerized flows * fix: hub test * feat(daemon): remote peas on star routing (#4112) * test(daemon): fix request in peas * test(daemon): fix request in peas * test(daemon): fix sync async client test * test(daemon): enable remote peas test * test(daemon): replace send message to send request * test(daemon): declare pea tests in ci * test(daemon): use pea args fixture * test(daemon): head pea use default host * test(daemon): fix peas topologies * test(daemon): fix pseudo naming * test(daemon): use default host as host * test(daemon): fix executor path * test(daemon): add remote worker back * test(daemon): skip local remote remote topology * fix: jinad pea test setup * fix: jinad pea tests * fix: remove invalid assertion Co-authored-by: jacobowitz <[email protected]> * feat: enable daemon tests again (#4132) * feat: enable daemon tests again * fix: remove bogy empty script file * fix: more jinad test fixes * style: fix overload and cli autocomplete * fix: scale and ru in jinad * fix: fix more jinad tests Co-authored-by: Jina Dev Bot <[email protected]> * fix: fix flow test * fix: improve pea tests reliability (#4136) Co-authored-by: Joan Fontanals <[email protected]> Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Deepankar Mahapatro <[email protected]> Co-authored-by: bwanglzu <[email protected]> Co-authored-by: AlaeddineAbdessalem <[email protected]> Co-authored-by: Zhaofeng Miao <[email protected]>
1,734
0
267
104
35
9,859
35
jina
18
jina/peapods/peas/container.py
Python
20
{ "docstring": "Start the ContainerPea.\n This method calls :meth:`start` in :class:`threading.Thread` or :class:`multiprocesssing.Process`.\n .. #noqa: DAR201\n ", "language": "en", "n_whitespaces": 35, "n_words": 14, "vocab_size": 14 }
https://github.com/jina-ai/jina.git
1
info
def info(self, pretty=False, best=False):
    # type: (bool, bool) -> InfoDict
    return dict(
        id=self.id(),
        version=self.version(pretty, best),
        version_parts=dict(
            major=self.major_version(best),
            minor=self.minor_version(best),
            build_number=self.build_number(best),
        ),
        like=self.like(),
        codename=self.codename(),
    )
f3166e673fe8d40277b804d35d77dcdb760fc3b3
13
distro.py
130
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
3,224
0
162
86
23
20,079
23
pipenv
15
pipenv/patched/notpip/_vendor/distro.py
Python
12
{ "docstring": "\n Return certain machine-readable information about the OS\n distribution.\n\n For details, see :func:`distro.info`.\n ", "language": "en", "n_whitespaces": 41, "n_words": 12, "vocab_size": 12 }
https://github.com/pypa/pipenv.git
1
data
def data(self) -> 'DataRequest._DataContent': return DataRequest._DataContent(self.proto_with_data.data)
c3849c6fee4a65a77a82b2cfda9670d727ff0f53
9
data.py
35
feat: allow to access parameters of data request wo loading data (#4991)
2,384
0
20
19
6
12,698
6
jina
5
jina/types/request/data.py
Python
6
{ "docstring": "Get the data contained in this data request\n\n :return: the data content as an instance of _DataContent wrapping docs\n ", "language": "en", "n_whitespaces": 33, "n_words": 19, "vocab_size": 16 }
https://github.com/jina-ai/jina.git
1
test_modify_event
def test_modify_event(self) -> None: # first patch the event checker so that it will modify the event
2ffaf30803f93273a4d8a65c9e6c3110c8433488
6
test_third_party_rules.py
17
Add type hints to `tests/rest/client` (#12108) * Add type hints to `tests/rest/client` * newsfile * fix imports * add `test_account.py` * Remove one type hint in `test_report_event.py` * change `on_create_room` to `async` * update new functions in `test_third_party_rules.py` * Add `test_filter.py` * add `test_rooms.py` * change to `assertEquals` to `assertEqual` * lint
71,596
0
31
139
15
247,330
17
synapse
2
tests/rest/client/test_third_party_rules.py
Python
20
{ "docstring": "The module can return a modified version of the event", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/matrix-org/synapse.git
1
Dagum
def Dagum(name, p, a, b):
    r
    return rv(name, DagumDistribution, (p, a, b))

#-------------------------------------------------------------------------------
# Erlang distribution ----------------------------------------------------------
9ad8ab9fe58051cf11626ba6654852fcfec60147
8
crv_types.py
40
Documentation cleanup 5
48,100
0
20
28
16
196,682
17
sympy
7
sympy/stats/crv_types.py
Python
57
{ "docstring": "\n Create a continuous random variable with a Dagum distribution.\n\n Explanation\n ===========\n\n The density of the Dagum distribution is given by\n\n .. math::\n f(x) := \\frac{a p}{x} \\left( \\frac{\\left(\\tfrac{x}{b}\\right)^{a p}}\n {\\left(\\left(\\tfrac{x}{b}\\right)^a + 1 \\right)^{p+1}} \\right)\n\n with :math:`x > 0`.\n\n Parameters\n ==========\n\n p : Real number\n `p > 0`, a shape.\n a : Real number\n `a > 0`, a shape.\n b : Real number\n `b > 0`, a scale.\n\n Returns\n =======\n\n RandomSymbol\n\n Examples\n ========\n\n >>> from sympy.stats import Dagum, density, cdf\n >>> from sympy import Symbol\n\n >>> p = Symbol(\"p\", positive=True)\n >>> a = Symbol(\"a\", positive=True)\n >>> b = Symbol(\"b\", positive=True)\n >>> z = Symbol(\"z\")\n\n >>> X = Dagum(\"x\", p, a, b)\n\n >>> density(X)(z)\n a*p*(z/b)**(a*p)*((z/b)**a + 1)**(-p - 1)/z\n\n >>> cdf(X)(z)\n Piecewise(((1 + (z/b)**(-a))**(-p), z >= 0), (0, True))\n\n\n References\n ==========\n\n .. [1] https://en.wikipedia.org/wiki/Dagum_distribution\n\n ", "language": "en", "n_whitespaces": 269, "n_words": 132, "vocab_size": 89 }
https://github.com/sympy/sympy.git
1
diffusion_defaults
def diffusion_defaults():
    return dict(
        learn_sigma=False,
        diffusion_steps=1000,
        noise_schedule="linear",
        timestep_respacing="",
        use_kl=False,
        predict_xstart=False,
        rescale_timesteps=False,
        rescale_learned_sigmas=False,
    )
f4d6e64cdc132ae868699a0ba442f4ab1d304a14
9
script_util.py
63
add disco_diffusion_cnclip_vitb16 module
9,936
0
78
41
13
49,840
13
PaddleHub
10
modules/image/text_to_image/disco_diffusion_cnclip_vitb16/reverse_diffusion/model/script_util.py
Python
11
{ "docstring": "\n Defaults for image and classifier training.\n ", "language": "en", "n_whitespaces": 13, "n_words": 6, "vocab_size": 6 }
https://github.com/PaddlePaddle/PaddleHub.git
6
resize
def resize(self, n):
    aform = self.array_form
    l = len(aform)
    if n > l:
        aform += list(range(l, n))
        return Permutation._af_new(aform)

    elif n < l:
        cyclic_form = self.full_cyclic_form
        new_cyclic_form = []
        for cycle in cyclic_form:
            cycle_min = min(cycle)
            cycle_max = max(cycle)
            if cycle_min <= n-1:
                if cycle_max > n-1:
                    raise ValueError(
                        "The permutation cannot be resized to {} "
                        "because the cycle {} may break."
                        .format(n, tuple(cycle)))

                new_cyclic_form.append(cycle)
        return Permutation(new_cyclic_form)

    return self

# XXX Deprecated flag
print_cyclic = None
498015021131af4dbb07eb110e5badaba8250c7b
20
permutations.py
197
Updated import locations
47,668
0
378
116
58
196,168
77
sympy
24
sympy/combinatorics/permutations.py
Python
21
{ "docstring": "Resize the permutation to the new size ``n``.\n\n Parameters\n ==========\n\n n : int\n The new size of the permutation.\n\n Raises\n ======\n\n ValueError\n If the permutation cannot be resized to the given size.\n This may only happen when resized to a smaller size than\n the original.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import Permutation\n\n Increasing the size of a permutation:\n\n >>> p = Permutation(0, 1, 2)\n >>> p = p.resize(5)\n >>> p\n (4)(0 1 2)\n\n Decreasing the size of the permutation:\n\n >>> p = p.resize(4)\n >>> p\n (3)(0 1 2)\n\n If resizing to the specific size breaks the cycles:\n\n >>> p.resize(2)\n Traceback (most recent call last):\n ...\n ValueError: The permutation cannot be resized to 2 because the\n cycle (0, 1, 2) may break.\n ", "language": "en", "n_whitespaces": 340, "n_words": 121, "vocab_size": 70 }
https://github.com/sympy/sympy.git
3
_remove_old_push_actions_that_have_rotated
async def _remove_old_push_actions_that_have_rotated(self) -> None:
    # We want to clear out anything that is older than a day that *has* already
    # been rotated.
    rotated_upto_stream_ordering = await self.db_pool.simple_select_one_onecol(
        table="event_push_summary_stream_ordering",
        keyvalues={},
        retcol="stream_ordering",
    )

    max_stream_ordering_to_delete = min(
        rotated_upto_stream_ordering, self.stream_ordering_day_ago
    )
46bd7f4ed9020bbed459c03a11c26d7f7c3093b0
11
event_push_actions.py
72
Clarifications for event push action processing. (#13485) * Clarifies comments. * Fixes an erroneous comment (about return type) added in #13455 (ec24813220f9d54108924dc04aecd24555277b99). * Clarifies the name of a variable. * Simplifies logic of pulling out the latest join for the requesting user.
72,870
0
131
64
34
249,367
38
synapse
11
synapse/storage/databases/main/event_push_actions.py
Python
18
{ "docstring": "Clear out old push actions that have been summarised.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/matrix-org/synapse.git
1
_asarray
def _asarray(arr):
    _check_arraylike("_asarray", arr)
    dtype, weak_type = dtypes._lattice_result_type(arr)
    return lax_internal._convert_element_type(arr, dtype, weak_type)
ddf23dead3a4214e6ef8a515d0a51648360ab797
8
util.py
55
lax_numpy.py: factor out some common utilities Re-lands part of #9724 PiperOrigin-RevId: 433838553
26,678
0
16
33
11
119,743
12
jax
9
jax/_src/numpy/util.py
Python
4
{ "docstring": "\n Pared-down utility to convert object to a DeviceArray.\n Note this will not correctly handle lists or tuples.\n ", "language": "en", "n_whitespaces": 21, "n_words": 17, "vocab_size": 16 }
https://github.com/google/jax.git
2
_get_driver
def _get_driver(self) -> str:
    drivers = [device.get("driverVersion", "No Driver Found")
               for device in self._device_details]
    self._log("debug", f"GPU Drivers: {drivers}")
    return drivers
bdbbad4d310fb606b6f412aa81e9f57ccd994e97
10
amd.py
69
Refactor lib.gpu_stats (#1218) * inital gpu_stats refactor * Add dummy CPU Backend * Update Sphinx documentation
20,028
0
66
37
19
100,564
20
faceswap
8
lib/gpu_stats/amd.py
Python
12
{ "docstring": " Obtain the AMD driver version currently in use.\n\n Returns\n -------\n str\n The current AMD GPU driver versions\n ", "language": "en", "n_whitespaces": 57, "n_words": 17, "vocab_size": 15 }
https://github.com/deepfakes/faceswap.git
1
test_payment_refund_or_void_refund_called
def test_payment_refund_or_void_refund_called(refund_mock, payment):
    # given
    payment.transactions.count() == 0
    payment.charge_status = ChargeStatus.FULLY_CHARGED
    payment.save(update_fields=["charge_status"])

    # when
    gateway.payment_refund_or_void(payment, get_plugins_manager(), None)

    # then
    assert refund_mock.called_once()


@patch("saleor.payment.gateway.refund")
0881beec1ac02dfa97525c5173687defb356d85c
@patch("saleor.payment.gateway.refund")
10
test_gateway.py
101
Fix payment flow (#9504) * Do not capture payment again when it should be refunded or voided * Do not create order when then is ongoing refund
5,044
1
48
52
20
26,685
22
saleor
15
saleor/payment/tests/test_gateway.py
Python
6
{ "docstring": "Ensure that the refund method is called when payment can be refunded\n and there is no refund transaction.", "language": "en", "n_whitespaces": 20, "n_words": 18, "vocab_size": 16 }
https://github.com/saleor/saleor.git
16
telescopic
def telescopic(L, R, limits):
    (i, a, b) = limits
    if L.is_Add or R.is_Add:
        return None

    # We want to solve(L.subs(i, i + m) + R, m)
    # First we try a simple match since this does things that
    # solve doesn't do, e.g. solve(f(k+m)-f(k), m) fails

    k = Wild("k")
    sol = (-R).match(L.subs(i, i + k))
    s = None
    if sol and k in sol:
        s = sol[k]
        if not (s.is_Integer and L.subs(i, i + s) == -R):
            # sometimes match fail(f(x+2).match(-f(x+k))->{k: -2 - 2x}))
            s = None

    # But there are things that match doesn't do that solve
    # can do, e.g. determine that 1/(x + m) = 1/(1 - x) when m = 1

    if s is None:
        m = Dummy('m')
        try:
            from sympy.solvers.solvers import solve
            sol = solve(L.subs(i, i + m) + R, m) or []
        except NotImplementedError:
            return None
        sol = [si for si in sol if si.is_Integer and
               (L.subs(i, i + si) + R).expand().is_zero]
        if len(sol) != 1:
            return None
        s = sol[0]

    if s < 0:
        return telescopic_direct(R, L, abs(s), (i, a, b))
    elif s > 0:
        return telescopic_direct(L, R, s, (i, a, b))
f757f3daae6e11ea0cfb7dadc133274d8d74315f
19
summations.py
374
Reordered imports 2
48,161
0
391
242
104
196,771
189
sympy
27
sympy/concrete/summations.py
Python
27
{ "docstring": "\n Tries to perform the summation using the telescopic property.\n\n Return None if not possible.\n ", "language": "en", "n_whitespaces": 24, "n_words": 14, "vocab_size": 13 }
https://github.com/sympy/sympy.git
1
insights_lookback_period
def insights_lookback_period(self): return pendulum.duration(days=self._insights_lookback_window)
76032e66dd2b75c611440bdccfc3d55863fb3e39
9
base_insight_streams.py
30
Source Facebook Marketing: Add lookback window to insights streams (#12402)
773
0
18
17
4
5,494
4
airbyte
6
airbyte-integrations/connectors/source-facebook-marketing/source_facebook_marketing/streams/base_insight_streams.py
Python
2
{ "docstring": "\n Facebook freezes insight data 28 days after it was generated, which means that all data\n from the past 28 days may have changed since we last emitted it, so we retrieve it again.\n But in some cases users my have define their own lookback window, thats\n why the value for `insights_lookback_window` is set throught config.\n ", "language": "en", "n_whitespaces": 91, "n_words": 55, "vocab_size": 48 }
https://github.com/airbytehq/airbyte.git
5
check_permissions_for_custom_prices
def check_permissions_for_custom_prices(app, lines):
    if any(["price" in line for line in lines]) and (
        not app or not app.has_perm(CheckoutPermissions.HANDLE_CHECKOUTS)
    ):
        raise PermissionDenied(permissions=[CheckoutPermissions.HANDLE_CHECKOUTS])
620569b3a2466e8dda80df20a11b99ec0bec8c7c
12
utils.py
79
Custom prices (#9393) * Add price_override to CheckoutLine model * Allow to define custom price in checkout mutations * Apply code review suggestions * Use custom price when generating checkout payload * Use price override when calculating prices for checkout * Update populatedb - create checkout with custom prices * Fix schema.graphql file * Make quantity optional in `CheckoutLinesUpdate` mutation (#9430) * Make quantity optional in CheckoutLinesUpdate mutation * Refactor add_variants_to_checkout checkout utils * Update changelog * Update quantity field description in CheckoutLineUpdateInput
5,029
0
44
48
18
26,585
21
saleor
10
saleor/graphql/checkout/mutations/utils.py
Python
5
{ "docstring": "Raise PermissionDenied when custom price is changed by user or app without perm.\n\n Checkout line custom price can be changed only by app with\n handle checkout permission.\n ", "language": "en", "n_whitespaces": 36, "n_words": 27, "vocab_size": 22 }
https://github.com/saleor/saleor.git
4
_perform_invalid_key_test
def _perform_invalid_key_test(self, key, expected_warning, key_func=None):
    # mimic custom ``make_key`` method being defined since the default will
    # never show the below warnings
9c19aff7c7561e3a82978a272ecdaad40dda5c00
6
tests.py
24
Refs #33476 -- Reformatted code with Black.
50,040
0
43
191
20
202,050
22
django
5
tests/cache/tests.py
Python
24
{ "docstring": "\n All the builtin backends should warn (except memcached that should\n error) on keys that would be refused by memcached. This encourages\n portable caching code without making it too difficult to use production\n backends with more liberal key rules. Refs #6447.\n ", "language": "en", "n_whitespaces": 76, "n_words": 40, "vocab_size": 37 }
https://github.com/django/django.git
1
set_negative_aliases
def set_negative_aliases(self, negative_alias):
    self._check_alias_dict(negative_alias, "negative alias")
    self.negative_alias = negative_alias
8198943edd73a363c266633e1aa5b2a9e9c9f526
8
fancy_getopt.py
37
add python 3.10.4 for windows
56,809
0
30
21
9
222,921
9
XX-Net
4
python3.10.4/Lib/distutils/fancy_getopt.py
Python
3
{ "docstring": "Set the negative aliases for this option parser.\n 'negative_alias' should be a dictionary mapping option names to\n option names, both the key and value must already be defined\n in the option table.", "language": "en", "n_whitespaces": 52, "n_words": 32, "vocab_size": 26 }
https://github.com/XX-net/XX-Net.git
9
aug_test_bboxes
def aug_test_bboxes(self, feats, img_metas, rescale=False):
    # check with_nms argument
    gb_sig = signature(self.get_results)
    gb_args = [p.name for p in gb_sig.parameters.values()]
    gbs_sig = signature(self._get_results_single)
    gbs_args = [p.name for p in gbs_sig.parameters.values()]
    assert ('with_nms' in gb_args) and ('with_nms' in gbs_args), \
        f'{self.__class__.__name__}' \
        ' does not support test-time augmentation'

    aug_bboxes = []
    aug_scores = []
    aug_labels = []
    for x, img_meta in zip(feats, img_metas):
        # only one image in the batch
        outs = self.forward(x)
        bbox_outputs = self.get_results(
            *outs,
            img_metas=img_meta,
            cfg=self.test_cfg,
            rescale=False,
            with_nms=False)[0]
        aug_bboxes.append(bbox_outputs.bboxes)
        aug_scores.append(bbox_outputs.scores)
        if len(bbox_outputs) >= 3:
            aug_labels.append(bbox_outputs.labels)

    # after merging, bboxes will be rescaled to the original image size
    merged_bboxes, merged_scores = self.merge_aug_bboxes(
        aug_bboxes, aug_scores, img_metas)
    merged_labels = torch.cat(aug_labels, dim=0) if aug_labels else None

    if merged_bboxes.numel() == 0:
        det_bboxes = torch.cat([merged_bboxes, merged_scores[:, None]], -1)
        return [
            (det_bboxes, merged_labels),
        ]

    det_bboxes, keep_idxs = batched_nms(merged_bboxes, merged_scores,
                                        merged_labels, self.test_cfg.nms)
    det_bboxes = det_bboxes[:self.test_cfg.max_per_img]
    det_labels = merged_labels[keep_idxs][:self.test_cfg.max_per_img]

    if rescale:
        _det_bboxes = det_bboxes
    else:
        _det_bboxes = det_bboxes.clone()
        _det_bboxes[:, :4] *= det_bboxes.new_tensor(
            img_metas[0][0]['scale_factor'])

    results = InstanceData()
    results.bboxes = _det_bboxes[:, :4]
    results.scores = _det_bboxes[:, 4]
    results.labels = det_labels
    return [results]
9a3bf7660e6ced54672741095f96df07919f9ba7
14
dense_test_mixins.py
567
[Refactor] Refactor dense head outputs to InstanceResults.
70,341
0
674
361
122
244,349
171
mmdetection
54
mmdet/models/dense_heads/dense_test_mixins.py
Python
46
{ "docstring": "Test det bboxes with test time augmentation, can be applied in\n DenseHead except for ``RPNHead`` and its variants, e.g., ``GARPNHead``,\n etc.\n\n Args:\n feats (list[Tensor]): the outer list indicates test-time\n augmentations and inner Tensor should have a shape NxCxHxW,\n which contains features for all images in the batch.\n img_metas (list[list[dict]]): the outer list indicates test-time\n augs (multiscale, flip, etc.) and the inner list indicates\n images in a batch. each dict has image information.\n rescale (bool, optional): Whether to rescale the results.\n Defaults to False.\n\n Returns:\n list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple.\n The first item is ``bboxes`` with shape (n, 5),\n where 5 represent (tl_x, tl_y, br_x, br_y, score).\n The shape of the second tensor in the tuple is ``labels``\n with shape (n,). The length of list should always be 1.\n ", "language": "en", "n_whitespaces": 345, "n_words": 131, "vocab_size": 92 }
https://github.com/open-mmlab/mmdetection.git
7
remove_if_stale
def remove_if_stale(self): try: pid = self.read_pid() except ValueError: print('Broken pidfile found - Removing it.', file=sys.stderr) self.remove() return True if not pid: self.remove() return True try: os.kill(pid, 0) except os.error as exc: if exc.errno == errno.ESRCH or exc.errno == errno.EPERM: print('Stale pidfile exists - Removing it.', file=sys.stderr) self.remove() return True except SystemError: print('Stale pidfile exists - Removing it.', file=sys.stderr) self.remove() return True return False
13d545b2155ebe9ee0ffad9e9d9ffc09a39185df
14
platforms.py
202
Fix grammar typos on the whole project
52,237
0
285
119
35
208,205
63
celery
18
celery/platforms.py
Python
22
{ "docstring": "Remove the lock if the process isn't running.\n\n I.e. process does not respond to signal.\n ", "language": "en", "n_whitespaces": 29, "n_words": 15, "vocab_size": 13 }
https://github.com/celery/celery.git
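The stale-pidfile check in remove_if_stale above hinges on os.kill(pid, 0): signal number 0 delivers nothing and only reports whether the process exists. A minimal standalone sketch of that probe, assuming POSIX semantics (Windows behaves differently); the helper name pid_is_running is illustrative and not part of Celery:

import errno
import os

def pid_is_running(pid):
    # Signal 0 performs error checking only; no signal is actually delivered.
    try:
        os.kill(pid, 0)
    except OSError as exc:
        if exc.errno == errno.ESRCH:   # no such process -> the pidfile is stale
            return False
        if exc.errno == errno.EPERM:   # process exists but belongs to another user
            return True
        raise
    return True

print(pid_is_running(os.getpid()))  # True for the current process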
1
_tie_weights
def _tie_weights(self):
    # To tie those two weights if they get disconnected (on TPU or when the bias is resized)
    self.bias = self.decoder.bias


@add_start_docstrings(
    ,
    XLM_ROBERTA_XL_START_DOCSTRING,
)
e09473a817c5e5871e11cc81004355ef30250502
@add_start_docstrings( """ XLM-RoBERTa-xlarge Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """, XLM_ROBERTA_XL_START_DOCSTRING, )
8
modeling_xlm_roberta_xl.py
38
Add support for XLM-R XL and XXL models by modeling_xlm_roberta_xl.py (#13727) * add xlm roberta xl * add convert xlm xl fairseq checkpoint to pytorch * fix init and documents for xlm-roberta-xl * fix indention * add test for XLM-R xl,xxl * fix model hub name * fix some stuff * up * correct init * fix more * fix as suggestions * add torch_device * fix default values of doc strings * fix leftovers * merge to master * up * correct hub names * fix docs * fix model * up * finalize * last fix * Apply suggestions from code review Co-authored-by: Sylvain Gugger <[email protected]> * add copied from * make style Co-authored-by: Patrick von Platen <[email protected]> Co-authored-by: Sylvain Gugger <[email protected]>
6,311
1
44
14
27
34,690
27
transformers
6
src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py
Python
2
{ "docstring": "\n XLM-RoBERTa-xlarge Model transformer with a sequence classification/regression head on top (a linear layer on top\n of the pooled output) e.g. for GLUE tasks.\n ", "language": "en", "n_whitespaces": 33, "n_words": 23, "vocab_size": 21 }
https://github.com/huggingface/transformers.git
5
uri_to_iri
def uri_to_iri(uri):
    if uri is None:
        return uri
    uri = force_bytes(uri)
    # Fast selective unquote: First, split on '%' and then starting with the
    # second block, decode the first 2 bytes if they represent a hex code to
    # decode. The rest of the block is the part after '%AB', not containing
    # any '%'. Add that to the output without further processing.
    bits = uri.split(b"%")
    if len(bits) == 1:
        iri = uri
    else:
        parts = [bits[0]]
        append = parts.append
        hextobyte = _hextobyte
        for item in bits[1:]:
            hex = item[:2]
            if hex in hextobyte:
                append(hextobyte[item[:2]])
                append(item[2:])
            else:
                append(b"%")
                append(item)
        iri = b"".join(parts)
    return repercent_broken_unicode(iri).decode()
9c19aff7c7561e3a82978a272ecdaad40dda5c00
17
encoding.py
212
Refs #33476 -- Reformatted code with Black.
51,602
0
280
126
77
206,644
105
django
16
django/utils/encoding.py
Python
21
{ "docstring": "\n Convert a Uniform Resource Identifier(URI) into an Internationalized\n Resource Identifier(IRI).\n\n This is the algorithm from section 3.2 of RFC 3987, excluding step 4.\n\n Take an URI in ASCII bytes (e.g. '/I%20%E2%99%A5%20Django/') and return\n a string containing the encoded result (e.g. '/I%20♥%20Django/').\n ", "language": "en", "n_whitespaces": 60, "n_words": 41, "vocab_size": 36 }
https://github.com/django/django.git
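A small sketch of the "selective unquote" technique this function uses: split on '%' and decode only the escapes listed in a lookup table, leaving the rest untouched. Here decode_set is a stand-in for Django's precomputed _hextobyte table, so this is an illustration of the idea rather than Django's exact behaviour:

def selective_unquote(uri_bytes, decode_set):
    # Each chunk after a '%' starts with two hex digits ('%AB...' -> b'AB...').
    # Decode those two digits only when they appear in decode_set; otherwise
    # re-emit the escape sequence unchanged.
    bits = uri_bytes.split(b"%")
    out = [bits[0]]
    for chunk in bits[1:]:
        hex_pair = chunk[:2]
        if hex_pair in decode_set:
            out.append(bytes([int(hex_pair, 16)]) + chunk[2:])
        else:
            out.append(b"%" + chunk)
    return b"".join(out)

raw = b"/I%20%E2%99%A5%20Django/"
print(selective_unquote(raw, {b"E2", b"99", b"A5"}).decode("utf-8"))
# -> '/I%20♥%20Django/' : the non-ASCII bytes are decoded, the reserved '%20' escapes stay encoded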
1
test_ohe_drop_first_handle_unknown_ignore_warns
def test_ohe_drop_first_handle_unknown_ignore_warns(handle_unknown):
    X = [["a", 0], ["b", 2], ["b", 1]]
    ohe = OneHotEncoder(drop="first", sparse=False, handle_unknown=handle_unknown)
    X_trans = ohe.fit_transform(X)
    X_expected = np.array(
        [
            [0, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
        ]
    )
    assert_allclose(X_trans, X_expected)

    # Both categories are unknown
    X_test = [["c", 3]]
    X_expected = np.array([[0, 0, 0]])

    warn_msg = (
        r"Found unknown categories in columns \[0, 1\] during "
        "transform. These unknown categories will be encoded as all "
        "zeros"
    )
    with pytest.warns(UserWarning, match=warn_msg):
        X_trans = ohe.transform(X_test)
    assert_allclose(X_trans, X_expected)

    # inverse_transform maps to None
    X_inv = ohe.inverse_transform(X_expected)
    assert_array_equal(X_inv, np.array([["a", 0]], dtype=object))


@pytest.mark.parametrize("handle_unknown", ["ignore", "infrequent_if_exist"])
7f0006c8aad1a09621ad19c3db19c3ff0555a183
@pytest.mark.parametrize("handle_unknown", ["ignore", "infrequent_if_exist"])
12
test_encoders.py
307
ENH Adds infrequent categories to OneHotEncoder (#16018) * ENH Completely adds infrequent categories * STY Linting * STY Linting * DOC Improves wording * DOC Lint * BUG Fixes * CLN Address comments * CLN Address comments * DOC Uses math to description float min_frequency * DOC Adds comment regarding drop * BUG Fixes method name * DOC Clearer docstring * TST Adds more tests * FIX Fixes mege * CLN More pythonic * CLN Address comments * STY Flake8 * CLN Address comments * DOC Fix * MRG * WIP * ENH Address comments * STY Fix * ENH Use functiion call instead of property * ENH Adds counts feature * CLN Rename variables * DOC More details * CLN Remove unneeded line * CLN Less lines is less complicated * CLN Less diffs * CLN Improves readiabilty * BUG Fix * CLN Address comments * TST Fix * CLN Address comments * CLN Address comments * CLN Move docstring to userguide * DOC Better wrapping * TST Adds test to handle_unknown='error' * ENH Spelling error in docstring * BUG Fixes counter with nan values * BUG Removes unneeded test * BUG Fixes issue * ENH Sync with main * DOC Correct settings * DOC Adds docstring * DOC Immprove user guide * DOC Move to 1.0 * DOC Update docs * TST Remove test * DOC Update docstring * STY Linting * DOC Address comments * ENH Neater code * DOC Update explaination for auto * Update sklearn/preprocessing/_encoders.py Co-authored-by: Roman Yurchak <[email protected]> * TST Uses docstring instead of comments * TST Remove call to fit * TST Spelling error * ENH Adds support for drop + infrequent categories * ENH Adds infrequent_if_exist option * DOC Address comments for user guide * DOC Address comments for whats_new * DOC Update docstring based on comments * CLN Update test with suggestions * ENH Adds computed property infrequent_categories_ * DOC Adds where the infrequent column is located * TST Adds more test for infrequent_categories_ * DOC Adds docstring for _compute_drop_idx * CLN Moves _convert_to_infrequent_idx into its own method * TST Increases test coverage * TST Adds failing test * CLN Careful consideration of dropped and inverse_transform * STY Linting * DOC Adds docstrinb about dropping infrequent * DOC Uses only * DOC Numpydoc * TST Includes test for get_feature_names_out * DOC Move whats new * DOC Address docstring comments * DOC Docstring changes * TST Better comments * TST Adds check for handle_unknown='ignore' for infrequent * CLN Make _infrequent_indices private * CLN Change min_frequency default to None * DOC Adds comments * ENH adds support for max_categories=1 * ENH Describe lexicon ordering for ties * DOC Better docstring * STY Fix * CLN Error when explicity dropping an infrequent category * STY Grammar Co-authored-by: Joel Nothman <[email protected]> Co-authored-by: Roman Yurchak <[email protected]> Co-authored-by: Guillaume Lemaitre <[email protected]>
75,665
1
221
178
71
259,232
96
scikit-learn
27
sklearn/preprocessing/tests/test_encoders.py
Python
24
{ "docstring": "Check drop='first' and handle_unknown='ignore'/'infrequent_if_exist'\n during transform.", "language": "en", "n_whitespaces": 8, "n_words": 6, "vocab_size": 6 }
https://github.com/scikit-learn/scikit-learn.git
3
_add_x_axis_labels
def _add_x_axis_labels(self):
    val_range = np.arange(
        0.5, len(self.bar_names), 1
    )  # 0.5 shifted so that labels are centered, not on ticks

    labels = VGroup()
    for i, (value, bar_name) in enumerate(zip(val_range, self.bar_names)):
        # to accommodate negative bars, the label may need to be
        # below or above the x_axis depending on the value of the bar
        if self.values[i] < 0:
            direction = UP
        else:
            direction = DOWN
        bar_name_label = self.x_axis.label_constructor(bar_name)

        bar_name_label.font_size = self.x_axis.font_size
        bar_name_label.next_to(
            self.x_axis.number_to_point(value),
            direction=direction,
            buff=self.x_axis.line_to_number_buff,
        )
        labels.add(bar_name_label)

    self.x_axis.labels = labels
    self.x_axis.add(labels)
149479f9132daf2266c27caa7a3e11ce06be501d
12
probability.py
211
Refactored :class:`~.BarChart` and made it inherit from :class:`~.Axes`. (#2387) * rebase * fixed None bar_names * fixed scale issues * fixed to accept negative bar values * fixed some bugs * Added docs for parameters (DRAFT) * clean up parameters * more clean up * clean up __init__ * replace add_x_labels with built-in functionality * adjust default font_size for labels * Update docs descriptions * Add bar_width and adjust get_bar_labels * Add bar_width and adjust get_bar_labels * Add docs to class and methods * remove unecessary imports * remove getters * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Attempt to fix duplicated parameters section * adjust BarChart example to include title * switch order around * change_bar_values * back to get_bar_values * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * add docs for _update_default_config and fix method * remove print(dicts) * allow negative_numbers to work with bar chart * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * allow negative numbers to work with change_bar_values * add test_probability.py * add control data * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * change example * update examples again * rewrite test * rewrite other test * remove comma after list in example * improve wording in docs * add parameter/docs for label_constructor * change create_label_tex and update methods * update docs * use decimal number * switch default to Tex * update instances of create_label_tex in coordinate_systems.py * hardcode for add_labels * add TODO * use label_constructor * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix indentation in docs * Fix minor doc typo Co-authored-by: Led Me Explain <[email protected]> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
46,048
0
316
135
64
189,413
81
manim
27
manim/mobject/probability.py
Python
20
{ "docstring": "Essentially ``:meth:~.NumberLine.add_labels``, but differs in that\n the direction of the label with respect to the x_axis changes to UP or DOWN\n depending on the value.\n\n UP for negative values and DOWN for positive values.\n ", "language": "en", "n_whitespaces": 62, "n_words": 34, "vocab_size": 27 }
https://github.com/ManimCommunity/manim.git
2
async_cancel_refresh_callback
def async_cancel_refresh_callback(self) -> None: if self._refresh_callback is not None: self._refresh_callback() self._refresh_callback = None
274584f2a47b7db9943168316122196a540080e5
9
entity.py
45
Add strict typing for litterrobot (#75540)
116,446
0
49
26
11
317,880
13
core
3
homeassistant/components/litterrobot/entity.py
Python
5
{ "docstring": "Clear the refresh callback if it has not already fired.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/home-assistant/core.git
8
all_displays
def all_displays():
    # lazy import to avoid circular imports from sklearn.base
    from ._testing import ignore_warnings

    all_classes = []
    root = str(Path(__file__).parent.parent)  # sklearn package
    # Ignore deprecation warnings triggered at import time and from walking
    # packages
    with ignore_warnings(category=FutureWarning):
        for _, module_name, _ in pkgutil.walk_packages(path=[root], prefix="sklearn."):
            module_parts = module_name.split(".")
            if (
                any(part in _MODULE_TO_IGNORE for part in module_parts)
                or "._" in module_name
            ):
                continue
            module = import_module(module_name)
            classes = inspect.getmembers(module, inspect.isclass)
            classes = [
                (name, display_class)
                for name, display_class in classes
                if not name.startswith("_") and name.endswith("Display")
            ]

            all_classes.extend(classes)

    return sorted(set(all_classes), key=itemgetter(0))
b22f7fa552c03aa7f6b9b4d661470d0173f8db5d
17
discovery.py
258
MNT numpydoc validation for Displays (#21469) Co-authored-by: Olivier Grisel <[email protected]> Co-authored-by: Jérémie du Boisberranger <[email protected]>
76,673
0
304
153
69
261,168
91
scikit-learn
37
sklearn/utils/discovery.py
Python
21
{ "docstring": "Get a list of all displays from `sklearn`.\n\n Returns\n -------\n displays : list of tuples\n List of (name, class), where ``name`` is the display class name as\n string and ``class`` is the actual type of the class.\n ", "language": "en", "n_whitespaces": 63, "n_words": 37, "vocab_size": 29 }
https://github.com/scikit-learn/scikit-learn.git
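The discovery pattern above (pkgutil.walk_packages + import_module + inspect.getmembers) works for any package. A simplified, hedged sketch using only the standard library; find_classes is a hypothetical helper, the real scikit-learn version additionally skips private modules and silences deprecation warnings, and note that importing modules executes their import-time code:

import inspect
import pkgutil
from importlib import import_module

def find_classes(package, suffix):
    # Walk every module under `package`, import it, and collect public
    # classes whose names end with `suffix`.
    found = []
    prefix = package.__name__ + "."
    for _, module_name, _ in pkgutil.walk_packages(package.__path__, prefix=prefix):
        module = import_module(module_name)
        for name, obj in inspect.getmembers(module, inspect.isclass):
            if not name.startswith("_") and name.endswith(suffix):
                found.append((name, obj))
    return sorted(set(found), key=lambda pair: pair[0])

import html
print(find_classes(html, suffix="Parser"))  # e.g. [('HTMLParser', <class 'html.parser.HTMLParser'>)]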
4
on_mode_entered
def on_mode_entered(self, mode): if (config.val.tabs.mode_on_change == 'restore' and mode in modeman.INPUT_MODES): tab = self.widget.currentWidget() if tab is not None: assert isinstance(tab, browsertab.AbstractTab), tab tab.data.input_mode = mode
a20bb67a878b2e68abf8268c1b0a27f018d01352
12
tabbedbrowser.py
96
mypy: Upgrade to PyQt5-stubs 5.15.6.0 For some unknown reason, those new stubs cause a *lot* of things now to be checked by mypy which formerly probably got skipped due to Any being implied somewhere. The stubs themselves mainly improved, with a couple of regressions too. In total, there were some 337 (!) new mypy errors. This commit fixes almost all of them, and the next commit improves a fix to get things down to 0 errors again. Overview of the changes: ==== qutebrowser/app.py - Drop type ignore due to improved stubs. ==== qutebrowser/browser/browsertab.py - Specify the type of _widget members more closely than just QWidget. This is debatable: I suppose the abstract stuff shouldn't need to know anything about the concrete backends at all. But it seems like we cut some corners when initially implementing things, and put some code in browsertab.py just because the APIs of both backends happened to be compatible. Perhaps something to reconsider once we drop QtWebKit and hopefully implement a dummy backend. - Add an additional assertion in AbstractAction.run_string. This is already covered by the isinstance(member, self.action_base) above it, but that's too dynamic for mypy to understand. - Fix the return type of AbstractScroller.pos_px, which is a QPoint (with x and y components), not a single int. - Fix the return type of AbstractScroller.pos_perc, which is a Tuple (with x and y components), not a single int. - Fix the argument types of AbstractScroller.to_perc, as it's possible to pass fractional percentages too. - Specify the type for AbstractHistoryPrivate._history. See above (_widget) re this being debatable. - Fix the return type of AbstractTabPrivate.event_target(), which can be None (see #3888). - Fix the return type of AbstractTabPrivate.run_js_sync, which is Any (the JS return value), not None. - Fix the argument type for AbstractTabPrivate.toggle_inspector: position can be None to use the last used position. - Declare the type of sub-objects of AbstractTab. - Fix the return value of AbstractTab.icon(), which is the QIcon, not None. ==== qutebrowser/browser/commands.py - Make sure the active window is a MainWindow (with a .win_id attribute). ==== qutebrowser/browser/downloadview.py - Add _model() which makes sure that self.model() is a DownloadModel, not None or any other model. This is needed because other methods access a variety of custom attributes on it, e.g. last_index(). ==== qutebrowser/browser/greasemonkey.py - Add an ignore for AbstractDownload.requested_url which we patch onto the downloads. Probably would be nicer to add it as a proper attribute which always gets set by the DownloadManager. ==== qutebrowser/browser/hints.py - Remove type ignores for QUrl.toString(). - Add a new type ignore for combining different URL flags (which works, but is not exactly type safe... still probably a regression in the stubs). - Make sure the things we get back from self._get_keyparser are what we actually expect. Probably should introduce a TypedDict (and/or overloads for _get_keyparser with typing.Literal) to teach mypy about the exact return value. See #7098. This is needed because we access Hint/NormalKeyParser-specific attributes such as .set_inhibited_timout() or .update_bindings(). ==== qutebrowser/browser/inspector.py - Similar changes than in browsertab.py to make some types where we share API (e.g. .setPage()) more concrete. Didn't work out unfortunately, see next commit. ==== qutebrowser/browser/network/pac.py - Remove now unneeded type ignore for signal. 
==== qutebrowser/browser/qtnetworkdownloads.py - Make sure that downloads is a qtnetworkdownloads.DownloadItem (rather than an AbstractDownload), so that we can call ._uses_nam() on it. ==== qutebrowser/browser/qutescheme.py - Remove now unneeded type ignore for QUrl flags. ==== qutebrowser/browser/urlmarks.py - Specify the type of UrlMarkManager._lineparser, as those only get initialized in _init_lineparser of subclasses, so mypy doesn't know it's supposed to exist. ==== qutebrowser/browser/webelem.py - New casts to turn single KeyboardModifier (enum) entries into KeyboardModifiers (flags). Might not be needed anymore with Qt 6. - With that, casting the final value is now unneeded. ==== qutebrowser/browser/webengine/notification.py - Remove now unneeded type ignore for signal. - Make sure the self.sender() we get in HerbeNotificationAdapter._on_finished() is a QProcess, not just any QObject. ==== qutebrowser/browser/webengine/webenginedownloads.py - Remove now unneeded type ignores for signals. ==== qutebrowser/browser/webengine/webengineelem.py - Specify the type of WebEngineElement._tab. - Remove now unneeded type ignore for mixed flags. ==== qutebrowser/browser/webengine/webengineinspector.py - See changes to inspector.py and next commit. - Remove now unneeded type ignore for signal. ==== qutebrowser/browser/webengine/webenginequtescheme.py - Remove now unneeded type ignore for mixed flags. ==== qutebrowser/browser/webengine/webenginesettings.py - Ignore access of .setter attribute which we patch onto QWebEngineProfile. Would be nice to have a subclass or wrapper-class instead. ==== qutebrowser/browser/webengine/webenginetab.py - Specified the type of _widget members more closely than just QWidget. See browsertab.py changes for details. - Remove some now-unneeded type ignores for creating FindFlags. - Specify more concrete types for WebEngineTab members where we actually need to access WebEngine-specific attributes. - Make sure the page we get is our custom WebEnginePage subclass, not just any QWebEnginePage. This is needed because we access custom attributes on it. ==== qutebrowser/browser/webengine/webview.py - Make sure the page we get is our custom WebEnginePage subclass, not just any QWebEnginePage. This is needed because we access custom attributes on it. ==== qutebrowser/browser/webkit/network/networkreply.py - Remove now unneeded type ignores for signals. ==== qutebrowser/browser/webkit/webkitinspector.py - See changes to inspector.py and next commit. ==== qutebrowser/browser/webkit/webkittab.py - Specify the type of _widget members more closely than just QWidget. See browsertab.py changes for details. - Add a type ignore for WebKitAction because our workaround needs to treat them as ints (which is allowed by PyQt, even if not type-safe). - Add new ignores for findText calls: The text is a QString and can be None; the flags are valid despite mypy thinking they aren't (stubs regression?). - Specify the type for WebKitHistoryPrivate._history, because we access WebKit-specific attributes. See above (_widget) re this being debatable. - Make mypy aware that .currentFrame() and .frameAt() can return None (stubs regression?). - Make sure the .page() and .page().networkAccessManager() are our subclasses rather than the more generic QtWebKit objects, as we use custom attributes. - Add new type ignores for signals (stubs regression!) 
==== qutebrowser/browser/webkit/webpage.py - Make sure the .networkAccessManager() is our subclass rather than the more generic QtWebKit object, as we use custom attributes. - Replace a cast by a type ignore. The cast didn't work anymore. ==== qutebrowser/browser/webkit/webview.py - Make sure the .page() is our subclass rather than the more generic QtWebKit object, as we use custom attributes. ==== qutebrowser/commands/userscripts.py - Remove now unneeded type ignore for signal. ==== qutebrowser/completion/completer.py - Add a new _completion() getter (which ensures it actually gets the completion view) rather than accessing the .parent() directly (which could be any QObject). ==== qutebrowser/completion/completiondelegate.py - Make sure self.parent() is a CompletionView (no helper method as there is only one instance). - Remove a now-unneeded type ignore for adding QSizes. ==== qutebrowser/completion/completionwidget.py - Add a ._model() getter which ensures that we get a CompletionModel (with custom attributes) rather than Qt's .model() which can be any QAbstractItemModel (or None). - Removed a now-unneeded type ignore for OR-ing flags. ==== qutebrowser/completion/models/completionmodel.py - Remove now unneeded type ignores for signals. - Ignore a complaint about .set_pattern() not being defined. Completion categories don't share any common parent class, so it would be good to introduce a typing.Protocol for this. See #7098. ==== qutebrowser/components/misccommands.py - Removed a now-unneeded type ignore for OR-ing flags. ==== qutebrowser/components/readlinecommands.py - Make sure QApplication.instance() is a QApplication (and not just a QCoreApplication). This includes the former "not None" check. ==== qutebrowser/components/scrollcommands.py - Add basic annotation for "funcs" dict. Could have a callable protocol to specify it needs a count kwarg, see #7098. ==== qutebrowser/config/stylesheet.py - Correctly specify that stylesheet apply to QWidgets, not any QObject. - Ignore an attr-defined for obj.STYLESHEET. Perhaps could somehow teach mypy about this with overloads and protocols (stylesheet for set_register being None => STYLESHEET needs to be defined, otherwise anything goes), but perhaps not worth the troble. See #7098. ==== qutebrowser/keyinput/keyutils.py - Remove some now-unneeded type ignores and add a cast for using a single enum value as flags. Might need to look at this again with Qt 6 support. ==== qutebrowser/keyinput/modeman.py - Add a FIXME for using a TypedDict, see comments for hints.py above. ==== qutebrowser/mainwindow/mainwindow.py - Remove now-unneeded type ignores for calling with OR-ed flags. - Improve where we cast from WindowType to WindowFlags, no int needed - Use new .tab_bar() getter, see below. ==== qutebrowser/mainwindow/prompt.py - Remove now-unneeded type ignores for calling with OR-ed flags. ==== qutebrowser/mainwindow/statusbar/bar.py - Adjust type ignores around @pyqtProperty. The fact one is still needed seems like a stub regression. ==== qutebrowser/mainwindow/statusbar/command.py - Fix type for setText() override (from QLineEdit): text can be None (QString in C++). ==== qutebrowser/mainwindow/statusbar/url.py - Adjust type ignores around @pyqtProperty. The fact one is still needed seems like a stub regression. ==== qutebrowser/mainwindow/tabbedbrowser.py - Specify that TabDeque manages browser tabs, not any QWidgets. It accesses AbstractTab-specific attributes. - Make sure that the .tabBar() we get is a tabwidget.TabBar, as we access .maybe_hide. 
- Fix the annotations for stored marks: Scroll positions are a QPoint, not int. - Add _current_tab() and _tab_by_idx() wrappers for .currentWidget() and .widget(), which ensures that the return values are valid AbstractTabs (or None for _tab_by_idx). This is needed because we access AbstractTab-specific attributes. - For some places, where the tab can be None, continue using .currentTab() but add asserts. - Remove some now-unneeded [unreachable] ignores, as mypy knows about the None possibility now. ==== qutebrowser/mainwindow/tabwidget.py - Add new tab_bar() and _tab_by_idx() helpers which check that the .tabBar() and .widget() are of type TabBar and AbstractTab, respectively. - Add additional assertions where we expect ._tab_by_idx() to never be None. - Remove dead code in get_tab_fields for handling a None y scroll position. I was unable to find any place in the code where this could be set to None. - Remove some now-unneeded type ignores and casts, as mypy now knows that _type_by_idx() could be None. - Work around a strange instance where mypy complains about not being able to find the type of TabBar.drag_in_progress from TabWidget._toggle_visibility, despite it clearly being shown as a bool *inside* that class without any annotation. - Add a ._tab_widget() getter in TabBar which ensures that the .parent() is in fact a TabWidget. ==== qutebrowser/misc/crashsignal.py - Remove now unneeded type ignores for signals. ==== qutebrowser/misc/editor.py - Remove now unneeded type ignores for signals. ==== qutebrowser/misc/ipc.py - Remove now unneeded type ignores for signals. - Add new type ignores for .error() which is both a signal and a getter (stub regression?). Won't be relevant for Qt 6 anymore, as the signal was renamed to errorOccurred in 5.15. ==== qutebrowser/misc/objects.py - Make sure mypy knows that objects.app is our custom Application (with custom attributes) rather than any QApplication. ==== qutebrowser/utils/objreg.py - Ignore attr-defined for .win_id attributes. Maybe could add a typing.Protocol, but ideally, the whole objreg stuff should die one day anyways. ==== tests/unit/completion/test_completer.py - Make CompletionWidgetStub inherit from CompletionView so that it passes the new isinstance() asserts in completer.py (see above).
117,362
0
107
60
21
320,800
26
qutebrowser
17
qutebrowser/mainwindow/tabbedbrowser.py
Python
7
{ "docstring": "Save input mode when tabs.mode_on_change = restore.", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
https://github.com/qutebrowser/qutebrowser.git
5
user_state_dir
def user_state_dir(appname=None, appauthor=None, version=None, roaming=False): r if system in ["win32", "darwin"]: path = user_data_dir(appname, appauthor, None, roaming) else: path = os.getenv('XDG_STATE_HOME', os.path.expanduser("~/.local/state")) if appname: path = os.path.join(path, appname) if appname and version: path = os.path.join(path, version) return path
47b5c8af57f0f927d50fe94f3474b09a54292553
14
appdirs.py
150
Standardize cache directory
7,368
0
91
95
28
40,266
38
seaborn
12
seaborn/external/appdirs.py
Python
40
{ "docstring": "Return full path to the user-specific state dir for this application.\n\n \"appname\" is the name of application.\n If None, just the system directory is returned.\n \"appauthor\" (only used on Windows) is the name of the\n appauthor or distributing body for this application. Typically\n it is the owning company name. This falls back to appname. You may\n pass False to disable it.\n \"version\" is an optional version path element to append to the\n path. You might want to use this if you want multiple versions\n of your app to be able to run independently. If used, this\n would typically be \"<major>.<minor>\".\n Only applied when appname is present.\n \"roaming\" (boolean, default False) can be set True to use the Windows\n roaming appdata directory. That means that for users on a Windows\n network setup for roaming profiles, this user data will be\n sync'd on login. See\n <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>\n for a discussion of issues.\n\n Typical user state directories are:\n Mac OS X: same as user_data_dir\n Unix: ~/.local/state/<AppName> # or in $XDG_STATE_HOME, if defined\n Win *: same as user_data_dir\n\n For Unix, we follow this Debian proposal <https://wiki.debian.org/XDGBaseDirectorySpecification#state>\n to extend the XDG spec and support $XDG_STATE_HOME.\n\n That means, by default \"~/.local/state/<AppName>\".\n ", "language": "en", "n_whitespaces": 413, "n_words": 194, "vocab_size": 135 }
https://github.com/mwaskom/seaborn.git
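The Unix branch of user_state_dir boils down to an $XDG_STATE_HOME lookup with a ~/.local/state fallback. A minimal sketch of just that branch, assuming a POSIX system; xdg_state_dir is an illustrative name, not part of seaborn's vendored appdirs:

import os

def xdg_state_dir(appname, version=None):
    # Honour $XDG_STATE_HOME when set, otherwise fall back to ~/.local/state.
    path = os.getenv("XDG_STATE_HOME", os.path.expanduser("~/.local/state"))
    path = os.path.join(path, appname)
    if version:
        path = os.path.join(path, version)
    return path

print(xdg_state_dir("MyApp", "1.2"))   # e.g. /home/user/.local/state/MyApp/1.2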
2
test_01_drop_predictor
def test_01_drop_predictor(self):
    if self.test_model_1 not in self.handler.get_tables().data_frame.values:
        # TODO: seems redundant because of test_02
        query = f
        self.handler.native_query(query)
    response = self.handler.native_query(f"DROP PREDICTOR {self.test_model_1}")
    self.assertTrue(response.type == RESPONSE_TYPE.OK)
871793d4fbd99f454c0c1ff14db6ce3c385e656c
11
test_lightwood_handler.py
120
add more TS tests
25,578
0
79
58
25
115,838
26
mindsdb
16
mindsdb/integrations/handlers/lightwood_handler/tests/test_lightwood_handler.py
Python
10
{ "docstring": "\n CREATE PREDICTOR {self.test_model_1}\n FROM {PG_HANDLER_NAME} (SELECT * FROM {self.data_table_1} limit 50)\n PREDICT rental_price\n ", "language": "en", "n_whitespaces": 70, "n_words": 13, "vocab_size": 12 }
https://github.com/mindsdb/mindsdb.git
1
truncated_cube_graph
def truncated_cube_graph(create_using=None): description = [ "adjacencylist", "Truncated Cube Graph", 24, [ [2, 3, 5], [12, 15], [4, 5], [7, 9], [6], [17, 19], [8, 9], [11, 13], [10], [18, 21], [12, 13], [15], [14], [22, 23], [16], [20, 24], [18, 19], [21], [20], [24], [22], [23], [24], [], ], ] G = make_small_undirected_graph(description, create_using) return G
dec723f072eb997a497a159dbe8674cd39999ee9
9
small.py
197
Docstrings for the small.py module (#5240) * added description for the first 5 small graphs * modified descriptions based on comment and added description for two more functions * added doctrings to all the functions * Minor touchups. Co-authored-by: Ross Barnowski <[email protected]>
41,741
0
370
152
46
176,171
56
networkx
5
networkx/generators/small.py
Python
34
{ "docstring": "\n Returns the skeleton of the truncated cube.\n\n The truncated cube is an Archimedean solid with 14 regular\n faces (6 octagonal and 8 triangular), 36 edges and 24 nodes [1]_.\n The truncated cube is created by truncating (cutting off) the tips\n of the cube one third of the way into each edge [2]_.\n\n Parameters\n ----------\n create_using : NetworkX graph constructor, optional (default=nx.Graph)\n Graph type to create. If graph instance, then cleared before populated.\n\n Returns\n -------\n G : networkx Graph\n Skeleton of the truncated cube\n\n References\n ----------\n .. [1] https://en.wikipedia.org/wiki/Truncated_cube\n .. [2] https://www.coolmath.com/reference/polyhedra-truncated-cube\n\n ", "language": "en", "n_whitespaces": 153, "n_words": 91, "vocab_size": 68 }
https://github.com/networkx/networkx.git
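A short usage check of the generator above, assuming a recent NetworkX release that exposes it at the top level; the counts match the docstring (24 nodes, 36 edges) and the solid is 3-regular:

import networkx as nx

G = nx.truncated_cube_graph()
print(G.number_of_nodes(), G.number_of_edges())   # 24 36
print(set(dict(G.degree()).values()))             # {3}: every vertex meets one octagon-octagon edge and two truncation edges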
8
on_no_available_trials
def on_no_available_trials(self, all_trials):
    # This is approximately saying we are not making progress.
    if len(all_trials) == self._last_trial_num:
        if self._no_running_trials_since == -1:
            self._no_running_trials_since = time.monotonic()
        elif (
            time.monotonic() - self._no_running_trials_since
            > _get_insufficient_resources_warning_threshold()
        ):
            if not _is_ray_cluster():  # autoscaler not enabled
                # If any of the pending trial cannot be fulfilled,
                # that's a good enough hint of trial resources not enough.
                for trial in all_trials:
                    if (
                        trial.status is Trial.PENDING
                        and not _can_fulfill_no_autoscaler(trial)
                    ):
                        # TODO(xwjiang):
                        #  Raise an Error once #18608 is resolved.
                        logger.warning(_get_insufficient_resources_error_msg(trial))
                        break
            else:
                # TODO(xwjiang): #17799.
                #  Output a more helpful msg for autoscaler.
                logger.warning(_get_insufficient_resources_warning_msg())
            self._no_running_trials_since = time.monotonic()
    else:
        self._no_running_trials_since = -1
    self._last_trial_num = len(all_trials)
eb69c1ca286a2eec594f02ddaf546657a8127afd
20
insufficient_resources_manager.py
217
[air] Add annotation for Tune module. (#27060) Co-authored-by: Kai Fricke <[email protected]>
28,086
0
602
125
72
126,210
108
ray
19
python/ray/tune/execution/insufficient_resources_manager.py
Python
22
{ "docstring": "Tracks information across the life of Tune loop and makes guesses\n about if Tune loop is stuck due to infeasible resources.\n If so, outputs certain warning messages.\n The logic should be conservative, non-intrusive and informative.\n For example, rate limiting is applied so that the message is not\n spammy.\n ", "language": "en", "n_whitespaces": 90, "n_words": 48, "vocab_size": 42 }
https://github.com/ray-project/ray.git
1
test_door_lock_no_value
async def test_door_lock_no_value(hass, client, lock_schlage_be469_state, integration): node_state = replace_value_of_zwave_value( lock_schlage_be469_state, [ ZwaveValueMatcher( property_=CURRENT_MODE_PROPERTY, command_class=CommandClass.DOOR_LOCK, ) ], None, ) node = Node(client, node_state) client.driver.controller.emit("node added", {"node": node}) await hass.async_block_till_done() state = hass.states.get(SCHLAGE_BE469_LOCK_ENTITY) assert state assert state.state == STATE_UNKNOWN
d4c28e04e4793c328d1a1d096c2085ebbe7c5e7c
13
test_lock.py
134
Reduce missed coverage in zwave_js (#79571) * Reduce missed coverage in zwave_js.climate and cover * Add switch platform coverage * Add select platform * Add lock platform * Remove one line of coverage from number platform * update docstring
88,425
0
144
85
31
289,282
37
core
24
tests/components/zwave_js/test_lock.py
Python
17
{ "docstring": "Test a lock entity with door lock command class that has no value for mode.", "language": "en", "n_whitespaces": 14, "n_words": 15, "vocab_size": 14 }
https://github.com/home-assistant/core.git
2
make_model
def make_model(): r inputs = [] intermediates = [] for _ in range(_NUM_LEARNERS): inp = keras.layers.Input(shape=(1,), dtype=tf.int32) layer = keras.layers.Embedding(1, 4)(inp) layer = keras.layers.Dense(1)(layer) inputs.append(inp) intermediates.append(layer) layer = keras.layers.Concatenate(axis=-1)(intermediates) layer = keras.layers.Dense(1)(layer) return keras.models.Model(inputs, layer) COEFFICIENT_PARAMS = ( ("Adadelta", adadelta.Adadelta, None), ("Adagrad", adagrad.Adagrad, None), ("Adam", adam.Adam, None), ("Adam_amdgrad", adam.Adam, dict(amsgrad=True)), ("Adamax", adamax.Adamax, None), ("Ftrl", ftrl.Ftrl, None), ( "Ftrl_l2_shrinkage", ftrl.Ftrl, dict(l2_shrinkage_regularization_strength=0.1), ), ("SGD", gradient_descent.SGD, None), ("SGD_momentum", gradient_descent.SGD, dict(momentum=0.5)), ("Nadam", nadam.Nadam, None), ("RMSprop", rmsprop.RMSprop, None), ("RMSprop_centered", rmsprop.RMSprop, dict(centered=True)), ("RMSprop_momentum", rmsprop.RMSprop, dict(momentum=0.5)), ( "RMSprop_momentum_centered", rmsprop.RMSprop, dict(momentum=0.5, centered=True), ), )
84afc5193d38057e2e2badf9c889ea87d80d8fbf
12
optimizer_v2_test.py
454
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
81,429
0
229
123
57
275,572
86
keras
44
keras/optimizers/optimizer_v2/optimizer_v2_test.py
Python
41
{ "docstring": "Constructs a simple ensemble of weak learners model.\n\n --------- --------- --------- ---------\n | Input | | Input | ... | Input | | Input |\n --------- --------- --------- ---------\n | | | |\n V V V V\n --------- --------- --------- ---------\n | Embed | | Embed | ... | Embed | | Embed |\n --------- --------- --------- ---------\n | | | |\n V V V V\n --------- --------- --------- ---------\n | Dense | | Dense | ... | Dense | | Dense |\n --------- --------- --------- ---------\n \\ | | /\n \\ | | /\n ---------------------------------------------\n |\n ---------\n | Dense |\n ---------\n\n This topology is chosen because it exercises both dense and sparse update\n paths.\n\n Returns:\n A model for testing optimizer coefficient reuse.\n ", "language": "en", "n_whitespaces": 731, "n_words": 123, "vocab_size": 39 }
https://github.com/keras-team/keras.git
2
add_department_leaves
def add_department_leaves(events, start, end, employee, company):
    department = frappe.db.get_value("Employee", employee, "department")

    if not department:
        return

    # department leaves
    department_employees = frappe.db.sql_list(
        ,
        (department, company),
    )

    filter_conditions = ' and employee in ("%s")' % '", "'.join(department_employees)
    add_leaves(events, start, end, filter_conditions=filter_conditions)
494bd9ef78313436f0424b918f200dab8fc7c20b
10
leave_application.py
113
style: format code with black
14,117
0
28
71
33
66,160
39
erpnext
15
erpnext/hr/doctype/leave_application/leave_application.py
Python
11
{ "docstring": "select name from tabEmployee where department=%s\n\t\tand company=%s", "language": "en", "n_whitespaces": 6, "n_words": 8, "vocab_size": 8 }
https://github.com/frappe/erpnext.git
1
test_dataset_stats_read_parquet
def test_dataset_stats_read_parquet(ray_start_regular_shared, tmp_path): ds = ray.data.range(1000, parallelism=10) ds.write_parquet(str(tmp_path)) ds = ray.data.read_parquet(str(tmp_path)).map(lambda x: x) stats = canonicalize(ds.stats()) print(stats) assert stats ==
e9068c45faf2928cd8e38f6d38c5260cb06130d2
12
test_stats.py
113
[data] Instrument most remaining dataset functions and add docs (#21412) This PR finishes most of the stats todos for dataset. The main thing punted for future work is instrumentation of split(), which is particularly tricky since only certain blocks are transformed. Co-authored-by: Clark Zinzow <[email protected]>
28,886
0
38
69
16
129,063
20
ray
16
python/ray/data/tests/test_stats.py
Python
20
{ "docstring": "Stage Z read: N/N blocks executed in T\n* Remote wall time: T min, T max, T mean, T total\n* Remote cpu time: T min, T max, T mean, T total\n* Output num rows: N min, N max, N mean, N total\n* Output size bytes: N min, N max, N mean, N total\n* Tasks per node: N min, N max, N mean; N nodes used\n\nStage N map: N/N blocks executed in T\n* Remote wall time: T min, T max, T mean, T total\n* Remote cpu time: T min, T max, T mean, T total\n* Output num rows: N min, N max, N mean, N total\n* Output size bytes: N min, N max, N mean, N total\n* Tasks per node: N min, N max, N mean; N nodes used\n", "language": "en", "n_whitespaces": 126, "n_words": 138, "vocab_size": 30 }
https://github.com/ray-project/ray.git
7
emitCurrentToken
def emitCurrentToken(self):
    token = self.currentToken
    # Add token to the queue to be yielded
    if (token["type"] in tagTokenTypes):
        token["name"] = token["name"].translate(asciiUpper2Lower)
        if token["type"] == tokenTypes["StartTag"]:
            raw = token["data"]
            data = attributeMap(raw)
            if len(raw) > len(data):
                # we had some duplicated attribute, fix so first wins
                data.update(raw[::-1])
            token["data"] = data

        if token["type"] == tokenTypes["EndTag"]:
            if token["data"]:
                self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                        "data": "attributes-in-end-tag"})
            if token["selfClosing"]:
                self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                        "data": "self-closing-flag-on-end-tag"})
    self.tokenQueue.append(token)
    self.state = self.dataState

# Below are the various tokenizer states worked out.
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
17
_tokenizer.py
293
upd; format
12,978
0
408
162
56
62,468
78
transferlearning
17
.venv/lib/python3.8/site-packages/pip/_vendor/html5lib/_tokenizer.py
Python
19
{ "docstring": "This method is a generic handler for emitting the tags. It also sets\n the state to \"data\" because that's what's needed after a token has been\n emitted.\n ", "language": "en", "n_whitespaces": 48, "n_words": 27, "vocab_size": 25 }
https://github.com/jindongwang/transferlearning.git
12
remove
def remove(name=None, pkgs=None, **kwargs):
    targets = salt.utils.args.split_input(pkgs) if pkgs else [name]
    if not targets:
        return {}

    if pkgs:
        log.debug("Removing these fileset(s)/rpm package(s) %s: %s", name, targets)

    errors = []

    # Get a list of the currently installed pkgs.
    old = list_pkgs()

    # Remove the fileset or rpm package(s)
    for target in targets:
        try:
            named, versionpkg, rpmpkg = _check_pkg(target)
        except CommandExecutionError as exc:
            if exc.info:
                errors.append(exc.info["errors"])
            continue

        if rpmpkg:
            # assume use dnf or yum
            cmdflags = " -y remove "
            if pathlib.Path("/opt/freeware/bin/dnf").is_file():
                cmdexe = "/opt/freeware/bin/dnf"
            elif pathlib.Path("/opt/freeware/bin/yum").is_file():
                cmdexe = "/opt/freeware/bin/yum"
            elif pathlib.Path("/usr/bin/yum").is_file():
                cmdexe = "/usr/bin/yum"
            else:
                cmdexe = "/usr/bin/rpm"
                cmdflags = " -e "
            cmd = [cmdexe, cmdflags, named]
            out = __salt__["cmd.run_all"](cmd, python_shell=False)
        else:
            cmd = ["/usr/sbin/installp", "-u", named]
            out = __salt__["cmd.run_all"](cmd, python_shell=False)

    # Get a list of the packages after the uninstall
    __context__.pop("pkg.list_pkgs", None)
    new = list_pkgs()
    ret = salt.utils.data.compare_dicts(old, new)

    if errors:
        raise CommandExecutionError(
            "Problems encountered removing filesets(s)/package(s)",
            info={"changes": ret, "errors": errors},
        )

    return ret
fbcc707e76f11770712e6828155258ac61e00ff8
16
aixpkg.py
443
work in progress while resolve issue of python3_32 usage by dnf and yum
53,792
0
513
256
106
215,074
157
salt
38
salt/modules/aixpkg.py
Python
40
{ "docstring": "\n Remove specified fileset(s)/rpm package(s).\n\n name\n The name of the fileset or rpm package to be deleted.\n\n .. versionadded:: 3005\n\n preference to install rpm packages are to use in the following order:\n /opt/freeware/bin/dnf\n /opt/freeware/bin/yum\n /usr/bin/yum\n /usr/bin/rpm\n\n Multiple Package Options:\n\n pkgs\n A list of filesets and/or rpm packages to delete.\n Must be passed as a python list. The ``name`` parameter will be\n ignored if this option is passed.\n\n\n Returns a list containing the removed packages.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' pkg.remove <fileset/rpm package name>\n salt '*' pkg.remove tcsh\n salt '*' pkg.remove xlC.rte\n salt '*' pkg.remove Firefox.base.adt\n salt '*' pkg.remove pkgs='[\"foo\", \"bar\"]'\n ", "language": "en", "n_whitespaces": 243, "n_words": 101, "vocab_size": 72 }
https://github.com/saltstack/salt.git
16
_async_update_existing_host_entry
def _async_update_existing_host_entry(self) -> config_entries.ConfigEntry | None: for entry in self._async_current_entries(include_ignore=False): mac = entry.data.get(CONF_MAC) mac_match = mac and self._mac and mac == self._mac upnp_udn_match = self._upnp_udn and self._upnp_udn == entry.unique_id if ( entry.data[CONF_HOST] != self._host and not mac_match and not upnp_udn_match ): continue entry_kw_args: dict = {} if (self._udn and self._upnp_udn and self._upnp_udn != self._udn) or ( self.unique_id and entry.unique_id is None ): entry_kw_args["unique_id"] = self.unique_id if self._mac and not entry.data.get(CONF_MAC): entry_kw_args["data"] = {**entry.data, CONF_MAC: self._mac} if entry_kw_args: self.hass.config_entries.async_update_entry(entry, **entry_kw_args) self.hass.async_create_task( self.hass.config_entries.async_reload(entry.entry_id) ) return entry return None
2d4ccddd99bcc4bd314f813214be693788b1c0bf
15
config_flow.py
312
Fix SSDP unique id in SamsungTV config flow (#67811) Co-authored-by: epenet <[email protected]> Co-authored-by: J. Nick Koston <[email protected]>
92,385
0
401
197
49
293,322
86
core
26
homeassistant/components/samsungtv/config_flow.py
Python
29
{ "docstring": "Check existing entries and update them.\n\n Returns the existing entry if it was updated.\n ", "language": "en", "n_whitespaces": 28, "n_words": 14, "vocab_size": 13 }
https://github.com/home-assistant/core.git
1
_load_environment_vars
def _load_environment_vars(self):
    # type: () -> None
    self._config[kinds.ENV_VAR].update(
        self._normalized_keys(":env:", self.get_environ_vars())
    )
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
11
configuration.py
53
upd; format
12,235
0
50
30
11
60,669
11
transferlearning
8
.venv/lib/python3.8/site-packages/pip/_internal/configuration.py
Python
4
{ "docstring": "Loads configuration from environment variables\n ", "language": "en", "n_whitespaces": 12, "n_words": 5, "vocab_size": 5 }
https://github.com/jindongwang/transferlearning.git
1
open
def open(self, host=None, port=None, timeout=None):
    self.host = None        # For compatibility with parent class
    self.port = None
    self.sock = None
    self.file = None
    self.process = subprocess.Popen(self.command,
        bufsize=DEFAULT_BUFFER_SIZE,
        stdin=subprocess.PIPE, stdout=subprocess.PIPE,
        shell=True, close_fds=True)
    self.writefile = self.process.stdin
    self.readfile = self.process.stdout
8198943edd73a363c266633e1aa5b2a9e9c9f526
10
imaplib.py
139
add python 3.10.4 for windows
55,008
0
133
92
28
217,910
37
XX-Net
20
python3.10.4/Lib/imaplib.py
Python
11
{ "docstring": "Setup a stream connection.\n This connection will be used by the routines:\n read, readline, send, shutdown.\n ", "language": "en", "n_whitespaces": 41, "n_words": 16, "vocab_size": 16 }
https://github.com/XX-net/XX-Net.git
13
collapse_addresses
def collapse_addresses(addresses):
    addrs = []
    ips = []
    nets = []

    # split IP addresses and networks
    for ip in addresses:
        if isinstance(ip, _BaseAddress):
            if ips and ips[-1]._version != ip._version:
                raise TypeError("%s and %s are not of the same version" % (
                                 ip, ips[-1]))
            ips.append(ip)
        elif ip._prefixlen == ip._max_prefixlen:
            if ips and ips[-1]._version != ip._version:
                raise TypeError("%s and %s are not of the same version" % (
                                 ip, ips[-1]))
            try:
                ips.append(ip.ip)
            except AttributeError:
                ips.append(ip.network_address)
        else:
            if nets and nets[-1]._version != ip._version:
                raise TypeError("%s and %s are not of the same version" % (
                                 ip, nets[-1]))
            nets.append(ip)

    # sort and dedup
    ips = sorted(set(ips))

    # find consecutive address ranges in the sorted sequence and summarize them
    if ips:
        for first, last in _find_address_range(ips):
            addrs.extend(summarize_address_range(first, last))

    return _collapse_addresses_internal(addrs + nets)
8198943edd73a363c266633e1aa5b2a9e9c9f526
18
ipaddress.py
333
add python 3.10.4 for windows
55,342
0
448
205
71
218,488
128
XX-Net
23
python3.10.4/Lib/ipaddress.py
Python
28
{ "docstring": "Collapse a list of IP objects.\n\n Example:\n collapse_addresses([IPv4Network('192.0.2.0/25'),\n IPv4Network('192.0.2.128/25')]) ->\n [IPv4Network('192.0.2.0/24')]\n\n Args:\n addresses: An iterator of IPv4Network or IPv6Network objects.\n\n Returns:\n An iterator of the collapsed IPv(4|6)Network objects.\n\n Raises:\n TypeError: If passed a list of mixed version objects.\n\n ", "language": "en", "n_whitespaces": 134, "n_words": 38, "vocab_size": 28 }
https://github.com/XX-net/XX-Net.git
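This function is exposed by the standard-library ipaddress module, so the docstring's example can be run directly; the result is an iterator of collapsed networks:

import ipaddress

nets = [ipaddress.ip_network("192.0.2.0/25"),
        ipaddress.ip_network("192.0.2.128/25")]
# The two adjacent /25 subnets are summarized into a single /24.
print(list(ipaddress.collapse_addresses(nets)))
# -> [IPv4Network('192.0.2.0/24')]
# Passing a mix of IPv4 and IPv6 objects raises TypeError, as the version checks above enforce.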
1
test_creating_algo_config_from_legacy_dict
def test_creating_algo_config_from_legacy_dict(self):
    config_dict = {
        "evaluation_config": {
            "lr": 0.1,
        },
        "lr": 0.2,
        # Old-style multi-agent dict.
        "multiagent": {
            "policies": {"pol1", "pol2"},
            "policies_to_train": ["pol1"],
            "policy_mapping_fn": lambda aid, episode, worker, **kwargs: "pol1",
        },
    }
    config = AlgorithmConfig.from_dict(config_dict)
    self.assertFalse(config.in_evaluation)
    self.assertTrue(config.lr == 0.2)
    self.assertTrue(config.policies == {"pol1", "pol2"})
    self.assertTrue(config.policy_mapping_fn(1, 2, 3) == "pol1")
    eval_config = config.get_evaluation_config_object()
    self.assertTrue(eval_config.in_evaluation)
    self.assertTrue(eval_config.lr == 0.1)
5af66e66cc6aa3859e2e15637c66f6decbec13b9
13
test_algorithm_config.py
238
[RLlib] AlgorithmConfigs: Broad rollout; Example scripts. (#29700)
30,578
0
258
146
45
135,275
55
ray
18
rllib/algorithms/tests/test_algorithm_config.py
Python
20
{ "docstring": "Tests, whether translation from dict to AlgorithmConfig works as expected.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/ray-project/ray.git
9
cov
def cov(self, ddof=1, split_every=None, split_out=1, std=False): if self.sort is None and split_out > 1: warnings.warn(SORT_SPLIT_OUT_WARNING, FutureWarning) levels = _determine_levels(self.by) is_mask = any(is_series_like(s) for s in self.by) if self._slice: if is_mask: self.obj = self.obj[self._slice] else: sliced_plus = list(self._slice) + list(self.by) self.obj = self.obj[sliced_plus] result = aca( [self.obj, self.by] if not isinstance(self.by, list) else [self.obj] + self.by, chunk=_cov_chunk, aggregate=_cov_agg, combine=_cov_combine, token=self._token_prefix + "cov", aggregate_kwargs={"ddof": ddof, "levels": levels, "std": std}, combine_kwargs={"levels": levels}, split_every=split_every, split_out=split_out, split_out_setup=split_out_on_index, sort=self.sort, ) if isinstance(self.obj, Series): result = result[result.columns[0]] if self._slice: result = result[self._slice] return result
72832d4bd685a25000ee646966a1aa5d0d8a1793
15
groupby.py
368
Groupby sort upstream compatibility (#9486)
36,829
0
400
244
68
157,016
87
dask
39
dask/dataframe/groupby.py
Python
31
{ "docstring": "Groupby covariance is accomplished by\n\n 1. Computing intermediate values for sum, count, and the product of\n all columns: a b c -> a*a, a*b, b*b, b*c, c*c.\n\n 2. The values are then aggregated and the final covariance value is calculated:\n cov(X, Y) = X*Y - Xbar * Ybar\n\n When `std` is True calculate Correlation\n ", "language": "en", "n_whitespaces": 102, "n_words": 54, "vocab_size": 48 }
https://github.com/dask/dask.git
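The identity the docstring relies on, cov(X, Y) = E[XY] - Xbar*Ybar, is what lets the chunk step keep only sums, counts and pairwise products. A small numeric check of that identity with the ddof normalization applied at the end; this uses plain NumPy as an illustration and is not dask's actual chunk/aggregate code:

import numpy as np

x = np.array([1.0, 2.0, 4.0])
y = np.array([1.0, 3.0, 9.0])
n = len(x)

# cov(X, Y) = (sum(X*Y) - n * Xbar * Ybar) / (n - ddof), here with ddof=1.
manual = (np.sum(x * y) - n * x.mean() * y.mean()) / (n - 1)
reference = np.cov(x, y, ddof=1)[0, 1]
assert np.isclose(manual, reference)
print(manual)   # 6.333...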
4
copyin_matrix
def copyin_matrix(self, key, value): rlo, rhi, clo, chi = self.key2bounds(key) shape = value.shape dr, dc = rhi - rlo, chi - clo if shape != (dr, dc): raise ShapeError(filldedent("The Matrix `value` doesn't have the " "same dimensions " "as the in sub-Matrix given by `key`.")) for i in range(value.rows): for j in range(value.cols): self[i + rlo, j + clo] = value[i, j]
59d22b6bb7287613d598611027f640d068ca5748
13
repmatrix.py
150
Moved imports to higher level
47,899
0
219
96
47
196,399
62
sympy
19
sympy/matrices/repmatrix.py
Python
11
{ "docstring": "Copy in values from a matrix into the given bounds.\n\n Parameters\n ==========\n\n key : slice\n The section of this matrix to replace.\n value : Matrix\n The matrix to copy values from.\n\n Examples\n ========\n\n >>> from sympy import Matrix, eye\n >>> M = Matrix([[0, 1], [2, 3], [4, 5]])\n >>> I = eye(3)\n >>> I[:3, :2] = M\n >>> I\n Matrix([\n [0, 1, 0],\n [2, 3, 0],\n [4, 5, 1]])\n >>> I[0, 1] = M\n >>> I\n Matrix([\n [0, 0, 1],\n [2, 2, 3],\n [4, 4, 5]])\n\n See Also\n ========\n\n copyin_list\n ", "language": "en", "n_whitespaces": 287, "n_words": 90, "vocab_size": 59 }
https://github.com/sympy/sympy.git
8
step
def step(self) -> ResultDict:
    step_attempt_results = None

    with self._step_context() as step_ctx:
        while not step_ctx.should_stop(step_attempt_results):
            # Try to train one step.
            try:
                step_attempt_results = self.step_attempt()
            # @ray.remote RolloutWorker failure.
            except RayError as e:
                # Try to recover w/o the failed worker.
                if self.config["ignore_worker_failures"]:
                    logger.exception("Error in train call, attempting to recover")
                    self.try_recover_from_step_attempt()
                # Error out.
                else:
                    logger.warning(
                        "Worker crashed during call to `step_attempt()`. "
                        "To try to continue training without the failed "
                        "worker, set `ignore_worker_failures=True`."
                    )
                    raise e
            # Any other exception.
            except Exception as e:
                # Allow logs messages to propagate.
                time.sleep(0.5)
                raise e

    result = step_attempt_results

    if hasattr(self, "workers") and isinstance(self.workers, WorkerSet):
        # Sync filters on workers.
        self._sync_filters_if_needed(self.workers)

        # Collect worker metrics.
        if self.config["_disable_execution_plan_api"]:
            result = self._compile_step_results(
                step_ctx=step_ctx,
                step_attempt_results=step_attempt_results,
            )
    return result
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
19
trainer.py
254
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
30,128
0
726
142
90
133,868
123
ray
25
rllib/agents/trainer.py
Python
43
{ "docstring": "Implements the main `Trainer.train()` logic.\n\n Takes n attempts to perform a single training step. Thereby\n catches RayErrors resulting from worker failures. After n attempts,\n fails gracefully.\n\n Override this method in your Trainer sub-classes if you would like to\n handle worker failures yourself. Otherwise, override\n `self.step_attempt()` to keep the n attempts (catch worker failures).\n\n Returns:\n The results dict with stats/infos on sampling, training,\n and - if required - evaluation.\n ", "language": "en", "n_whitespaces": 146, "n_words": 68, "vocab_size": 58 }
https://github.com/ray-project/ray.git
19
deltaintegrate
def deltaintegrate(f, x):
    if not f.has(DiracDelta):
        return None

    # g(x) = DiracDelta(h(x))
    if f.func == DiracDelta:
        h = f.expand(diracdelta=True, wrt=x)
        if h == f:  # can't simplify the expression
            #FIXME: the second term tells whether is DeltaDirac or Derivative
            #For integrating derivatives of DiracDelta we need the chain rule
            if f.is_simple(x):
                if (len(f.args) <= 1 or f.args[1] == 0):
                    return Heaviside(f.args[0])
                else:
                    return (DiracDelta(f.args[0], f.args[1] - 1) /
                            f.args[0].as_poly().LC())
        else:  # let's try to integrate the simplified expression
            fh = integrate(h, x)
            return fh
    elif f.is_Mul or f.is_Pow:  # g(x) = a*b*c*f(DiracDelta(h(x)))*d*e
        g = f.expand()
        if f != g:  # the expansion worked
            fh = integrate(g, x)
            if fh is not None and not isinstance(fh, Integral):
                return fh
        else:
            # no expansion performed, try to extract a simple DiracDelta term
            deltaterm, rest_mult = change_mul(f, x)

            if not deltaterm:
                if rest_mult:
                    fh = integrate(rest_mult, x)
                    return fh
            else:
                from sympy.solvers import solve
                deltaterm = deltaterm.expand(diracdelta=True, wrt=x)
                if deltaterm.is_Mul:  # Take out any extracted factors
                    deltaterm, rest_mult_2 = change_mul(deltaterm, x)
                    rest_mult = rest_mult*rest_mult_2
                point = solve(deltaterm.args[0], x)[0]

                # Return the largest hyperreal term left after
                # repeated integration by parts.  For example,
                #
                #   integrate(y*DiracDelta(x, 1),x) == y*DiracDelta(x,0),  not 0
                #
                # This is so Integral(y*DiracDelta(x).diff(x),x).doit()
                # will return y*DiracDelta(x) instead of 0 or DiracDelta(x),
                # both of which are correct everywhere the value is defined
                # but give wrong answers for nested integration.
                n = (0 if len(deltaterm.args)==1 else deltaterm.args[1])
                m = 0
                while n >= 0:
                    r = S.NegativeOne**n*rest_mult.diff(x, n).subs(x, point)
                    if r.is_zero:
                        n -= 1
                        m += 1
                    else:
                        if m == 0:
                            return r*Heaviside(x - point)
                        else:
                            return r*DiracDelta(x,m-1)
                # In some very weak sense, x=0 is still a singularity,
                # but we hope will not be of any practical consequence.
                return S.Zero
    return None
f757f3daae6e11ea0cfb7dadc133274d8d74315f
26
deltafunctions.py
585
Reordered imports 2
48,168
0
1,179
364
173
196,780
297
sympy
40
sympy/integrals/deltafunctions.py
Python
48
{ "docstring": "\n deltaintegrate(f, x)\n\n Explanation\n ===========\n\n The idea for integration is the following:\n\n - If we are dealing with a DiracDelta expression, i.e. DiracDelta(g(x)),\n we try to simplify it.\n\n If we could simplify it, then we integrate the resulting expression.\n We already know we can integrate a simplified expression, because only\n simple DiracDelta expressions are involved.\n\n If we couldn't simplify it, there are two cases:\n\n 1) The expression is a simple expression: we return the integral,\n taking care if we are dealing with a Derivative or with a proper\n DiracDelta.\n\n 2) The expression is not simple (i.e. DiracDelta(cos(x))): we can do\n nothing at all.\n\n - If the node is a multiplication node having a DiracDelta term:\n\n First we expand it.\n\n If the expansion did work, then we try to integrate the expansion.\n\n If not, we try to extract a simple DiracDelta term, then we have two\n cases:\n\n 1) We have a simple DiracDelta term, so we return the integral.\n\n 2) We didn't have a simple term, but we do have an expression with\n simplified DiracDelta terms, so we integrate this expression.\n\n Examples\n ========\n\n >>> from sympy.abc import x, y, z\n >>> from sympy.integrals.deltafunctions import deltaintegrate\n >>> from sympy import sin, cos, DiracDelta\n >>> deltaintegrate(x*sin(x)*cos(x)*DiracDelta(x - 1), x)\n sin(1)*cos(1)*Heaviside(x - 1)\n >>> deltaintegrate(y**2*DiracDelta(x - z)*DiracDelta(y - z), y)\n z**2*DiracDelta(x - z)*Heaviside(y - z)\n\n See Also\n ========\n\n sympy.functions.special.delta_functions.DiracDelta\n sympy.integrals.integrals.Integral\n ", "language": "en", "n_whitespaces": 411, "n_words": 225, "vocab_size": 115 }
https://github.com/sympy/sympy.git
1
to_batches
def to_batches(self, *args, **kwargs):
        return self.table.to_batches(*args, **kwargs)
e35be138148333078284b942ccc9ed7b1d826f97
8
table.py
41
Update docs to new frontend/UI (#3690) * WIP: update docs to new UI * make style * Rm unused * inject_arrow_table_documentation __annotations__ * hasattr(arrow_table_method, "__annotations__") * Update task_template.rst * Codeblock PT-TF-SPLIT * Convert loading scripts * Convert docs to mdx * Fix mdx * Add <Tip> * Convert mdx tables * Fix codeblock * Rm unneded hashlinks * Update index.mdx * Redo dev change * Rm circle ci `build_doc` & `deploy_doc` * Rm unneeded files * Update docs reamde * Standardize to `Example::` * mdx logging levels doc * Table properties inject_arrow_table_documentation * ``` to ```py mdx * Add Tips mdx * important,None -> <Tip warning={true}> * More misc * Center imgs * Update instllation page * `setup.py` docs section * Rm imgs since they are in hf.co * Update docs/source/access.mdx Co-authored-by: Steven Liu <[email protected]> * Update index mdx * Update docs/source/access.mdx Co-authored-by: Steven Liu <[email protected]> * just `Dataset` obj * Addedversion just italics * Update ReadInstruction doc example syntax * Change docstring for `prepare_for_task` * Chore * Remove `code` syntax from headings * Rm `code` syntax from headings * Hashlink backward compatability * S3FileSystem doc * S3FileSystem doc updates * index.mdx updates * Add darkmode gifs * Index logo img css classes * Index mdx dataset logo img size * Docs for DownloadMode class * Doc DownloadMode table * format docstrings * style * Add doc builder scripts (#3790) * add doc builder scripts * fix docker image * Docs new UI actions no self hosted (#3793) * No self hosted * replace doc injection by actual docstrings * Docstring formatted Co-authored-by: Quentin Lhoest <[email protected]> Co-authored-by: Mishig Davaadorj <[email protected]> Co-authored-by: Lysandre Debut <[email protected]> Co-authored-by: Mishig Davaadorj <[email protected]> * Rm notebooks from docs actions since they dont exi * Update tsting branch * More docstring * Chore * bump up node version * bump up node * ``` -> ```py for audio_process.mdx * Update .github/workflows/build_documentation.yml Co-authored-by: Quentin Lhoest <[email protected]> * Uodate dev doc build * remove run on PR * fix action * Fix gh doc workflow * forgot this change when merging master * Update build doc Co-authored-by: Steven Liu <[email protected]> Co-authored-by: Quentin Lhoest <[email protected]> Co-authored-by: Quentin Lhoest <[email protected]> Co-authored-by: Lysandre Debut <[email protected]>
21,831
0
21
25
7
104,394
7
datasets
5
src/datasets/table.py
Python
2
{ "docstring": "\n Convert Table to list of (contiguous) RecordBatch objects.\n\n Args:\n max_chunksize (:obj:`int`, defaults to `None`):\n Maximum size for RecordBatch chunks. Individual chunks may be\n smaller depending on the chunk layout of individual columns.\n\n Returns:\n :obj:`List[pyarrow.RecordBatch]`:\n ", "language": "en", "n_whitespaces": 115, "n_words": 34, "vocab_size": 31 }
https://github.com/huggingface/datasets.git
5
check_for_updates
def check_for_updates() -> None:
    # The commit has was commented out because the terminal was crashing due to git import for multiple users
    # ({str(git.Repo('.').head.commit)[:7]})
    try:
        r = requests.get(
            "https://api.github.com/repos/openbb-finance/openbbterminal/releases/latest",
            timeout=1,
        )
    except Exception:
        r = None

    if r is not None and r.status_code == 200:
        release = r.json()["html_url"].split("/")[-1].replace("v", "")
        if obbff.VERSION == release:
            console.print("[green]You are using the latest version[/green]")
        else:
            console.print("[red]You are not using the latest version[/red]")
            console.print(
                "[yellow]Check for updates at https://openbb.co/products/terminal#get-started[/yellow]"
            )
    else:
        console.print(
            "[yellow]Unable to check for updates... "
            + "Check your internet connection and try again...[/yellow]"
        )
    console.print("")
7b374b76629a963cef9a22b41bc8acbef70e876a
16
terminal_helper.py
202
Add check for updates at terminal startup (#1839) * Add check for updates at terminal startup * Fix ycrv missing from documentation * Add new line after update check message Co-authored-by: didierlopes.eth <[email protected]> Co-authored-by: James Maslek <[email protected]>
84,784
0
276
109
70
284,534
93
OpenBBTerminal
15
openbb_terminal/terminal_helper.py
Python
27
{ "docstring": "Check if the latest version is running.\n\n Checks github for the latest release version and compares it to obbff.VERSION.\n ", "language": "en", "n_whitespaces": 25, "n_words": 19, "vocab_size": 16 }
https://github.com/OpenBB-finance/OpenBBTerminal.git
1
load_array
def load_array(data_arrays, batch_size, is_train=True):
    dataset = gluon.data.ArrayDataset(*data_arrays)
    return gluon.data.DataLoader(dataset, batch_size, shuffle=is_train)
b64b41d8c1ac23c43f7a4e3f9f6339d6f0012ab2
9
mxnet.py
59
[PaddlePaddle] Merge master into Paddle branch (#1186) * change 15.2 title in chinese version (#1109) change title ’15.2. 情感分析:使用递归神经网络‘ to ’15.2. 情感分析:使用循环神经网络‘ * 修改部分语义表述 (#1105) * Update r0.17.5 (#1120) * Bump versions in installation * 94行typo: (“bert.mall”)->(“bert.small”) (#1129) * line 313: "bert.mall" -> "bert.small" (#1130) * fix: update language as native reader (#1114) * Fix the translation of "stride" (#1115) * Update index.md (#1118) 修改部分语义表述 * Update self-attention-and-positional-encoding.md (#1133) 依照本书的翻译习惯,将pooling翻译成汇聚 * maybe a comment false (#1149) * maybe a little false * maybe a little false * A minor bug in the rcnn section (Chinese edition) (#1148) * Update bert.md (#1137) 一个笔误 # 假设batch_size=2,num_pred_positions=3 # 那么batch_idx应该是np.repeat( [0,1], 3 ) = [0,0,0,1,1,1] * Update calculus.md (#1135) * fix typo in git documentation (#1106) * fix: Update the Chinese translation in lr-scheduler.md (#1136) * Update lr-scheduler.md * Update chapter_optimization/lr-scheduler.md Co-authored-by: goldmermaid <[email protected]> Co-authored-by: goldmermaid <[email protected]> * fix translation for kaggle-house-price.md (#1107) * fix translation for kaggle-house-price.md * fix translation for kaggle-house-price.md Signed-off-by: sunhaizhou <[email protected]> * Update weight-decay.md (#1150) * Update weight-decay.md 关于“k多选d”这一部分,中文读者使用排列组合的方式可能更容易理解 关于“给定k个变量,阶数的个数为...”这句话是有歧义的,不是很像中国话,应该是说“阶数为d的项的个数为...”。 并增加了一句对“因此即使是阶数上的微小变化,比如从$2$到$3$,也会显著增加我们模型的复杂性。”的解释 解释为何会增加复杂性以及为何需要细粒度工具。 * Update chapter_multilayer-perceptrons/weight-decay.md yep Co-authored-by: goldmermaid <[email protected]> * Update chapter_multilayer-perceptrons/weight-decay.md yep Co-authored-by: goldmermaid <[email protected]> Co-authored-by: goldmermaid <[email protected]> * Fix a spelling error (#1161) * Update gru.md (#1152) The key distinction between vanilla RNNs and GRUs is that the latter support gating of the hidden state. 翻译错误 * Unify the function naming (#1113) Unify naming of the function 'init_xavier()'. * Update mlp-concise.md (#1166) * Update mlp-concise.md 语句不通顺 * Update environment.md 语序异常 * Update config.ini * fix the imprecise description (#1168) Co-authored-by: yuande <yuande> * fix typo in chapter_natural-language-processing-pretraining/glove.md (#1175) * Fix some typos. (#1163) * Update batch-norm.md (#1170) fixing typos u->x in article * Update linear-regression.md (#1090) We invoke Stuart Russell and Peter Norvig who, in their classic AI text book Artificial Intelligence: A Modern Approach :cite:Russell.Norvig.2016, pointed out that 原译文把who也直接翻译出来了。 * Update mlp.md (#1117) * Update mlp.md 修改部分语义表述 * Update chapter_multilayer-perceptrons/mlp.md Co-authored-by: goldmermaid <[email protected]> * Update chapter_multilayer-perceptrons/mlp.md Co-authored-by: Aston Zhang <[email protected]> Co-authored-by: goldmermaid <[email protected]> * Correct a translation error. (#1091) * Correct a translation error. 
* Update chapter_computer-vision/image-augmentation.md Co-authored-by: Aston Zhang <[email protected]> * Update aws.md (#1121) * Update aws.md * Update chapter_appendix-tools-for-deep-learning/aws.md Co-authored-by: Aston Zhang <[email protected]> * Update image-augmentation.md (#1093) * Update anchor.md (#1088) fix a minor issue in code * Update anchor.md * Update image-augmentation.md * fix typo and improve translation in chapter_linear-networks\softmax-regression.md (#1087) * Avoid `torch.meshgrid` user warning (#1174) Avoids the following user warning: ```python ~/anaconda3/envs/torch/lib/python3.10/site-packages/torch/functional.py:568: UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at ../aten/src/ATen/native/TensorShape.cpp:2228.) return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined] ``` * bump to 2.0.0-beta1 * Update sequence.md * bump beta1 on readme * Add latex code block background to config * BLD: Bump python support version 3.9 (#1183) * BLD: Bump python support version 3.9 * Remove clear and manually downgrade protobuf 4.21.4 to 3.19.4 * BLD: Bump torch and tensorflow * Update Jenkinsfile * Update chapter_installation/index.md * Update chapter_installation/index.md Co-authored-by: Aston Zhang <[email protected]> * Update config.ini * Update INFO.md * Update INFO.md * Drop mint to show code in pdf, use Inconsolata font, apply code cell color (#1187) * resolve the conflicts * revise from publisher (#1089) * revise from publisher * d2l api * post_latex * revise from publisher * revise ch11 * Delete d2l-Copy1.bib * clear cache * rm d2lbook clear * debug anchor * keep original d2l doc Co-authored-by: Ubuntu <[email protected]> Co-authored-by: Aston Zhang <[email protected]> Co-authored-by: Aston Zhang <[email protected]> * 重复语句 (#1188) Co-authored-by: Aston Zhang <[email protected]> * Improve expression for chapter_preliminaries/pandas.md (#1184) * Update pandas.md * Improve expression * Improve expression * Update chapter_preliminaries/pandas.md Co-authored-by: Aston Zhang <[email protected]> * Improce expression for chapter_preliminaries/linear-algebra.md (#1185) * Improce expression * Improve code comments * Update chapter_preliminaries/linear-algebra.md * Update chapter_preliminaries/linear-algebra.md * Update chapter_preliminaries/linear-algebra.md * Update chapter_preliminaries/linear-algebra.md Co-authored-by: Aston Zhang <[email protected]> * Fix multibox_detection bugs * Update d2l to 0.17.5 version * restore older version * Upgrade pandas * change to python3.8 * Test warning log * relocate warning log * test logs filtering * Update gru.md * Add DeprecationWarning filter * Test warning log * Update attention mechanisms & computational performance * Update multilayer perceptron& linear & convolution networks & computer vision * Update recurrent&optimition&nlp pretraining & nlp applications * ignore warnings * Update index.md * Update linear networks * Update multilayer perceptrons&deep learning computation * Update preliminaries * Check and Add warning filter * Update kaggle-cifar10.md * Update object-detection-dataset.md * Update ssd.md fcn.md * Update hybridize.md * Update hybridize.md Signed-off-by: sunhaizhou <[email protected]> Co-authored-by: zhou201505013 <[email protected]> Co-authored-by: Xinwei Liu <[email protected]> Co-authored-by: Anirudh Dagar <[email protected]> Co-authored-by: Aston Zhang <[email protected]> Co-authored-by: hugo_han <[email protected]> 
Co-authored-by: gyro永不抽风 <[email protected]> Co-authored-by: CanChengZheng <[email protected]> Co-authored-by: linlin <[email protected]> Co-authored-by: iuk <[email protected]> Co-authored-by: yoos <[email protected]> Co-authored-by: Mr. Justice Lawrence John Wargrave <[email protected]> Co-authored-by: Chiyuan Fu <[email protected]> Co-authored-by: Sunhuashan <[email protected]> Co-authored-by: Haiker Sun <[email protected]> Co-authored-by: Ming Liu <[email protected]> Co-authored-by: goldmermaid <[email protected]> Co-authored-by: silenceZheng66 <[email protected]> Co-authored-by: Wenchao Yan <[email protected]> Co-authored-by: Kiki2049 <[email protected]> Co-authored-by: Krahets <[email protected]> Co-authored-by: friedmainfunction <[email protected]> Co-authored-by: Jameson <[email protected]> Co-authored-by: P. Yao <[email protected]> Co-authored-by: Yulv-git <[email protected]> Co-authored-by: Liu,Xiao <[email protected]> Co-authored-by: YIN, Gang <[email protected]> Co-authored-by: Joe-HZ <[email protected]> Co-authored-by: lybloveyou <[email protected]> Co-authored-by: VigourJiang <[email protected]> Co-authored-by: zxhd863943427 <[email protected]> Co-authored-by: LYF <[email protected]> Co-authored-by: Aston Zhang <[email protected]> Co-authored-by: xiaotinghe <[email protected]> Co-authored-by: Ubuntu <[email protected]> Co-authored-by: Holly-Max <[email protected]> Co-authored-by: HinGwenWoong <[email protected]> Co-authored-by: Shuai Zhang <[email protected]>
37,377
0
20
38
10
158,208
11
d2l-zh
10
d2l/mxnet.py
Python
3
{ "docstring": "Construct a Gluon data iterator.\n\n Defined in :numref:`sec_linear_concise`", "language": "en", "n_whitespaces": 10, "n_words": 8, "vocab_size": 8 }
https://github.com/d2l-ai/d2l-zh.git
16
a85decode
def a85decode(b, *, foldspaces=False, adobe=False, ignorechars=b' \t\n\r\v'):
    b = _bytes_from_decode_data(b)
    if adobe:
        if not b.endswith(_A85END):
            raise ValueError(
                "Ascii85 encoded byte sequences must end "
                "with {!r}".format(_A85END)
            )
        if b.startswith(_A85START):
            b = b[2:-2]  # Strip off start/end markers
        else:
            b = b[:-2]
    #
    # We have to go through this stepwise, so as to ignore spaces and handle
    # special short sequences
    #
    packI = struct.Struct('!I').pack
    decoded = []
    decoded_append = decoded.append
    curr = []
    curr_append = curr.append
    curr_clear = curr.clear
    for x in b + b'u' * 4:
        if b'!'[0] <= x <= b'u'[0]:
            curr_append(x)
            if len(curr) == 5:
                acc = 0
                for x in curr:
                    acc = 85 * acc + (x - 33)
                try:
                    decoded_append(packI(acc))
                except struct.error:
                    raise ValueError('Ascii85 overflow') from None
                curr_clear()
        elif x == b'z'[0]:
            if curr:
                raise ValueError('z inside Ascii85 5-tuple')
            decoded_append(b'\0\0\0\0')
        elif foldspaces and x == b'y'[0]:
            if curr:
                raise ValueError('y inside Ascii85 5-tuple')
            decoded_append(b'\x20\x20\x20\x20')
        elif x in ignorechars:
            # Skip whitespace
            continue
        else:
            raise ValueError('Non-Ascii85 digit found: %c' % x)

    result = b''.join(decoded)
    padding = 4 - len(curr)
    if padding:
        # Throw away the extra padding
        result = result[:-padding]
    return result

# The following code is originally taken (with permission) from Mercurial

_b85alphabet = (b"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
                b"abcdefghijklmnopqrstuvwxyz!#$%&()*+-;<=>?@^_`{|}~")
_b85chars = None
_b85chars2 = None
_b85dec = None
8198943edd73a363c266633e1aa5b2a9e9c9f526
17
base64.py
513
add python 3.10.4 for windows
56,193
0
686
281
138
221,085
212
XX-Net
34
python3.10.4/Lib/base64.py
Python
47
{ "docstring": "Decode the Ascii85 encoded bytes-like object or ASCII string b.\n\n foldspaces is a flag that specifies whether the 'y' short sequence should be\n accepted as shorthand for 4 consecutive spaces (ASCII 0x20). This feature is\n not supported by the \"standard\" Adobe encoding.\n\n adobe controls whether the input sequence is in Adobe Ascii85 format (i.e.\n is framed with <~ and ~>).\n\n ignorechars should be a byte string containing characters to ignore from the\n input. This should only contain whitespace characters, and by default\n contains all whitespace characters in ASCII.\n\n The result is returned as a bytes object.\n ", "language": "en", "n_whitespaces": 126, "n_words": 96, "vocab_size": 71 }
https://github.com/XX-net/XX-Net.git
2
_load_formatters
def _load_formatters(module_name):
    mod = __import__(module_name, None, None, ['__all__'])
    for formatter_name in mod.__all__:
        cls = getattr(mod, formatter_name)
        _formatter_cache[cls.name] = cls
f3166e673fe8d40277b804d35d77dcdb760fc3b3
10
__init__.py
68
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
3,315
0
42
43
15
20,293
19
pipenv
10
pipenv/patched/notpip/_vendor/pygments/formatters/__init__.py
Python
5
{ "docstring": "Load a formatter (and all others in the module too).", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/pypa/pipenv.git
1
test_resolved_in_release_performance_issue
def test_resolved_in_release_performance_issue(self, mock_func):
        event = self.create_performance_issue()
        notification = ResolvedInReleaseActivityNotification(
            Activity(
                project=self.project,
                group=event.group,
                user=self.user,
                type=ActivityType.SET_RESOLVED_IN_RELEASE,
                data={"version": "meow"},
            )
        )
        with self.feature("organizations:performance-issues"), self.tasks():
            notification.send()

        attachment, text = get_attachment()
        release_name = notification.activity.data["version"]
        assert text == f"Issue marked as resolved in {release_name} by {self.name}"
        assert attachment["title"] == "N+1 Query"
        assert (
            attachment["text"]
            == "db - SELECT `books_author`.`id`, `books_author`.`name` FROM `books_author` WHERE `books_author`.`id` = %s LIMIT 21"
        )
        assert (
            attachment["footer"]
            == f"{self.project.slug} | production | <http://testserver/settings/account/notifications/workflow/?referrer=resolved_in_release_activity-slack-user|Notification Settings>"
        )
495d45c6547e398a5d4d3c1fa8cb97e69b1751f8
14
test_resolved_in_release.py
230
ref(slack): Update workflow alerts for perf issues (#40463) Slack workflow alerts for performance issues are showing a text value of "no value". This PR adds feature parity with error issues for workflow alerts so that they are shown with the proper data.
18,284
0
317
122
58
87,329
74
sentry
25
tests/sentry/integrations/slack/notifications/test_resolved_in_release.py
Python
25
{ "docstring": "\n Test that a Slack message is sent with the expected payload when a performance issue is resolved in a release\n ", "language": "en", "n_whitespaces": 35, "n_words": 20, "vocab_size": 17 }
https://github.com/getsentry/sentry.git
4
make_png
def make_png(cls, tex, fontsize, dpi):
        basefile = cls.get_basefile(tex, fontsize, dpi)
        pngfile = '%s.png' % basefile
        # see get_rgba for a discussion of the background
        if not os.path.exists(pngfile):
            dvifile = cls.make_dvi(tex, fontsize)
            cmd = ["dvipng", "-bg", "Transparent", "-D", str(dpi),
                   "-T", "tight", "-o", pngfile, dvifile]
            # When testing, disable FreeType rendering for reproducibility; but
            # dvipng 1.16 has a bug (fixed in f3ff241) that breaks --freetype0
            # mode, so for it we keep FreeType enabled; the image will be
            # slightly off.
            if (getattr(mpl, "_called_from_pytest", False) and
                    mpl._get_executable_info("dvipng").raw_version != "1.16"):
                cmd.insert(1, "--freetype0")
            cls._run_checked_subprocess(cmd, tex)
        return pngfile
13147992b317c29c6e832ca7f6d05bf48aeb0718
14
texmanager.py
198
Move towards making texmanager stateless. Previously, TexManager needed to call get_font_config at a specific place in the middle of processing to update some internal attributes before proceeding with TeX source generation. Instead, move towards making TexManager stateless (except for caching), i.e. the user facing API should be thought of as a bunch of independently callable functions `make_tex()`, `make_dvi()`, etc. (they will probably stay as methods on a "empty" TexManager object for a long time for backcompat, in fact).
23,022
0
276
117
78
108,023
94
matplotlib
21
lib/matplotlib/texmanager.py
Python
12
{ "docstring": "\n Generate a png file containing latex's rendering of tex string.\n\n Return the file name.\n ", "language": "en", "n_whitespaces": 36, "n_words": 14, "vocab_size": 13 }
https://github.com/matplotlib/matplotlib.git
3
_wait_threads
def _wait_threads(self):
        # Note that you need two loops, since you can't say which
        # thread will get each sentinel
        for t in self._threads:
            self._to_fetch.put(None)    # sentinel
        for t in self._threads:
            t.join()
        self._threads = []
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
10
locators.py
68
upd; format
12,843
0
102
39
27
62,036
35
transferlearning
7
.venv/lib/python3.8/site-packages/pip/_vendor/distlib/locators.py
Python
6
{ "docstring": "\n Tell all the threads to terminate (by sending a sentinel value) and\n wait for them to do so.\n ", "language": "en", "n_whitespaces": 40, "n_words": 18, "vocab_size": 17 }
https://github.com/jindongwang/transferlearning.git
2
table_id
def table_id(connection, table_name, schema_name=None):
    schema_idm = schema_id(connection=connection,schema_name=schema_name)
    q = f
    cur = connection.cursor()
    cur.execute(q)
    try:
        table_id = cur.fetchall()[0][0]
    except:
        raise exc.NoSuchTableError(table_name)
    return table_id
3d8422b16d8ad72005ffed2f7869eefe0b330c35
12
monet_get_id.py
117
utils used for getting schema_id and table_id
25,664
0
70
67
19
116,090
23
mindsdb
13
mindsdb/integrations/handlers/monetdb_handler/utils/monet_get_id.py
Python
15
{ "docstring": "Fetch the id for schema.table_name, defaulting to current schema if\n schema is None\n \n SELECT id\n FROM sys.tables\n WHERE name = '{table_name}'\n AND schema_id = {schema_idm}\n ", "language": "en", "n_whitespaces": 70, "n_words": 25, "vocab_size": 22 }
https://github.com/mindsdb/mindsdb.git
1
is_flow_ready
def is_flow_ready(self, **kwargs) -> bool:
        return run_async(self.client._is_flow_ready, **kwargs)

    dry_run = deprecate_by(is_flow_ready)
273fda5a86da0d6bf48f423fa50700e828b40be6
9
mixin.py
48
refactor: merge dryrun into ping (#5151) * refactor: merge dryrun into ping * style: fix overload and cli autocomplete * refactor: merge dryrun into ping * refactor: merge dryrun into ping * refactor: merge dryrun into ping * refactor: merge dryrun into ping Co-authored-by: Jina Dev Bot <[email protected]>
2,490
0
28
23
10
12,995
11
jina
9
jina/clients/mixin.py
Python
7
{ "docstring": "Check if the Flow is ready to receive requests\n\n :param kwargs: potential kwargs received passed from the public interface\n :return: boolean indicating the health/readiness of the Flow\n ", "language": "en", "n_whitespaces": 48, "n_words": 27, "vocab_size": 23 }
https://github.com/jina-ai/jina.git
1
is_decompressed_wo_data
def is_decompressed_wo_data(self) -> bool:
        return type(self._pb_body) is jina_pb2.DataRequestProtoWoData
c3849c6fee4a65a77a82b2cfda9670d727ff0f53
9
data.py
33
feat: allow to access parameters of data request wo loading data (#4991)
2,382
0
22
19
8
12,696
8
jina
7
jina/types/request/data.py
Python
7
{ "docstring": "\n Checks if the underlying proto object was already deserialized into a :class:`jina.proto.jina_pb2.DataRequestProtoWoData`. It means that the proto is loaded without the data ( docs ).\n\n :return: True if the proto was deserialized before into a DataRequest without docs\n ", "language": "en", "n_whitespaces": 60, "n_words": 38, "vocab_size": 26 }
https://github.com/jina-ai/jina.git
3
_update_trackables
def _update_trackables(self):
        for trackable_obj in self._self_tracked_trackables:
            if isinstance(
                trackable_obj, tf.__internal__.tracking.TrackableDataStructure
            ):
                self._track_variables(trackable_obj)
00524152437b957ca4e850a5db014e223d3c6826
12
base_layer.py
54
isort, black and flake8 checked
83,115
0
78
33
12
279,734
12
keras
10
keras/engine/base_layer.py
Python
6
{ "docstring": "Track variables added to lists/dicts after creation", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
https://github.com/keras-team/keras.git
3
get_unpositioned_tip
def get_unpositioned_tip(self, tip_shape=None, tip_length=None):
        from manim.mobject.geometry.tips import ArrowTriangleFilledTip

        if tip_shape is None:
            tip_shape = ArrowTriangleFilledTip
        if tip_length is None:
            tip_length = self.get_default_tip_length()
        color = self.get_color()
        style = {"fill_color": color, "stroke_color": color}
        style.update(self.tip_style)
        tip = tip_shape(length=tip_length, **style)
        return tip
e040bcacd38378386749db18aeba575b93f4ebca
10
arc.py
134
Improved structure of the :mod:`.mobject` module (#2476) * group graphing and update its references * group text and update its references * group opengl and update its references * group three_d and update its references * group geometry and update (most) references * move some chaning.py + updater files into animation * refactor arc.py * refactor line.py * refactor polygram.py * refactor tips.py * black + isort * import new files in __init__.py * refactor places where geometry was used * black + isort again * remove unused imports * update reference.rst * add descriptions to files * fix circular imports * forgot ArrowTip * fix tests * fix doctests * satisfy mypy? * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix ALL merge conflicts * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * one VMobject import slipped through * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * re-add imports to `manim/opengl/__init__.py` * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix reference manual * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * ignore unknown directive type * fix arrow tip imports in docstrings Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Benjamin Hackl <[email protected]>
46,162
0
123
83
27
189,675
38
manim
17
manim/mobject/geometry/arc.py
Python
11
{ "docstring": "\n Returns a tip that has been stylistically configured,\n but has not yet been given a position in space.\n ", "language": "en", "n_whitespaces": 40, "n_words": 18, "vocab_size": 15 }
https://github.com/ManimCommunity/manim.git
29
fit
def fit(self, X, y, sample_weight=None):
        solver = _check_solver(self.solver, self.penalty, self.dual)

        if not isinstance(self.C, numbers.Number) or self.C < 0:
            raise ValueError("Penalty term must be positive; got (C=%r)" % self.C)
        if self.penalty == "elasticnet":
            if (
                not isinstance(self.l1_ratio, numbers.Number)
                or self.l1_ratio < 0
                or self.l1_ratio > 1
            ):
                raise ValueError(
                    "l1_ratio must be between 0 and 1; got (l1_ratio=%r)"
                    % self.l1_ratio
                )
        elif self.l1_ratio is not None:
            warnings.warn(
                "l1_ratio parameter is only used when penalty is "
                "'elasticnet'. Got "
                "(penalty={})".format(self.penalty)
            )
        if self.penalty == "none":
            if self.C != 1.0:  # default values
                warnings.warn(
                    "Setting penalty='none' will ignore the C and l1_ratio parameters"
                )
                # Note that check for l1_ratio is done right above
            C_ = np.inf
            penalty = "l2"
        else:
            C_ = self.C
            penalty = self.penalty
        if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
            raise ValueError(
                "Maximum number of iteration must be positive; got (max_iter=%r)"
                % self.max_iter
            )
        if not isinstance(self.tol, numbers.Number) or self.tol < 0:
            raise ValueError(
                "Tolerance for stopping criteria must be positive; got (tol=%r)"
                % self.tol
            )

        if solver == "lbfgs":
            _dtype = np.float64
        else:
            _dtype = [np.float64, np.float32]

        X, y = self._validate_data(
            X,
            y,
            accept_sparse="csr",
            dtype=_dtype,
            order="C",
            accept_large_sparse=solver not in ["liblinear", "sag", "saga"],
        )
        check_classification_targets(y)
        self.classes_ = np.unique(y)

        multi_class = _check_multi_class(self.multi_class, solver, len(self.classes_))

        if solver == "liblinear":
            if effective_n_jobs(self.n_jobs) != 1:
                warnings.warn(
                    "'n_jobs' > 1 does not have any effect when"
                    " 'solver' is set to 'liblinear'. Got 'n_jobs'"
                    " = {}.".format(effective_n_jobs(self.n_jobs))
                )
            self.coef_, self.intercept_, n_iter_ = _fit_liblinear(
                X,
                y,
                self.C,
                self.fit_intercept,
                self.intercept_scaling,
                self.class_weight,
                self.penalty,
                self.dual,
                self.verbose,
                self.max_iter,
                self.tol,
                self.random_state,
                sample_weight=sample_weight,
            )
            self.n_iter_ = np.array([n_iter_])
            return self

        if solver in ["sag", "saga"]:
            max_squared_sum = row_norms(X, squared=True).max()
        else:
            max_squared_sum = None

        n_classes = len(self.classes_)
        classes_ = self.classes_
        if n_classes < 2:
            raise ValueError(
                "This solver needs samples of at least 2 classes"
                " in the data, but the data contains only one"
                " class: %r" % classes_[0]
            )

        if len(self.classes_) == 2:
            n_classes = 1
            classes_ = classes_[1:]

        if self.warm_start:
            warm_start_coef = getattr(self, "coef_", None)
        else:
            warm_start_coef = None
        if warm_start_coef is not None and self.fit_intercept:
            warm_start_coef = np.append(
                warm_start_coef, self.intercept_[:, np.newaxis], axis=1
            )

        # Hack so that we iterate only once for the multinomial case.
        if multi_class == "multinomial":
            classes_ = [None]
            warm_start_coef = [warm_start_coef]

        if warm_start_coef is None:
            warm_start_coef = [None] * n_classes

        path_func = delayed(_logistic_regression_path)

        # The SAG solver releases the GIL so it's more efficient to use
        # threads for this solver.
        if solver in ["sag", "saga"]:
            prefer = "threads"
        else:
            prefer = "processes"

        fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose, prefer=prefer)(
            path_func(
                X,
                y,
                pos_class=class_,
                Cs=[C_],
                l1_ratio=self.l1_ratio,
                fit_intercept=self.fit_intercept,
                tol=self.tol,
                verbose=self.verbose,
                solver=solver,
                multi_class=multi_class,
                max_iter=self.max_iter,
                class_weight=self.class_weight,
                check_input=False,
                random_state=self.random_state,
                coef=warm_start_coef_,
                penalty=penalty,
                max_squared_sum=max_squared_sum,
                sample_weight=sample_weight,
            )
            for class_, warm_start_coef_ in zip(classes_, warm_start_coef)
        )

        fold_coefs_, _, n_iter_ = zip(*fold_coefs_)
        self.n_iter_ = np.asarray(n_iter_, dtype=np.int32)[:, 0]

        n_features = X.shape[1]
        if multi_class == "multinomial":
            self.coef_ = fold_coefs_[0][0]
        else:
            self.coef_ = np.asarray(fold_coefs_)
            self.coef_ = self.coef_.reshape(
                n_classes, n_features + int(self.fit_intercept)
            )

        if self.fit_intercept:
            self.intercept_ = self.coef_[:, -1]
            self.coef_ = self.coef_[:, :-1]
        else:
            self.intercept_ = np.zeros(n_classes)

        return self
5f75acdd12d77b973471961ad716367c6199d01c
16
_logistic.py
1,352
MNT Bump joblib version dependency to 1.0.0 (#22365)
75,396
0
2,295
858
258
258,737
490
scikit-learn
81
sklearn/linear_model/_logistic.py
Python
152
{ "docstring": "\n Fit the model according to the given training data.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training vector, where `n_samples` is the number of samples and\n `n_features` is the number of features.\n\n y : array-like of shape (n_samples,)\n Target vector relative to X.\n\n sample_weight : array-like of shape (n_samples,) default=None\n Array of weights that are assigned to individual samples.\n If not provided, then each sample is given unit weight.\n\n .. versionadded:: 0.17\n *sample_weight* support to LogisticRegression.\n\n Returns\n -------\n self\n Fitted estimator.\n\n Notes\n -----\n The SAGA solver supports both float64 and float32 bit arrays.\n ", "language": "en", "n_whitespaces": 280, "n_words": 97, "vocab_size": 75 }
https://github.com/scikit-learn/scikit-learn.git
1
ds
def ds(self):
        return DeepsetCloudDocumentStore(api_endpoint=DC_API_ENDPOINT, api_key=DC_API_KEY, index=DC_TEST_INDEX)


# Integration tests
255072d8d548a19a1678ddc46b41d41cf5d09bc4
8
test_deepsetcloud.py
35
refactor: move dC tests to their own module and job (#3529) * move dC tests to their own module and job * restore global var * revert
75,193
0
26
21
9
258,161
9
haystack
9
test/document_stores/test_deepsetcloud.py
Python
2
{ "docstring": "\n We make this fixture depend on `dc_api_mock` so that passing the document store will\n activate the mocking and we spare one function parameter.\n ", "language": "en", "n_whitespaces": 45, "n_words": 23, "vocab_size": 22 }
https://github.com/deepset-ai/haystack.git
6
_postprocess
def _postprocess(self, inputs):
        results = []
        for examples, texts, temp_results in zip(inputs['batch_examples'],
                                                 inputs['batch_texts'],
                                                 inputs['batch_results']):
            for i in range(len(examples)):
                result = {}
                det_pred, char_preds, length = temp_results[i]
                pred_result = self._parse_decode(texts[i], char_preds, det_pred,
                                                 length)
                result['source'] = texts[i]
                result['target'] = ''.join(pred_result)
                results.append(result)
        results = self._auto_joiner(results, self.input_mapping, is_dict=True)
        for result in results:
            errors_result = []
            for i, (source_token, target_token
                    ) in enumerate(zip(result['source'], result['target'])):
                if source_token != target_token:
                    errors_result.append({
                        'position': i,
                        'correction': {
                            source_token: target_token
                        }
                    })
            result['errors'] = errors_result
        return results
1e2ee01dade0d4076ba98aa613c3eb150c615abb
17
text_correction.py
297
Update Taskflow word_segmentation and ner tasks (#1666) * Add AutoSplitter & AutoJoiner * codestyle fix * unify auto joiner * add comments * add sentence split mode * update params * add paddle version check * add wordtag for word_segmentation * add wordtag for word_segmentation * add ner-lac and word_segmentation-jieba * add return entities only for ner * fix ci * fix ci * fix ci * fix ci * fix ci * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * fix bugs of dataloader * remove guard * use fast mode for rnn example * Update README.md * Update README.md
118,205
0
565
186
54
322,613
77
PaddleNLP
26
paddlenlp/taskflow/text_correction.py
Python
27
{ "docstring": "\n The model output is the logits and probs, this function will convert the model output to raw text.\n ", "language": "en", "n_whitespaces": 33, "n_words": 18, "vocab_size": 15 }
https://github.com/PaddlePaddle/PaddleNLP.git
16
label_tag
def label_tag(self, contents=None, attrs=None, label_suffix=None, tag=None):
        contents = contents or self.label
        if label_suffix is None:
            label_suffix = (
                self.field.label_suffix
                if self.field.label_suffix is not None
                else self.form.label_suffix
            )
        # Only add the suffix if the label does not end in punctuation.
        # Translators: If found as last label character, these punctuation
        # characters will prevent the default label_suffix to be appended to the label
        if label_suffix and contents and contents[-1] not in _(":?.!"):
            contents = format_html("{}{}", contents, label_suffix)
        widget = self.field.widget
        id_ = widget.attrs.get("id") or self.auto_id
        if id_:
            id_for_label = widget.id_for_label(id_)
            if id_for_label:
                attrs = {**(attrs or {}), "for": id_for_label}
            if self.field.required and hasattr(self.form, "required_css_class"):
                attrs = attrs or {}
                if "class" in attrs:
                    attrs["class"] += " " + self.form.required_css_class
                else:
                    attrs["class"] = self.form.required_css_class
        context = {
            "field": self,
            "label": contents,
            "attrs": attrs,
            "use_tag": bool(id_),
            "tag": tag or "label",
        }
        return self.form.render(self.form.template_name_label, context)
9c19aff7c7561e3a82978a272ecdaad40dda5c00
15
boundfield.py
380
Refs #33476 -- Reformatted code with Black.
51,286
0
497
230
94
205,927
142
django
23
django/forms/boundfield.py
Python
30
{ "docstring": "\n Wrap the given contents in a <label>, if the field has an ID attribute.\n contents should be mark_safe'd to avoid HTML escaping. If contents\n aren't given, use the field's HTML-escaped label.\n\n If attrs are given, use them as HTML attributes on the <label> tag.\n\n label_suffix overrides the form's label_suffix.\n ", "language": "en", "n_whitespaces": 92, "n_words": 49, "vocab_size": 39 }
https://github.com/django/django.git
3
__call__
def __call__(self, bbox_pred, gt_bboxes):
        if self.box_format == 'xywh':
            gt_bboxes = bbox_xyxy_to_cxcywh(gt_bboxes)
        elif self.box_format == 'xyxy':
            bbox_pred = bbox_cxcywh_to_xyxy(bbox_pred)
        bbox_cost = torch.cdist(bbox_pred, gt_bboxes, p=1)
        return bbox_cost * self.weight


@MATCH_COST.register_module()
cac356380d505bf15587f07c0529218cc36b9652
@MATCH_COST.register_module()
11
match_cost.py
103
[Feature] Add Maskformer to mmdet (#7212) * first commit * add README * move model description from config to readme add description for binary_input add description for dice loss add a independent panoptic gt processing function add a independent panoptic gt processing function remove compatibility of pretrain in maskformer * update comments in maskformer_head * update docs format
70,199
1
84
56
23
244,027
28
mmdetection
14
mmdet/core/bbox/match_costs/match_cost.py
Python
7
{ "docstring": "\n Args:\n bbox_pred (Tensor): Predicted boxes with normalized coordinates\n (cx, cy, w, h), which are all in range [0, 1]. Shape\n (num_query, 4).\n gt_bboxes (Tensor): Ground truth boxes with normalized\n coordinates (x1, y1, x2, y2). Shape (num_gt, 4).\n\n Returns:\n torch.Tensor: bbox_cost value with weight\n ", "language": "en", "n_whitespaces": 143, "n_words": 43, "vocab_size": 35 }
https://github.com/open-mmlab/mmdetection.git
2
_get_queryset
def _get_queryset(klass):
    # If it is a model class or anything else with ._default_manager
    if hasattr(klass, "_default_manager"):
        return klass._default_manager.all()
    return klass
9c19aff7c7561e3a82978a272ecdaad40dda5c00
10
shortcuts.py
44
Refs #33476 -- Reformatted code with Black.
51,389
0
40
24
20
206,164
21
django
5
django/shortcuts.py
Python
4
{ "docstring": "\n Return a QuerySet or a Manager.\n Duck typing in action: any class with a `get()` method (for\n get_object_or_404) or a `filter()` method (for get_list_or_404) might do\n the job.\n ", "language": "en", "n_whitespaces": 44, "n_words": 28, "vocab_size": 22 }
https://github.com/django/django.git
3
_get_classifier_artifacts
def _get_classifier_artifacts(fitted_estimator, prefix, X, y_true, sample_weight):
    import sklearn

    if not _is_plotting_supported():
        return []
847eb6b22d03f0cffef945996cf835272870435a
8
utils.py
41
Improve confusion matrix plot (#5273) * update Signed-off-by: Weichen Xu <[email protected]> * fix Signed-off-by: Weichen Xu <[email protected]> * update Signed-off-by: Weichen Xu <[email protected]>
2,909
0
29
187
13
19,198
13
mlflow
8
mlflow/sklearn/utils.py
Python
48
{ "docstring": "\n Draw and record various common artifacts for classifier\n\n For all classifiers, we always log:\n (1) confusion matrix:\n https://scikit-learn.org/stable/modules/generated/sklearn.metrics.plot_confusion_matrix.html\n\n For only binary classifiers, we will log:\n (2) precision recall curve:\n https://scikit-learn.org/stable/modules/generated/sklearn.metrics.plot_precision_recall_curve.html\n (3) roc curve:\n https://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html\n\n Steps:\n 1. Extract X and y_true from fit_args and fit_kwargs, and split into train & test datasets.\n 2. If the sample_weight argument exists in fit_func (accuracy_score by default\n has sample_weight), extract it from fit_args or fit_kwargs as\n (y_true, y_pred, sample_weight, multioutput), otherwise as (y_true, y_pred, multioutput)\n 3. return a list of artifacts path to be logged\n\n :param fitted_estimator: The already fitted regressor\n :param fit_args: Positional arguments given to fit_func.\n :param fit_kwargs: Keyword arguments given to fit_func.\n :return: List of artifacts to be logged\n ", "language": "en", "n_whitespaces": 178, "n_words": 117, "vocab_size": 91 }
https://github.com/mlflow/mlflow.git
1
mock_device_tracker_update_config
def mock_device_tracker_update_config():
    with patch("homeassistant.components.device_tracker.legacy.update_config"):
        yield
31a787558fd312331b55e5c2c4b33341fc3601fc
10
test_init.py
29
Ensure recorder test fixture is setup before hass fixture (#80528) * Ensure recorder test fixture is setup before hass fixture * Adjust more tests
88,492
0
18
12
5
289,350
5
core
2
tests/components/demo/test_init.py
Python
3
{ "docstring": "Prevent device tracker from creating known devices file.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/home-assistant/core.git
2
coplanar
def coplanar(self, other):
        aq = self.axis()
        ar = other.axis()
        return (aq == ar or aq == -ar)
e8c5f4fe692e863bf0a48573a1d0c7b92487c5c1
9
quaternion.py
56
hamilton
47,953
0
45
33
13
196,513
17
sympy
6
sympy/algebras/quaternion.py
Python
4
{ "docstring": "\n Returns if the two quaternions are coplanar or not.\n\n Parameters\n ==========\n\n other : a quaternion\n\n Examples\n ========\n\n >>> from sympy.algebras.quaternion import Quaternion\n >>> q = Quaternion(1, 4, 4, 4)\n >>> q1 = Quaternion(2, 8, 8, 8)\n >>> q.coplanar(q1)\n True\n\n >>> q1 = Quaternion(2, 8, 13, 12)\n >>> q.coplanar(q1)\n False\n\n ", "language": "en", "n_whitespaces": 155, "n_words": 49, "vocab_size": 36 }
https://github.com/sympy/sympy.git
2
_TLS_Ext_CertTypeDispatcher
def _TLS_Ext_CertTypeDispatcher(m, *args, **kargs):
    tmp_len = struct.unpack("!H", m[2:4])[0]
    if tmp_len == 1:
        cls = TLS_Ext_ServerCertType
    else:
        cls = TLS_Ext_ClientCertType
    return cls(m, *args, **kargs)
2001f35c7a9762d2d58f5ec402d93d10bbfd8e10
11
extensions.py
87
Fix spellcheck
52,772
0
52
54
18
209,767
23
scapy
10
scapy/layers/tls/extensions.py
Python
7
{ "docstring": "\n We need to select the correct one on dissection. We use the length for\n that, as 1 for client version would imply an empty list.\n ", "language": "en", "n_whitespaces": 35, "n_words": 25, "vocab_size": 22 }
https://github.com/secdev/scapy.git
2
_set_gradient_checkpointing
def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (Data2VecAudioEncoder, Data2VecAudioFeatureEncoder)):
            module.gradient_checkpointing = value


DATA2VEC_AUDIO_START_DOCSTRING = r

DATA2VEC_AUDIO_INPUTS_DOCSTRING = r


@add_start_docstrings(
    "The bare Data2VecAudio Model transformer outputting raw hidden-states without any specific head on top.",
    DATA2VEC_AUDIO_START_DOCSTRING,
)
df5a4094a6e3f98f2cb2058cdb688fcc3f453220
@add_start_docstrings( "The bare Data2VecAudio Model transformer outputting raw hidden-states without any specific head on top.", DATA2VEC_AUDIO_START_DOCSTRING, )
9
modeling_data2vec_audio.py
69
Add Data2Vec (#15507) * Add data2vec model cloned from roberta * Add checkpoint conversion script * Fix copies * Update docs * Add checkpoint conversion script * Remove fairseq data2vec_text script and fix format * Add comment on where to get data2vec_text.py * Remove mock implementation cheat.py and fix style * Fix copies * Remove TF and Flax classes from init * Add back copy from fairseq data2vec_text.py and fix style * Update model name in docs/source/index.mdx to be CamelCase * Revert model name in table to lower-case to get check_table test to pass * Update src/transformers/models/data2vec/__init__.py Co-authored-by: Patrick von Platen <[email protected]> * Update src/transformers/models/data2vec/convert_data2vec_original_pytorch_checkpoint_to_pytorch.py Co-authored-by: Patrick von Platen <[email protected]> * Update src/transformers/models/data2vec/modeling_data2vec.py Co-authored-by: Patrick von Platen <[email protected]> * Update src/transformers/models/data2vec/modeling_data2vec.py Co-authored-by: Patrick von Platen <[email protected]> * Update src/transformers/models/data2vec/modeling_data2vec.py Co-authored-by: Patrick von Platen <[email protected]> * Update src/transformers/models/data2vec/modeling_data2vec.py Co-authored-by: Patrick von Platen <[email protected]> * Update docs/source/model_doc/data2vec.mdx Co-authored-by: Sylvain Gugger <[email protected]> * Update docs/source/model_doc/data2vec.mdx Co-authored-by: Sylvain Gugger <[email protected]> * Update src/transformers/models/auto/configuration_auto.py Co-authored-by: Sylvain Gugger <[email protected]> * Update src/transformers/models/data2vec/configuration_data2vec.py Co-authored-by: Sylvain Gugger <[email protected]> * Update src/transformers/models/data2vec/modeling_data2vec.py Co-authored-by: Sylvain Gugger <[email protected]> * Update src/transformers/models/data2vec/modeling_data2vec.py Co-authored-by: Sylvain Gugger <[email protected]> * Update src/transformers/models/data2vec/modeling_data2vec.py Co-authored-by: Sylvain Gugger <[email protected]> * Update tests/test_modeling_data2vec.py Co-authored-by: Sylvain Gugger <[email protected]> * Update src/transformers/models/data2vec/configuration_data2vec.py Co-authored-by: Sylvain Gugger <[email protected]> * Update src/transformers/models/data2vec/modeling_data2vec.py Co-authored-by: Sylvain Gugger <[email protected]> * Update documentation * Copy-paste Data2VecConfig from BertConfig * Update config checkpoint to point to edugp/data2vec-nlp-base. Fix style and repo-consistency * Update config special tokens to match RoBERTa * Split multiple assertions and add individual error messages * Rename Data2VecModel to Data2VecForTextModel * Add Data2Vec to _toctree.yml * Rename Data2VecEmbeddings to Data2VecForTextEmbeddings * Add initial Data2VecForAudio model (unfinished). Only matching fairseq's implementation up to the feature encoder (before positional encoding). * finish audio model * finish audio file * Update names and fix style, quality and repo consistency * Remove Data2VecAudioForPretraining. Add tests for Data2VecAudio, mimicking the Wav2Vec2 test suite. Fix bias initilization in positional conv layers. Move back configurations for audio and text to separate files. 
* add inputs to logits to data2vec' * correct autio models * correct config auto * correct tok auto * Update utils/tests_fetcher.py * delete unnecessary files * delete unnecessary files * further renaming * make all tests pass * finish * remove useless test file * Update tests/test_modeling_common.py * Update utils/check_repo.py Co-authored-by: Patrick von Platen <[email protected]> * Update src/transformers/models/data2vec/modeling_data2vec_text.py Co-authored-by: Patrick von Platen <[email protected]> * Fix copies * Update docs * Remove fairseq data2vec_text script and fix format * Add comment on where to get data2vec_text.py * Remove mock implementation cheat.py and fix style * Fix copies * Remove TF and Flax classes from init * Add back copy from fairseq data2vec_text.py and fix style * Update model name in docs/source/index.mdx to be CamelCase * Revert model name in table to lower-case to get check_table test to pass * Update documentation * Update src/transformers/models/data2vec/__init__.py Co-authored-by: Patrick von Platen <[email protected]> * Update src/transformers/models/data2vec/convert_data2vec_original_pytorch_checkpoint_to_pytorch.py Co-authored-by: Patrick von Platen <[email protected]> * Update src/transformers/models/data2vec/modeling_data2vec.py Co-authored-by: Patrick von Platen <[email protected]> * Update src/transformers/models/data2vec/modeling_data2vec.py Co-authored-by: Patrick von Platen <[email protected]> * Update src/transformers/models/data2vec/modeling_data2vec.py Co-authored-by: Patrick von Platen <[email protected]> * Update src/transformers/models/data2vec/modeling_data2vec.py Co-authored-by: Patrick von Platen <[email protected]> * Update src/transformers/models/auto/configuration_auto.py Co-authored-by: Sylvain Gugger <[email protected]> * Update src/transformers/models/data2vec/configuration_data2vec.py Co-authored-by: Sylvain Gugger <[email protected]> * Update src/transformers/models/data2vec/modeling_data2vec.py Co-authored-by: Sylvain Gugger <[email protected]> * Update src/transformers/models/data2vec/modeling_data2vec.py Co-authored-by: Sylvain Gugger <[email protected]> * Update src/transformers/models/data2vec/modeling_data2vec.py Co-authored-by: Sylvain Gugger <[email protected]> * Update tests/test_modeling_data2vec.py Co-authored-by: Sylvain Gugger <[email protected]> * Update src/transformers/models/data2vec/configuration_data2vec.py Co-authored-by: Sylvain Gugger <[email protected]> * Update src/transformers/models/data2vec/modeling_data2vec.py Co-authored-by: Sylvain Gugger <[email protected]> * Copy-paste Data2VecConfig from BertConfig * Update config checkpoint to point to edugp/data2vec-nlp-base. Fix style and repo-consistency * Update config special tokens to match RoBERTa * Split multiple assertions and add individual error messages * Rename Data2VecModel to Data2VecForTextModel * Add Data2Vec to _toctree.yml * Rename Data2VecEmbeddings to Data2VecForTextEmbeddings * Add initial Data2VecForAudio model (unfinished). Only matching fairseq's implementation up to the feature encoder (before positional encoding). * finish audio model * finish audio file * add inputs to logits to data2vec' * Update names and fix style, quality and repo consistency * Remove Data2VecAudioForPretraining. Add tests for Data2VecAudio, mimicking the Wav2Vec2 test suite. Fix bias initilization in positional conv layers. Move back configurations for audio and text to separate files. 
* correct autio models * correct config auto * correct tok auto * delete unnecessary files * delete unnecessary files * Update utils/tests_fetcher.py * further renaming * make all tests pass * finish * remove useless test file * Update tests/test_modeling_common.py * Update utils/check_repo.py Co-authored-by: Patrick von Platen <[email protected]> * Update src/transformers/models/data2vec/modeling_data2vec_text.py Co-authored-by: Patrick von Platen <[email protected]> * Move data2vec tests to new structure * Fix test imports for text tests * Remove fairseq files * Change paper link to arxiv * Modify Data2Vec documentation to reflect that the encoder is not shared across the audio and text models in the current implementation. * Update text model checkpoint to be facebook/data2vec-text-base * Add 'Copy from' statements and update paper links and docs * fix copy from statements * improve copied from * correct more copied from statements * finish copied from stuff * make style * add model to README * add to master Co-authored-by: Eduardo Gonzalez Ponferrada <[email protected]> Co-authored-by: Patrick von Platen <[email protected]> Co-authored-by: Sylvain Gugger <[email protected]>
6,521
1
53
28
31
35,671
34
transformers
11
src/transformers/models/data2vec/modeling_data2vec_audio.py
Python
3
{ "docstring": "\n Data2VecAudio was proposed in [data2vec: A General Framework for Self-supervised Learning in Speech, Vision and\n Language](https://arxiv.org/pdf/2202.03555) by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu and\n Michael Auli.\n\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving etc.).\n\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`Data2VecAudioConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n\n Args:\n input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):\n Float values of input raw speech waveform. Values can be obtained by loading a *.flac* or *.wav* audio file\n into an array of type *List[float]* or a *numpy.ndarray*, *e.g.* via the soundfile library (*pip install\n soundfile*). To prepare the array into *input_values*, the [`Wav2Vec2Processor`] should be used for padding\n and conversion into a tensor of type *torch.FloatTensor*. See [`Wav2Vec2Processor.__call__`] for details.\n attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing convolution and attention on padding token indices. Mask values selected in `[0,\n 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n\n <Tip warning={true}>\n\n `attention_mask` should only be passed if the corresponding processor has `config.return_attention_mask ==\n True`. For all models whose processor has `config.return_attention_mask == False`, such as\n [data2vec-audio-base](https://huggingface.co/facebook/data2vec-audio-base-960h), `attention_mask` should\n **not** be passed to avoid degraded performance when doing batched inference. For such models\n `input_values` should simply be padded with 0 and passed without `attention_mask`. Be aware that these\n models also yield slightly different results depending on whether `input_values` is padded or not.\n\n </Tip>\n\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n", "language": "en", "n_whitespaces": 705, "n_words": 369, "vocab_size": 222 }
https://github.com/huggingface/transformers.git
1
print_help
def print_help(self):
        help_text = f
        console.print(text=help_text, menu="Econometrics")
f2ca215132de40804667feb4deaa0c6b8bfc3d25
12
econometrics_controller.py
60
Econometrics Menu (#1403) * Add Statistics Menu * Add Granger Causality test * Apply Black formatting * Add Cointegration Tests * Adjust plotting for Cointegration test * Add Significant parameter to Cointegration tests * Move regression functions to seperate .py files * Progress on Panel Data * A lot of progress for Panel Data * Make functions robust and improve documentation * Re-enable Breusch-Godfrey * Add modify functionality * Improve modify functionality * Add Breusch-Pagan heteroscedasticity test * Capitalize a word * Include documentatin for the Statistics Menu * Update _index.md * Update _index.md * Update _index.md * Fix export statements and add Example datasets * Update example with Longley's dataset * Update documentation with a legit source * Compare the results from each regression models based on the wage_panel dataset * Updated with all relevant types of regression tables * Update with all relevant regression types for Panel data * Update _index.md * Add change column type, improve OLS, add time and entity effects for FE * Update documentation and fix a small bug * Add in Statistics menu, replacing Custom menu * Remove custom menu * Add in documentation * Add in gst files * Cointegration can be used on any amount of columns * Add Tests * Make tests time invariant * Update Poetry and Requirements * Change name of Statistics menu to Econometrics menu * Rename scripts * Add type in Documentation * Change names from Statistics to Econometrics * Add graph * Update tests with rounding and naming * Make minor adjustments to fix the tests * Updating tests : allow extra args for capture * Apply recorder formatting * Adding some minor formatting * Fix error with MyPy * Attempt to fix MyPy annoyance * super small style things * Fix small bugs and add plot command to dwat * Small description mistake * Update documentation with missing argument * Update styling * Add error handling and add improve show functionality * Fix merge issue * Allow import from custom_imports Co-authored-by: Jeroen Bouma <[email protected]> Co-authored-by: jmaslek <[email protected]> Co-authored-by: Chavithra PARANA <[email protected]>
84,276
0
28
22
7
282,726
7
OpenBBTerminal
9
gamestonk_terminal/econometrics/econometrics_controller.py
Python
35
{ "docstring": "Print help[cmds]\n load load in custom data sets\n export export a dataset\n remove remove a dataset\n options show available column-dataset options[/cmds]\n\n[param]Loaded files:[/param] {\", \".join(self.files) or None}[cmds]\n\nExploration\n show show a portion of a loaded dataset\n plot plot data from a dataset\n type change types of the columns or display their types\n desc show descriptive statistics of a dataset\n index set (multi) index based on columns\n clean clean a dataset by filling or dropping NaNs\n modify combine columns of datasets and delete or rename columns\n\nTimeseries\n ols fit a (multi) linear regression model\n norm perform normality tests on a column of a dataset\n root perform unitroot tests (ADF & KPSS) on a column of a dataset\n\nPanel Data\n panel Estimate model based on various regression techniques\n compare Compare results of all estimated models\n\nTests\n dwat perform Durbin-Watson autocorrelation test on the residuals of the regression\n bgod perform Breusch-Godfrey autocorrelation tests with lags on the residuals of the regression\n bpag perform Breusch-Pagan heteroscedasticity test on the residuals of the regression\n granger perform Granger causality tests on two columns\n coint perform co-integration test on a multitude of columns[/cmds]\n ", "language": "en", "n_whitespaces": 467, "n_words": 186, "vocab_size": 103 }
https://github.com/OpenBB-finance/OpenBBTerminal.git
7
print_dict
def print_dict(d, logger, delimiter=0):
    for k, v in sorted(d.items()):
        if isinstance(v, dict):
            logger.info("{}{} : ".format(delimiter * " ", str(k)))
            print_dict(v, logger, delimiter + 4)
        elif isinstance(v, list) and len(v) >= 1 and isinstance(v[0], dict):
            logger.info("{}{} : ".format(delimiter * " ", str(k)))
            for value in v:
                print_dict(value, logger, delimiter + 4)
        else:
            logger.info("{}{} : {}".format(delimiter * " ", k, v))


@functools.lru_cache()
11f6ff38dcc61348aa4aae8ad2fbbe42b0eab34d
@functools.lru_cache()
16
utils.py
234
add supplementary
4,507
1
156
139
37
23,061
60
PaddleOCR
18
test_tipc/supplementary/utils.py
Python
11
{ "docstring": "\n Recursively visualize a dict and\n indenting acrrording by the relationship of keys.\n ", "language": "en", "n_whitespaces": 22, "n_words": 12, "vocab_size": 12 }
https://github.com/PaddlePaddle/PaddleOCR.git
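A minimal usage sketch for the print_dict helper above; the logger name and the nested config dict are illustrative, not taken from the PaddleOCR test suite.

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("print_dict_demo")  # hypothetical logger name

# Nested dicts are indented by 4 extra spaces per level, lists of dicts
# are expanded element by element, and scalar values print inline.
cfg = {
    "optimizer": {"name": "Adam", "lr": 0.001},
    "datasets": [{"name": "train"}, {"name": "eval"}],
    "epochs": 10,
}
print_dict(cfg, logger)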
6
is_tradesignal
def is_tradesignal(self, action):
    # trade signal
    return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) or
                (action == Actions.Short.value and self._position == Positions.Short) or
                (action == Actions.Long.value and self._position == Positions.Long))
8eeaab27467fa2e0bdc7314bdb888998bbb20af8
14
RLPrediction_env.py
102
add reward function
34,793
0
90
65
20
150,565
31
freqtrade
10
freqtrade/freqai/prediction_models/RL/RLPrediction_env.py
Python
4
{ "docstring": "\n not trade signal is :\n Action: Neutral, position: Neutral -> Nothing\n Action: Long, position: Long -> Hold Long\n Action: Short, position: Short -> Hold Short\n ", "language": "en", "n_whitespaces": 61, "n_words": 25, "vocab_size": 16 }
https://github.com/freqtrade/freqtrade.git
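A self-contained sketch of the check above, with stand-in Actions and Positions enums (the real ones live in freqtrade's RL environment module and may differ); the function returns True only when the action would actually change the current position.

from enum import Enum

class Actions(Enum):      # assumed values, for illustration only
    Neutral = 0
    Long = 1
    Short = 2

class Positions(Enum):    # assumed values, for illustration only
    Neutral = 0
    Long = 1
    Short = 2

def is_tradesignal(position, action):
    # Not a trade signal when the action merely repeats the current position.
    return not ((action == Actions.Neutral.value and position == Positions.Neutral)
                or (action == Actions.Short.value and position == Positions.Short)
                or (action == Actions.Long.value and position == Positions.Long))

print(is_tradesignal(Positions.Neutral, Actions.Long.value))  # True: would open a long
print(is_tradesignal(Positions.Long, Actions.Long.value))     # False: already long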
3
testPlacementGroupDistributedTraining
def testPlacementGroupDistributedTraining(self, reuse_actors=False):
    head_bundle = {"CPU": 1, "GPU": 0, "custom": 0}
    child_bundle = {"CPU": 1}

    placement_group_factory = PlacementGroupFactory(
        [head_bundle, child_bundle, child_bundle, child_bundle]
    )
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
9
test_trial_runner_pg.py
77
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
29,793
0
69
213
19
132,695
23
ray
7
python/ray/tune/tests/test_trial_runner_pg.py
Python
31
{ "docstring": "Run distributed training using placement groups.\n\n Each trial requests 4 CPUs and starts 4 remote training workers.\n ", "language": "en", "n_whitespaces": 31, "n_words": 17, "vocab_size": 15 }
https://github.com/ray-project/ray.git
11
start
def start(self) -> 'Deployment':
    if self.is_sandbox and not self._sandbox_deployed:
        self.update_sandbox_args()
    if self.pod_args['uses_before'] is not None:
        _args = self.pod_args['uses_before']
        if getattr(self.args, 'noblock_on_start', False):
            _args.noblock_on_start = True
        self.uses_before_pod = PodFactory.build_pod(_args)
        self.enter_context(self.uses_before_pod)
    if self.pod_args['uses_after'] is not None:
        _args = self.pod_args['uses_after']
        if getattr(self.args, 'noblock_on_start', False):
            _args.noblock_on_start = True
        self.uses_after_pod = PodFactory.build_pod(_args)
        self.enter_context(self.uses_after_pod)
    if self.pod_args['head'] is not None:
        _args = self.pod_args['head']
        if getattr(self.args, 'noblock_on_start', False):
            _args.noblock_on_start = True
        self.head_pod = PodFactory.build_pod(_args)
        self.enter_context(self.head_pod)
    for shard_id in self.pod_args['pods']:
        self.shards[shard_id] = self._ReplicaSet(
            self.args,
            self.pod_args['pods'][shard_id],
            self.head_pod,
        )
        self.enter_context(self.shards[shard_id])
    if not getattr(self.args, 'noblock_on_start', False):
        self.activate()
    return self
7c4c39a9d82c58ef2493c21a288c755901a9594e
13
__init__.py
417
fix: do not deploy sandbox on init (#4844)
2,287
0
420
256
43
12,429
87
jina
20
jina/orchestrate/deployments/__init__.py
Python
40
{ "docstring": "\n Start to run all :class:`Pod` in this BaseDeployment.\n\n :return: started deployment\n\n .. note::\n If one of the :class:`Pod` fails to start, make sure that all of them\n are properly closed.\n ", "language": "en", "n_whitespaces": 81, "n_words": 30, "vocab_size": 26 }
https://github.com/jina-ai/jina.git
1
test_roots
def test_roots(self):
    with DAG("test_dag", start_date=DEFAULT_DATE) as dag:
        op1 = EmptyOperator(task_id="t1")
        op2 = EmptyOperator(task_id="t2")
        op3 = EmptyOperator(task_id="t3")
        op4 = EmptyOperator(task_id="t4")
        op5 = EmptyOperator(task_id="t5")
        [op1, op2] >> op3 >> [op4, op5]

        assert set(dag.roots) == {op1, op2}
49e336ae0302b386a2f47269a6d13988382d975f
12
test_dag.py
148
Replace usage of `DummyOperator` with `EmptyOperator` (#22974)

* Replace usage of `DummyOperator` with `EmptyOperator`
9,164
0
125
86
28
47,576
34
airflow
15
tests/models/test_dag.py
Python
9
{ "docstring": "Verify if dag.roots returns the root tasks of a DAG.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/apache/airflow.git
9
_post_validate_environment
def _post_validate_environment(self, attr, value, templar):
    env = {}
    if value is not None:
884244f1b2da3c3f367e064ef4ac0123fcb12675
7
task.py
35
Python 3.9 min for controller (#77566)
79,001
0
34
137
13
267,689
13
ansible
6
lib/ansible/playbook/task.py
Python
22
{ "docstring": "\n Override post validation of vars on the play, as we don't want to\n template these too early.\n ", "language": "en", "n_whitespaces": 39, "n_words": 17, "vocab_size": 17 }
https://github.com/ansible/ansible.git
3
get_edge_data
def get_edge_data(self, u, v, key=None, default=None):
    try:
        if key is None:
            return self._adj[u][v]
        else:
            return self._adj[u][v][key]
    except KeyError:
        return default
8f4c99debc9440728c5e85f8bffa5d26b232eb6f
14
multigraph.py
83
Multigraph docs update (#5389)

* Updated MultiDiGraph documentation to include more examples of actually using parallel edges, and fixed references to things like G[u, v] where G[u, v, k] is required for a MultiDigraph. Have not made parallel changes in MultiGraph which should maybe also be made? Docs tests pass on my end; no code outside of comments was changed. -Peter Mawhorter
* Updated docs for MultiGraph to add more multigraph-specific examples and fix a few places where untested examples were wrong. -Peter Mawhorter
* [DOC] fix typo
* add the right amount of separators

Co-authored-by: Mridul Seth <[email protected]>
41,879
0
104
55
18
176,414
20
networkx
8
networkx/classes/multigraph.py
Python
8
{ "docstring": "Returns the attribute dictionary associated with edge (u, v,\n key).\n\n If a key is not provided, returns a dictionary mapping edge keys\n to attribute dictionaries for each edge between u and v.\n\n This is identical to `G[u][v][key]` except the default is returned\n instead of an exception is the edge doesn't exist.\n\n Parameters\n ----------\n u, v : nodes\n\n default : any Python object (default=None)\n Value to return if the specific edge (u, v, key) is not\n found, OR if there are no edges between u and v and no key\n is specified.\n\n key : hashable identifier, optional (default=None)\n Return data only for the edge with specified key, as an\n attribute dictionary (rather than a dictionary mapping keys\n to attribute dictionaries).\n\n Returns\n -------\n edge_dict : dictionary\n The edge attribute dictionary, OR a dictionary mapping edge\n keys to attribute dictionaries for each of those edges if no\n specific key is provided (even if there's only one edge\n between u and v).\n\n Examples\n --------\n >>> G = nx.MultiGraph() # or MultiDiGraph\n >>> key = G.add_edge(0, 1, key=\"a\", weight=7)\n >>> G[0][1][\"a\"] # key='a'\n {'weight': 7}\n >>> G.edges[0, 1, \"a\"] # key='a'\n {'weight': 7}\n\n Warning: we protect the graph data structure by making\n `G.edges` and `G[1][2]` read-only dict-like structures.\n However, you can assign values to attributes in e.g.\n `G.edges[1, 2, 'a']` or `G[1][2]['a']` using an additional\n bracket as shown next. You need to specify all edge info\n to assign to the edge data associated with an edge.\n\n >>> G[0][1][\"a\"][\"weight\"] = 10\n >>> G.edges[0, 1, \"a\"][\"weight\"] = 10\n >>> G[0][1][\"a\"][\"weight\"]\n 10\n >>> G.edges[1, 0, \"a\"][\"weight\"]\n 10\n\n >>> G = nx.MultiGraph() # or MultiDiGraph\n >>> nx.add_path(G, [0, 1, 2, 3])\n >>> G.edges[0, 1, 0][\"weight\"] = 5\n >>> G.get_edge_data(0, 1)\n {0: {'weight': 5}}\n >>> e = (0, 1)\n >>> G.get_edge_data(*e) # tuple form\n {0: {'weight': 5}}\n >>> G.get_edge_data(3, 0) # edge not in graph, returns None\n >>> G.get_edge_data(3, 0, default=0) # edge not in graph, return default\n 0\n >>> G.get_edge_data(1, 0, 0) # specific key gives back\n {'weight': 5}\n ", "language": "en", "n_whitespaces": 778, "n_words": 330, "vocab_size": 166 }
https://github.com/networkx/networkx.git
2
test_nested_condition_not_filters
def test_nested_condition_not_filters(self, ds, documents):
    ds.write_documents(documents)
    filters = {
        "$not": {
            "$or": {
                "$and": {"numbers": {"$lt": 5.0}, "month": {"$ne": "01"}},
                "$not": {"year": {"$lte": "2021", "$gte": "2020"}},
            }
        }
    }
    result = ds.get_all_documents(filters=filters)
    assert len(result) == 3
    docs_meta = result[0].meta["numbers"]
    assert [2, 4] == docs_meta

    # Test same logical operator twice on same level
    filters = {
        "$or": [
            {"$and": {"name": {"$in": ["name_0", "name_1"]}, "year": {"$gte": "2020"}}},
            {"$and": {"name": {"$in": ["name_0", "name_1"]}, "year": {"$lt": "2021"}}},
        ]
    }
    result = ds.get_all_documents(filters=filters)
    docs_meta = [doc.meta["name"] for doc in result]
    assert len(result) == 4
    assert "name_0" in docs_meta
    assert "name_2" not in docs_meta
2bb81331b75aec68de0d45c4cb116170d265f1fe
17
test_base.py
366
feat: add SQLDocumentStore tests (#3517)

* port SQL tests
* cleanup document_store_tests.py from sql tests
* leftover
* Update .github/workflows/tests.yml

Co-authored-by: Sara Zan <[email protected]>

* review comments
* Update test/document_stores/test_base.py

Co-authored-by: bogdankostic <[email protected]>
Co-authored-by: Sara Zan <[email protected]>
Co-authored-by: bogdankostic <[email protected]>
75,190
0
352
202
61
258,126
98
haystack
12
test/document_stores/test_base.py
Python
25
{ "docstring": "\n Test nested logical operations within \"$not\", important as we apply De Morgan's laws in WeaviateDocumentstore\n ", "language": "en", "n_whitespaces": 30, "n_words": 15, "vocab_size": 15 }
https://github.com/deepset-ai/haystack.git
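A small worked example of the De Morgan rewrite the docstring above refers to: a "$not" wrapped around an "$or" is evaluated as an "$and" of the negated clauses. The document and clause values are illustrative.

# doc stands in for one document's metadata
doc = {"numbers": 3.0, "month": "02"}

clause_a = doc["numbers"] < 5.0   # {"numbers": {"$lt": 5.0}}
clause_b = doc["month"] != "01"   # {"month": {"$ne": "01"}}

lhs = not (clause_a or clause_b)          # $not over $or
rhs = (not clause_a) and (not clause_b)   # $and of negated clauses
assert lhs == rhs                         # De Morgan's law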
2
_project_modified
def _project_modified(self):
    return any(var.get() for var in self._modified_vars.values())
dab823a3eb7a5257cb1e0818ee10ed234d3de97f
11
project.py
43
Typing - lib.gui.display_command
21,303
0
22
25
8
101,924
8
faceswap
7
lib/gui/project.py
Python
2
{ "docstring": "bool: ``True`` if the project has been modified otherwise ``False``. ", "language": "en", "n_whitespaces": 10, "n_words": 10, "vocab_size": 10 }
https://github.com/deepfakes/faceswap.git
1
test_get_pdu_event_from_cache_is_pristine
def test_get_pdu_event_from_cache_is_pristine(self):
    # Get the PDU in the cache
    remote_pdu = self._get_pdu_once()

    # Modify the the event reference.
    # This change should not make it back to the `_get_pdu_cache`.
    remote_pdu.internal_metadata.outlier = True

    # Get the event again. This time it should read it from cache.
    remote_pdu2 = self.get_success(
        self.hs.get_federation_client().get_pdu(
            ["yet.another.server"],
            remote_pdu.event_id,
            RoomVersions.V9,
        )
    )

    # Sanity check that we are working against the same event
    self.assertEqual(remote_pdu.event_id, remote_pdu2.event_id)

    # Make sure the event does not include modification from earlier
    self.assertIsNotNone(remote_pdu2)
    self.assertEqual(remote_pdu2.internal_metadata.outlier, False)
0f971ca68e808dd16f53f5594a6b33b7bddcc9a9
13
test_federation_client.py
135
Update `get_pdu` to return the original, pristine `EventBase` (#13320)

Update `get_pdu` to return the untouched, pristine `EventBase` as it was originally seen over federation (no metadata added). Previously, we returned the same `event` reference that we stored in the cache which downstream code modified in place and added metadata like setting it as an `outlier` and essentially poisoned our cache. Now we always return a copy of the `event` so the original can stay pristine in our cache and re-used for the next cache call.

Split out from https://github.com/matrix-org/synapse/pull/13205

As discussed at:

- https://github.com/matrix-org/synapse/pull/13205#discussion_r918365746
- https://github.com/matrix-org/synapse/pull/13205#discussion_r918366125

Related to https://github.com/matrix-org/synapse/issues/12584. This PR doesn't fix that issue because it hits [`get_event` which exists from the local database before it tries to `get_pdu`](https://github.com/matrix-org/synapse/blob/7864f33e286dec22368dc0b11c06eebb1462a51e/synapse/federation/federation_client.py#L581-L594).
72,509
0
246
80
56
248,918
81
synapse
16
tests/federation/test_federation_client.py
Python
13
{ "docstring": "Test that modifications made to events returned by `get_pdu()`\n do not propagate back to to the internal cache (events returned should\n be a copy).\n ", "language": "en", "n_whitespaces": 45, "n_words": 24, "vocab_size": 21 }
https://github.com/matrix-org/synapse.git
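A minimal sketch of the failure mode the test above guards against, using a plain dict cache instead of Synapse's internals: handing out the cached object lets a caller's mutation poison the cache, while returning a copy keeps the cached value pristine.

import copy

_cache = {}

def get_event(event_id):
    event = _cache.setdefault(event_id, {"event_id": event_id, "outlier": False})
    # Returning `event` directly would let the caller's changes leak back
    # into _cache; a copy keeps the cached value untouched.
    return copy.deepcopy(event)

first = get_event("$abc")
first["outlier"] = True        # caller-side modification

second = get_event("$abc")
assert second["outlier"] is False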
5
_store_rejected_events_txn
def _store_rejected_events_txn(self, txn, events_and_contexts):
    # Remove the rejected events from the list now that we've added them
    # to the events table and the events_json table.
    to_remove = set()
    for event, context in events_and_contexts:
        if context.rejected:
            # Insert the event_id into the rejections table
            # (events.rejection_reason has already been done)
            self._store_rejections_txn(txn, event.event_id, context.rejected)
            to_remove.add(event)

    return [ec for ec in events_and_contexts if ec[0] not in to_remove]
2aa37a4250675f6d9feb57ec0dce65b2a6a3cde6
12
events.py
100
Add `state_key` and `rejection_reason` to `events` (#11792) ... and start populating them for new events
70,993
0
178
63
51
246,084
65
synapse
13
synapse/storage/databases/main/events.py
Python
7
{ "docstring": "Add rows to the 'rejections' table for received events which were\n rejected\n\n Args:\n txn (twisted.enterprise.adbapi.Connection): db connection\n events_and_contexts (list[(EventBase, EventContext)]): events\n we are persisting\n\n Returns:\n list[(EventBase, EventContext)] new list, without the rejected\n events.\n ", "language": "en", "n_whitespaces": 124, "n_words": 33, "vocab_size": 30 }
https://github.com/matrix-org/synapse.git
5
read_template
def read_template(self):
    log.info("reading manifest template '%s'", self.template)
    template = TextFile(self.template, strip_comments=1, skip_blanks=1,
                        join_lines=1, lstrip_ws=1, rstrip_ws=1,
                        collapse_join=1)
    try:
        while True:
            line = template.readline()
            if line is None:  # end of file
                break
            try:
                self.filelist.process_template_line(line)
            # the call above can raise a DistutilsTemplateError for
            # malformed lines, or a ValueError from the lower-level
            # convert_path function
            except (DistutilsTemplateError, ValueError) as msg:
                self.warn("%s, line %d: %s" %
                          (template.filename, template.current_line, msg))
    finally:
        template.close()
8198943edd73a363c266633e1aa5b2a9e9c9f526
17
sdist.py
180
add python 3.10.4 for windows
56,758
0
453
110
59
222,820
69
XX-Net
23
python3.10.4/Lib/distutils/command/sdist.py
Python
18
{ "docstring": "Read and parse manifest template file named by self.template.\n\n (usually \"MANIFEST.in\") The parsing and processing is done by\n 'self.filelist', which updates itself accordingly.\n ", "language": "en", "n_whitespaces": 44, "n_words": 23, "vocab_size": 21 }
https://github.com/XX-net/XX-Net.git
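A short sketch of the TextFile options used above, applied to a throwaway MANIFEST.in-style file; the file name and template contents are illustrative. Comment and blank lines are dropped and surrounding whitespace is trimmed before each line is handed back to the caller.

from distutils.text_file import TextFile

with open("MANIFEST.in.demo", "w") as fh:   # hypothetical file name
    fh.write("# include the docs\n"
             "\n"
             "  include README.rst\n"
             "recursive-include examples *.py\n")

template = TextFile("MANIFEST.in.demo", strip_comments=1, skip_blanks=1,
                    join_lines=1, lstrip_ws=1, rstrip_ws=1, collapse_join=1)
while True:
    line = template.readline()
    if line is None:          # end of file
        break
    print(line)               # "include README.rst", then "recursive-include examples *.py"
template.close()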
3
_clone
def _clone(self) -> HasProps:
    attrs = self.properties_with_values(include_defaults=False, include_undefined=True)
    return self.__class__(**{key: val for key, val in attrs.items() if val is not Undefined})


KindRef = Any # TODO
b23a3b77447ede916b31756fca997cbb1b806de7
12
has_props.py
84
Discover unstable defaults in `HasProps.__init__()` (#11959)

* Discover unstable defaults in HasProps.__init__()
* Make HasProps.__getattr__() fail properly
* More sensible implementation of HasProps._clone()
* Make InstanceDefault a generic class
* Fix recursive model definition in tests
* Fix default override in test_document
* Add unit tests
53,205
0
46
49
23
212,208
26
bokeh
14
bokeh/core/has_props.py
Python
8
{ "docstring": " Duplicate a HasProps object.\n\n Values that are containers are shallow-copied.\n\n ", "language": "en", "n_whitespaces": 25, "n_words": 10, "vocab_size": 9 }
https://github.com/bokeh/bokeh.git
1
timetz
def timetz(self) -> npt.NDArray[np.object_]:
    return ints_to_pydatetime(self.asi8, self.tz, box="time")
521259299f7829da667ba39302ec77acedde9e5e
9
datetimes.py
49
DOC: Improve doc summaries in series.rst (#45237)
39,395
0
22
30
8
163,187
8
pandas
10
pandas/core/arrays/datetimes.py
Python
8
{ "docstring": "\n Returns numpy array of :class:`datetime.time` objects with timezone\n information.\n\n The time part of the Timestamps.\n ", "language": "en", "n_whitespaces": 44, "n_words": 15, "vocab_size": 14 }
https://github.com/pandas-dev/pandas.git
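A usage sketch for the timetz accessor documented above, through a timezone-aware DatetimeIndex; the timestamps and timezone are illustrative.

import pandas as pd

idx = pd.date_range("2022-01-01 09:30", periods=2, freq="H", tz="US/Eastern")
print(idx.timetz)
# array of datetime.time objects carrying the US/Eastern tzinfo, e.g.
# [datetime.time(9, 30, tzinfo=...), datetime.time(10, 30, tzinfo=...)]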
3
add_project
def add_project(self, project):
    from sentry.models import Project

    try:
        with atomic_transaction(using=router.db_for_write(ReleaseProject)):
            created = ReleaseProject.objects.get_or_create(project=project, release=self)[1]
            if not project.flags.has_releases:
                project.flags.has_releases = True
                project.update(flags=F("flags").bitor(Project.flags.has_releases))
    except IntegrityError:
        created = False

    return created
5ceb67a008be09eac939346e9882d2ec244b7e45
19
release.py
150
fix(releases): Fix bug where project.has_releases would be false when there are releases associated with the project (#34205)

We increment `new_groups` in `ReleaseProject` in buffers. If `ReleaseProject` doesn't already exist, then buffers will also create the row. The problem here is that we usually rely on `Release.add_project` to create a `ReleaseProject` row, which also makes sure we set the `has_releases` flag on the project.

To fix this, we add a receive on `buffer_incr_complete` and set the flag there if it is not already set. Also put some defensive code in `add_project`, although I don't think it will make a large difference. This should make it so that going forward we'll properly set this flag. Will follow up with a backfill to correct the data.
19,630
0
153
91
24
99,254
28
sentry
21
src/sentry/models/release.py
Python
11
{ "docstring": "\n Add a project to this release.\n\n Returns True if the project was added and did not already exist.\n ", "language": "en", "n_whitespaces": 40, "n_words": 18, "vocab_size": 17 }
https://github.com/getsentry/sentry.git
3
build
def build(self, var_list):
    super().build(var_list)
    if getattr(self, "_built", False):
        return
    self._built = True
    self._momentums = []
    self._velocities = []
    self._u_product = tf.Variable(1.0, dtype=var_list[0].dtype)
    # Keep a counter on how many times of _u_product has been computed to
    # avoid duplicated computations.
    self._u_product_counter = 1

    for var in var_list:
        self._momentums.append(
            self.add_variable_from_reference(
                model_variable=var, variable_name="m"
            )
        )
        self._velocities.append(
            self.add_variable_from_reference(
                model_variable=var, variable_name="v"
            )
        )
84afc5193d38057e2e2badf9c889ea87d80d8fbf
13
nadam.py
182
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
81,352
0
289
113
48
275,258
59
keras
18
keras/optimizers/optimizer_experimental/nadam.py
Python
20
{ "docstring": "Initialize optimizer variables.\n\n Nadam optimizer has 2 types of variables: momentums and velocities.\n\n Args:\n var_list: list of model variables to build Nadam variables on.\n ", "language": "en", "n_whitespaces": 54, "n_words": 24, "vocab_size": 20 }
https://github.com/keras-team/keras.git
1
test_conflicting_autogenerated_basenames
def test_conflicting_autogenerated_basenames(self):
    self.router.register(r'notes', NoteViewSet)

    with pytest.raises(ImproperlyConfigured):
        self.router.register(r'notes_kwduplicate', KWargedNoteViewSet)

    with pytest.raises(ImproperlyConfigured):
        self.router.register(r'notes_duplicate', NoteViewSet)
48a21aa0eb3a95d32456c2a927eff9552a04231e
10
test_routers.py
94
raise ImproperlyConfigured exception if `basename` is not unique (#8438)

* raise ImproperlyConfigured if basename already exists
* rename already_registered function; return True/False
* additional basename tests
* additional basename tests
* Update rest_framework/routers.py

Co-authored-by: David Graves <[email protected]>
Co-authored-by: Asif Saif Uddin <[email protected]>
9,585
0
62
55
9
48,734
12
django-rest-framework
9
tests/test_routers.py
Python
6
{ "docstring": "\n Ensure 2 routers with the same model, and no basename specified\n throws an ImproperlyConfigured exception\n ", "language": "en", "n_whitespaces": 37, "n_words": 15, "vocab_size": 15 }
https://github.com/encode/django-rest-framework.git
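A sketch of the situation the test above exercises, with hypothetical viewsets and assuming a Note model and a configured Django project: two viewsets over the same queryset model get the same autogenerated basename, so a second registration must pass an explicit basename or the router raises ImproperlyConfigured.

from rest_framework import routers, viewsets

class NoteViewSet(viewsets.ModelViewSet):        # assumes a Note model exists
    queryset = Note.objects.all()

class KWargedNoteViewSet(viewsets.ModelViewSet):
    queryset = Note.objects.all()

router = routers.SimpleRouter()
router.register(r'notes', NoteViewSet)
# The autogenerated basename for both viewsets would be "note"; pass an
# explicit basename to avoid the ImproperlyConfigured error.
router.register(r'notes_kwduplicate', KWargedNoteViewSet, basename='kwarged-note')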
8
meshgrid
def meshgrid(*xi, copy=True, sparse=False, indexing='xy'):
    ndim = len(xi)

    if indexing not in ['xy', 'ij']:
        raise ValueError(
            "Valid values for `indexing` are 'xy' and 'ij'.")

    s0 = (1,) * ndim
    output = [np.asanyarray(x).reshape(s0[:i] + (-1,) + s0[i + 1:])
              for i, x in enumerate(xi)]

    if indexing == 'xy' and ndim > 1:
        # switch first and second axis
        output[0].shape = (1, -1) + s0[2:]
        output[1].shape = (-1, 1) + s0[2:]

    if not sparse:
        # Return the full N-D matrix (not only the 1-D vector)
        output = np.broadcast_arrays(*output, subok=True)

    if copy:
        output = [x.copy() for x in output]

    return output
85a2a7776e8cc01ce3efdc92e262a7764f5fc061
13
function_base.py
275
DOC: Fix typo in meshgrid example coordinate was misspelled in a comment in the example code [ci skip]
38,447
0
198
172
67
159,925
98
numpy
19
numpy/lib/function_base.py
Python
16
{ "docstring": "\n Return coordinate matrices from coordinate vectors.\n\n Make N-D coordinate arrays for vectorized evaluations of\n N-D scalar/vector fields over N-D grids, given\n one-dimensional coordinate arrays x1, x2,..., xn.\n\n .. versionchanged:: 1.9\n 1-D and 0-D cases are allowed.\n\n Parameters\n ----------\n x1, x2,..., xn : array_like\n 1-D arrays representing the coordinates of a grid.\n indexing : {'xy', 'ij'}, optional\n Cartesian ('xy', default) or matrix ('ij') indexing of output.\n See Notes for more details.\n\n .. versionadded:: 1.7.0\n sparse : bool, optional\n If True the shape of the returned coordinate array for dimension *i*\n is reduced from ``(N1, ..., Ni, ... Nn)`` to\n ``(1, ..., 1, Ni, 1, ..., 1)``. These sparse coordinate grids are\n intended to be use with :ref:`basics.broadcasting`. When all\n coordinates are used in an expression, broadcasting still leads to a\n fully-dimensonal result array.\n\n Default is False.\n\n .. versionadded:: 1.7.0\n copy : bool, optional\n If False, a view into the original arrays are returned in order to\n conserve memory. Default is True. Please note that\n ``sparse=False, copy=False`` will likely return non-contiguous\n arrays. Furthermore, more than one element of a broadcast array\n may refer to a single memory location. If you need to write to the\n arrays, make copies first.\n\n .. versionadded:: 1.7.0\n\n Returns\n -------\n X1, X2,..., XN : ndarray\n For vectors `x1`, `x2`,..., 'xn' with lengths ``Ni=len(xi)`` ,\n return ``(N1, N2, N3,...Nn)`` shaped arrays if indexing='ij'\n or ``(N2, N1, N3,...Nn)`` shaped arrays if indexing='xy'\n with the elements of `xi` repeated to fill the matrix along\n the first dimension for `x1`, the second for `x2` and so on.\n\n Notes\n -----\n This function supports both indexing conventions through the indexing\n keyword argument. Giving the string 'ij' returns a meshgrid with\n matrix indexing, while 'xy' returns a meshgrid with Cartesian indexing.\n In the 2-D case with inputs of length M and N, the outputs are of shape\n (N, M) for 'xy' indexing and (M, N) for 'ij' indexing. In the 3-D case\n with inputs of length M, N and P, outputs are of shape (N, M, P) for\n 'xy' indexing and (M, N, P) for 'ij' indexing. The difference is\n illustrated by the following code snippet::\n\n xv, yv = np.meshgrid(x, y, indexing='ij')\n for i in range(nx):\n for j in range(ny):\n # treat xv[i,j], yv[i,j]\n\n xv, yv = np.meshgrid(x, y, indexing='xy')\n for i in range(nx):\n for j in range(ny):\n # treat xv[j,i], yv[j,i]\n\n In the 1-D and 0-D case, the indexing and sparse keywords have no effect.\n\n See Also\n --------\n mgrid : Construct a multi-dimensional \"meshgrid\" using indexing notation.\n ogrid : Construct an open multi-dimensional \"meshgrid\" using indexing\n notation.\n\n Examples\n --------\n >>> nx, ny = (3, 2)\n >>> x = np.linspace(0, 1, nx)\n >>> y = np.linspace(0, 1, ny)\n >>> xv, yv = np.meshgrid(x, y)\n >>> xv\n array([[0. , 0.5, 1. ],\n [0. , 0.5, 1. ]])\n >>> yv\n array([[0., 0., 0.],\n [1., 1., 1.]])\n >>> xv, yv = np.meshgrid(x, y, sparse=True) # make sparse output arrays\n >>> xv\n array([[0. , 0.5, 1. ]])\n >>> yv\n array([[0.],\n [1.]])\n\n `meshgrid` is very useful to evaluate functions on a grid. 
If the\n function depends on all coordinates, you can use the parameter\n ``sparse=True`` to save memory and computation time.\n\n >>> x = np.linspace(-5, 5, 101)\n >>> y = np.linspace(-5, 5, 101)\n >>> # full coordinate arrays\n >>> xx, yy = np.meshgrid(x, y)\n >>> zz = np.sqrt(xx**2 + yy**2)\n >>> xx.shape, yy.shape, zz.shape\n ((101, 101), (101, 101), (101, 101))\n >>> # sparse coordinate arrays\n >>> xs, ys = np.meshgrid(x, y, sparse=True)\n >>> zs = np.sqrt(xs**2 + ys**2)\n >>> xs.shape, ys.shape, zs.shape\n ((1, 101), (101, 1), (101, 101))\n >>> np.array_equal(zz, zs)\n True\n\n >>> import matplotlib.pyplot as plt\n >>> h = plt.contourf(x, y, zs)\n >>> plt.axis('scaled')\n >>> plt.colorbar()\n >>> plt.show()\n ", "language": "en", "n_whitespaces": 1123, "n_words": 609, "vocab_size": 321 }
https://github.com/numpy/numpy.git