Dataset columns (type, with observed minimum/maximum value or string length):

    ast_errors       string   length 0 to 3.2k
    d_id             int64    44 to 121k
    id               int64    70 to 338k
    n_whitespaces    int64    3 to 14k
    path             string   length 8 to 134
    n_words          int64    4 to 4.82k
    n_identifiers    int64    1 to 131
    random_cut       string   length 16 to 15.8k
    commit_message   string   length 2 to 15.3k
    fun_name         string   length 1 to 84
    commit_id        string   length 40 to 40
    repo             string   length 3 to 28
    file_name        string   length 5 to 79
    ast_levels       int64    6 to 31
    nloc             int64    1 to 548
    url              string   length 31 to 59
    complexity       int64    1 to 66
    token_counts     int64    6 to 2.13k
    n_ast_errors     int64    0 to 28
    vocab_size       int64    4 to 1.11k
    n_ast_nodes      int64    15 to 19.2k
    language         string   1 distinct class
    documentation    dict
    code             string   length 101 to 62.2k
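For orientation, records with this schema can be loaded and inspected with the `datasets` library; a minimal sketch, assuming the dataset is published on the Hugging Face Hub (the repository id `org/python-commits` below is a placeholder, not the real name):

    from datasets import load_dataset

    # Placeholder repo id; substitute the actual dataset name.
    ds = load_dataset("org/python-commits", split="train")

    # Each row carries the function source plus commit and AST metadata.
    row = ds[0]
    print(row["repo"], row["path"], row["fun_name"])
    print(row["documentation"]["docstring"])
    print(row["code"])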
----------------------------------------------------------------------
d_id: 55,049 | id: 217,975 | n_whitespaces: 109 | n_words: 39 | n_identifiers: 8
path: python3.10.4/Lib/imaplib.py
random_cut: def xatom(self, name, *args): name = name.upper() #if not name in self.capabilities: # Let the server decide! # raise self.error('unknown extension command: %s' % name) if not name in Commands: Commands[name] = (self.state,) return self._simple_command(name, *args) # Private
commit_message:
add python 3.10.4 for windows
fun_name: xatom | repo: XX-Net | file_name: imaplib.py | language: Python
url: https://github.com/XX-net/XX-Net.git
commit_id: 8198943edd73a363c266633e1aa5b2a9e9c9f526
ast_levels: 10 | nloc: 5 | complexity: 2 | token_counts: 45 | n_ast_errors: 0 | vocab_size: 32 | n_ast_nodes: 75
documentation:
{ "docstring": "Allow simple extension commands\n notified by server in CAPABILITY response.\n\n Assumes command is legal in current state.\n\n (typ, [data]) = <instance>.xatom(name, arg, ...)\n\n Returns response appropriate to extension command `name'.\n ", "language": "en", "n_whitespaces": 73, "n_words": 30, "vocab_size": 27 }
code:

def xatom(self, name, *args):
    name = name.upper()
    #if not name in self.capabilities:      # Let the server decide!
    #    raise self.error('unknown extension command: %s' % name)
    if not name in Commands:
        Commands[name] = (self.state,)
    return self._simple_command(name, *args)


# Private methods
----------------------------------------------------------------------
d_id: 199 | id: 1,493 | n_whitespaces: 69 | n_words: 34 | n_identifiers: 11
path: packages/syft/src/syft/core/tensor/nn/loss.py
random_cut: def forward(self, outputs, targets): outputs = outputs.clip(self.epsilon, 1 - self.epsilon) log_loss = targets * dp_log(outputs) + ((targets * -1) + 1) * dp_log((outputs * -1) + 1) log_loss = log_loss.sum(axi
commit_message:
Moved all code from notebook to codebase Took 19 minutes
fun_name: forward | repo: PySyft | file_name: loss.py | language: Python
url: https://github.com/OpenMined/PySyft.git
commit_id: f3b8f6f1196e6f8a92620b4efc190715273fecab
ast_levels: 14 | nloc: 5 | complexity: 1 | token_counts: 76 | n_ast_errors: 0 | vocab_size: 23 | n_ast_nodes: 123
documentation:
{ "docstring": "Forward pass.\n\n .. math:: L = -t \\\\log(p) - (1 - t) \\\\log(1 - p)\n\n Parameters\n ----------\n outputs : numpy.array\n Predictions in (0, 1), such as sigmoidal output of a neural network.\n targets : numpy.array\n Targets in [0, 1], such as ground truth labels.\n ", "language": "en", "n_whitespaces": 108, "n_words": 44, "vocab_size": 37 }
code:

def forward(self, outputs, targets):
    outputs = outputs.clip(self.epsilon, 1 - self.epsilon)
    log_loss = targets * dp_log(outputs) + ((targets * -1) + 1) * dp_log((outputs * -1) + 1)
    log_loss = log_loss.sum(axis=1) * -1
    return log_loss.mean()
----------------------------------------------------------------------
d_id: 41,834 | id: 176,320 | n_whitespaces: 61 | n_words: 36 | n_identifiers: 13
path: networkx/algorithms/assortativity/correlation.py
random_cut: def numeric_assortativity_coefficient(G, attribute, nodes=None): if nodes is None: nodes
commit_message:
MAINT: Cleanup assortativity module, remove unused variables (#5301) Remove unused variables, sort imports, raise errors instead of accepting invalid arguments silently Co-authored-by: Dan Schult <[email protected]>
fun_name: numeric_assortativity_coefficient | repo: networkx | file_name: correlation.py | language: Python
url: https://github.com/networkx/networkx.git
commit_id: 34d9d630bb02426d297d3e20fedb7da8c3ced03a
ast_levels: 10 | nloc: 7 | complexity: 4 | token_counts: 75 | n_ast_errors: 0 | vocab_size: 28 | n_ast_nodes: 111
documentation:
{ "docstring": "Compute assortativity for numerical node attributes.\n\n Assortativity measures the similarity of connections\n in the graph with respect to the given numeric attribute.\n\n Parameters\n ----------\n G : NetworkX graph\n\n attribute : string\n Node attribute key.\n\n nodes: list or iterable (optional)\n Compute numeric assortativity only for attributes of nodes in\n container. The default is all nodes.\n\n Returns\n -------\n r: float\n Assortativity of graph for given attribute\n\n Examples\n --------\n >>> G = nx.Graph()\n >>> G.add_nodes_from([0, 1], size=2)\n >>> G.add_nodes_from([2, 3], size=3)\n >>> G.add_edges_from([(0, 1), (2, 3)])\n >>> print(nx.numeric_assortativity_coefficient(G, \"size\"))\n 1.0\n\n Notes\n -----\n This computes Eq. (21) in Ref. [1]_ , which is the Pearson correlation\n coefficient of the specified (scalar valued) attribute across edges.\n\n References\n ----------\n .. [1] M. E. J. Newman, Mixing patterns in networks\n Physical Review E, 67 026126, 2003\n ", "language": "en", "n_whitespaces": 244, "n_words": 129, "vocab_size": 99 }
code:

def numeric_assortativity_coefficient(G, attribute, nodes=None):
    if nodes is None:
        nodes = G.nodes
    vals = {G.nodes[n][attribute] for n in nodes}
    mapping = {d: i for i, d, in enumerate(vals)}
    M = attribute_mixing_matrix(G, attribute, nodes, mapping)
    return _numeric_ac(M, mapping)
----------------------------------------------------------------------
d_id: 80,425 | id: 270,311 | n_whitespaces: 25 | n_words: 13 | n_identifiers: 11
path: keras/distribute/distributed_file_utils.py
random_cut: def write_filepath(filepath, strategy): dir
commit_message:
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
fun_name: write_filepath | repo: keras | file_name: distributed_file_utils.py | language: Python
url: https://github.com/keras-team/keras.git
commit_id: 84afc5193d38057e2e2badf9c889ea87d80d8fbf
ast_levels: 9 | nloc: 4 | complexity: 1 | token_counts: 44 | n_ast_errors: 0 | vocab_size: 12 | n_ast_nodes: 70
documentation:
{ "docstring": "Returns the writing file path to be used to save file distributedly.\n\n Directory to contain `filepath` would be created if it doesn't exist.\n\n Args:\n filepath: Original filepath that would be used without distribution.\n strategy: The tf.distribute strategy object currently used.\n\n Returns:\n The writing filepath that should be used to save file with distribution.\n ", "language": "en", "n_whitespaces": 80, "n_words": 53, "vocab_size": 36 }
code:

def write_filepath(filepath, strategy):
    dirpath = os.path.dirname(filepath)
    base = os.path.basename(filepath)
    return os.path.join(write_dirpath(dirpath, strategy), base)
----------------------------------------------------------------------
ast_errors: @add_start_docstrings( "The bare ConvNext model outputting raw features without any specific head on top.", CONVNEXT_START_DOCSTRING, )
d_id: 5,922 | id: 32,423 | n_whitespaces: 53 | n_words: 30 | n_identifiers: 9
path: src/transformers/models/convnext/modeling_tf_convnext.py
random_cut: def serving(self, inputs): output = self.call(inputs) return self.serving_output(output) CONVNEXT_START_DOCSTRING = r CONVNEXT_INPUTS_DOCSTRING = r @add_start_docstrings( "The bare
commit_message:
Update serving code to enable `saved_model=True` (#18153) * Add serving_output and serving methods to some vision models * Add serving outputs for DeiT * Don't convert hidden states - differing shapes * Make saveable * Fix up * Make swin saveable * Add in tests * Fix funnel tests (can't convert to tensor) * Fix numpy call * Tidy up a bit * Add in hidden states - resnet * Remove numpy * Fix failing tests - tensor shape and skipping tests * Remove duplicated function * PR comments - formatting and var names * PR comments Add suggestions made by Joao Gante: * Use tf.shape instead of shape_list * Use @tooslow decorator on tests * Simplify some of the logic * PR comments Address Yih-Dar Sheih comments - making tensor names consistent and make types float * Types consistent with docs; disable test on swin (slow) * CI trigger * Change input_features to float32 * Add serving_output for segformer * Fixup Co-authored-by: Amy Roberts <[email protected]>
fun_name: serving | repo: transformers | file_name: modeling_tf_convnext.py | language: Python
url: https://github.com/huggingface/transformers.git
commit_id: 8e8384663d716d4b5a4f510070ff954fc0ba4a52
ast_levels: 8 | nloc: 3 | complexity: 1 | token_counts: 23 | n_ast_errors: 1 | vocab_size: 27 | n_ast_nodes: 67
documentation:
{ "docstring": "\n Method used for serving the model.\n\n Args:\n inputs (`Dict[str, tf.Tensor]`):\n The input of the saved model as a dictionary of tensors.\n \n This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it\n as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and\n behavior.\n\n <Tip>\n\n TF 2.0 models accepts two formats as inputs:\n\n - having all inputs as keyword arguments (like PyTorch models), or\n - having all inputs as a list, tuple or dict in the first positional arguments.\n\n This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the\n tensors in the first argument of the model call function: `model(inputs)`.\n\n </Tip>\n\n Parameters:\n config ([`ConvNextConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n\n Args:\n pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]` ``Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`ConvNextFeatureExtractor`]. See\n [`ConvNextFeatureExtractor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail. This argument can be used only in eager mode, in graph mode the value in the config will be\n used instead.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in\n eager mode, in graph mode the value will always be set to True.\n", "language": "en", "n_whitespaces": 518, "n_words": 298, "vocab_size": 171 }
code:

def serving(self, inputs):
    output = self.call(inputs)
    return self.serving_output(output)


CONVNEXT_START_DOCSTRING = r
CONVNEXT_INPUTS_DOCSTRING = r


@add_start_docstrings(
    "The bare ConvNext model outputting raw features without any specific head on top.",
    CONVNEXT_START_DOCSTRING,
)
----------------------------------------------------------------------
d_id: 55,333 | id: 218,477 | n_whitespaces: 224 | n_words: 71 | n_identifiers: 12
path: python3.10.4/Lib/inspect.py
random_cut: def getclasstree(classes, unique=False): children = {} roots = [] for c in classes: if c.__bases__: for parent in c.__bases__: if parent not in children: children[parent] = [] if c not in children[parent]: children[parent].append(c) if unique and parent in classes: break elif c not in roots: roots.append(c) for parent in
commit_message:
add python 3.10.4 for windows
fun_name: getclasstree | repo: XX-Net | file_name: inspect.py | language: Python
url: https://github.com/XX-net/XX-Net.git
commit_id: 8198943edd73a363c266633e1aa5b2a9e9c9f526
ast_levels: 16 | nloc: 17 | complexity: 11 | token_counts: 112 | n_ast_errors: 0 | vocab_size: 41 | n_ast_nodes: 191
documentation:
{ "docstring": "Arrange the given list of classes into a hierarchy of nested lists.\n\n Where a nested list appears, it contains classes derived from the class\n whose entry immediately precedes the list. Each entry is a 2-tuple\n containing a class and a tuple of its base classes. If the 'unique'\n argument is true, exactly one entry appears in the returned structure\n for each class in the given list. Otherwise, classes using multiple\n inheritance and their descendants will appear multiple times.", "language": "en", "n_whitespaces": 98, "n_words": 78, "vocab_size": 53 }
code:

def getclasstree(classes, unique=False):
    children = {}
    roots = []
    for c in classes:
        if c.__bases__:
            for parent in c.__bases__:
                if parent not in children:
                    children[parent] = []
                if c not in children[parent]:
                    children[parent].append(c)
                if unique and parent in classes:
                    break
        elif c not in roots:
            roots.append(c)
    for parent in children:
        if parent not in classes:
            roots.append(parent)
    return walktree(roots, children, None)

# ------------------------------------------------ argument list extraction
Arguments = namedtuple('Arguments', 'args, varargs, varkw')
----------------------------------------------------------------------
d_id: 12,574 | id: 61,435 | n_whitespaces: 31 | n_words: 10 | n_identifiers: 4
path: .venv/lib/python3.8/site-packages/pip/_internal/vcs/versioncontrol.py
random_cut: def get_revision(cls, location): # type: (s
commit_message:
upd; format
fun_name: get_revision | repo: transferlearning | file_name: versioncontrol.py | language: Python
url: https://github.com/jindongwang/transferlearning.git
commit_id: f638f5d0e6c8ebed0e69a6584bc7f003ec646580
ast_levels: 6 | nloc: 2 | complexity: 1 | token_counts: 10 | n_ast_errors: 0 | vocab_size: 10 | n_ast_nodes: 19
documentation:
{ "docstring": "\n Return the current commit id of the files at the given location.\n ", "language": "en", "n_whitespaces": 27, "n_words": 12, "vocab_size": 10 }
code:

def get_revision(cls, location):
    # type: (str) -> str
    raise NotImplementedError
----------------------------------------------------------------------
d_id: 11,185 | id: 55,038 | n_whitespaces: 42 | n_words: 20 | n_identifiers: 9
path: src/prefect/settings.py
random_cut: def get_current_settings() -> Settings: from prefect.context import ProfileContext profile = ProfileCo
commit_message:
Rewrite temporary settings to use copy_with_update
fun_name: get_current_settings | repo: prefect | file_name: settings.py | language: Python
url: https://github.com/PrefectHQ/prefect.git
commit_id: 95b47e807fa5ccc626a06efc2cced0d8ff8eadfa
ast_levels: 8 | nloc: 10 | complexity: 2 | token_counts: 34 | n_ast_errors: 0 | vocab_size: 18 | n_ast_nodes: 58
documentation:
{ "docstring": "\n Returns a settings object populated with values from the current profile or, if no\n profile is active, the environment.\n ", "language": "en", "n_whitespaces": 29, "n_words": 19, "vocab_size": 17 }
code:

def get_current_settings() -> Settings:
    from prefect.context import ProfileContext

    profile = ProfileContext.get()
    if profile is not None:
        return profile.settings

    return get_settings_from_env()
----------------------------------------------------------------------
d_id: 46,020 | id: 189,215 | n_whitespaces: 456 | n_words: 101 | n_identifiers: 31
path: tests/unit/customizations/s3/test_comparator.py
random_cut: def test_compare_key_greater(self): self.not_at_dest_sync_strategy.determine_should_sync.return_value = False # Try when the sync strategy says to sync the file. self.not_at_src_syn
commit_message:
Delete extra whitespace A correction that does not affect the operation.
fun_name: test_compare_key_greater | repo: aws-cli | file_name: test_comparator.py | language: Python
url: https://github.com/aws/aws-cli.git
commit_id: 8a16d7d8ce5e3f97fb100af7a960224f7f80137d
ast_levels: 10 | nloc: 30 | complexity: 3 | token_counts: 230 | n_ast_errors: 0 | vocab_size: 53 | n_ast_nodes: 378
documentation:
{ "docstring": "\n Confirm the appropriate action is taken when the soruce compare key\n is greater than the destination compare key.\n ", "language": "en", "n_whitespaces": 40, "n_words": 18, "vocab_size": 14 }
code:

def test_compare_key_greater(self):
    self.not_at_dest_sync_strategy.determine_should_sync.return_value = False

    # Try when the sync strategy says to sync the file.
    self.not_at_src_sync_strategy.determine_should_sync.return_value = True
    src_files = []
    dest_files = []
    ref_list = []
    result_list = []
    time = datetime.datetime.now()
    src_file = FileStat(src='', dest='', compare_key='domparator_test.py',
                        size=10, last_update=time, src_type='local',
                        dest_type='s3', operation_name='upload')
    dest_file = FileStat(src='', dest='', compare_key='comparator_test.py',
                         size=10, last_update=time, src_type='s3',
                         dest_type='local', operation_name='')
    src_files.append(src_file)
    dest_files.append(dest_file)
    ref_list.append(dest_file)
    files = self.comparator.call(iter(src_files), iter(dest_files))
    for filename in files:
        result_list.append(filename)
    self.assertEqual(result_list, ref_list)

    # Now try when the sync strategy says not to sync the file.
    self.not_at_src_sync_strategy.determine_should_sync.return_value = False
    result_list = []
    ref_list = []
    files = self.comparator.call(iter(src_files), iter(dest_files))
    for filename in files:
        result_list.append(filename)
    self.assertEqual(result_list, ref_list)
----------------------------------------------------------------------
d_id: 50,133 | id: 202,469 | n_whitespaces: 95 | n_words: 20 | n_identifiers: 18
path: tests/custom_lookups/tests.py
random_cut: def test_custom_exact_lookup_none_rhs(self): field = Author._meta.get_field("birthdate") OldExactLookup = field.get_lookup("exact") author = Author.objects.create(name="author", birthdate=None) try: field.register_lookup(Exactly, "exact"
commit_message:
Refs #33476 -- Reformatted code with Black.
fun_name: test_custom_exact_lookup_none_rhs | repo: django | file_name: tests.py | language: Python
url: https://github.com/django/django.git
commit_id: 9c19aff7c7561e3a82978a272ecdaad40dda5c00
ast_levels: 12 | nloc: 9 | complexity: 2 | token_counts: 77 | n_ast_errors: 0 | vocab_size: 17 | n_ast_nodes: 134
documentation:
{ "docstring": "\n __exact=None is transformed to __isnull=True if a custom lookup class\n with lookup_name != 'exact' is registered as the `exact` lookup.\n ", "language": "en", "n_whitespaces": 42, "n_words": 20, "vocab_size": 19 }
code:

def test_custom_exact_lookup_none_rhs(self):
    field = Author._meta.get_field("birthdate")
    OldExactLookup = field.get_lookup("exact")
    author = Author.objects.create(name="author", birthdate=None)
    try:
        field.register_lookup(Exactly, "exact")
        self.assertEqual(Author.objects.get(birthdate__exact=None), author)
    finally:
        field.register_lookup(OldExactLookup, "exact")
----------------------------------------------------------------------
d_id: 1,621 | id: 9,461 | n_whitespaces: 127 | n_words: 63 | n_identifiers: 13
path: reconstruction/ostec/external/stylegan2/metrics/precision_recall.py
random_cut: def batch_pairwise_distances(U, V): with tf.variable_scope('pairwise_dist_block'): # Squared norms of each row in U and V. norm_u = tf.reduce_sum(tf.square(U), 1) norm_v = tf.reduce_sum(tf.square(V), 1) # norm_u as a row and norm_v as a column vectors. norm_u = tf.reshape(norm_u, [-1, 1]) norm_v = tf.reshape(norm_v, [1, -1]) # Pairwise squared Euclidean distances. D = tf.maximum(norm_u - 2*tf.matmul(U, V, False, True) + norm_v, 0.0) return D #-----------------------------
commit_message:
initialize ostec
fun_name: batch_pairwise_distances | repo: insightface | file_name: precision_recall.py | language: Python
url: https://github.com/deepinsight/insightface.git
commit_id: 7375ee364e0df2a417f92593e09557f1b2a3575a
ast_levels: 15 | nloc: 8 | complexity: 1 | token_counts: 107 | n_ast_errors: 0 | vocab_size: 47 | n_ast_nodes: 167
documentation:
{ "docstring": " Compute pairwise distances between two batches of feature vectors.", "language": "en", "n_whitespaces": 9, "n_words": 9, "vocab_size": 9 }
code:

def batch_pairwise_distances(U, V):
    with tf.variable_scope('pairwise_dist_block'):
        # Squared norms of each row in U and V.
        norm_u = tf.reduce_sum(tf.square(U), 1)
        norm_v = tf.reduce_sum(tf.square(V), 1)

        # norm_u as a row and norm_v as a column vectors.
        norm_u = tf.reshape(norm_u, [-1, 1])
        norm_v = tf.reshape(norm_v, [1, -1])

        # Pairwise squared Euclidean distances.
        D = tf.maximum(norm_u - 2*tf.matmul(U, V, False, True) + norm_v, 0.0)

    return D

#----------------------------------------------------------------------------
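As an aside, the identity this kernel relies on, ||u - v||^2 = ||u||^2 - 2 u.v + ||v||^2, is easy to sanity-check against a direct computation with plain NumPy; a minimal sketch (array shapes chosen here purely for illustration):

    import numpy as np

    U = np.random.rand(4, 3)
    V = np.random.rand(5, 3)

    # Expansion of the squared Euclidean distance, mirroring the TF code above.
    D = np.maximum(
        (U**2).sum(1)[:, None] - 2 * U @ V.T + (V**2).sum(1)[None, :], 0.0
    )

    # Direct pairwise computation for comparison.
    D_ref = ((U[:, None, :] - V[None, :, :]) ** 2).sum(-1)
    assert np.allclose(D, D_ref)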
----------------------------------------------------------------------
d_id: 5,702 | id: 31,219 | n_whitespaces: 109 | n_words: 27 | n_identifiers: 11
path: src/transformers/image_utils.py
random_cut: def expand_dims(self, image): self._ensure_format
commit_message:
Enable crop_center method to handle (W, H, C) images (#17626) * enable crop_center method to handle (W, H, C) images * minor style and comment edits
fun_name: expand_dims | repo: transformers | file_name: image_utils.py | language: Python
url: https://github.com/huggingface/transformers.git
commit_id: 49becbaa5549b477b0d96c55f207614773c0ab42
ast_levels: 12 | nloc: 9 | complexity: 3 | token_counts: 58 | n_ast_errors: 0 | vocab_size: 19 | n_ast_nodes: 95
documentation:
{ "docstring": "\n Expands 2-dimensional `image` to 3 dimensions.\n\n Args:\n image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):\n The image to expand.\n ", "language": "en", "n_whitespaces": 65, "n_words": 17, "vocab_size": 14 }
code:

def expand_dims(self, image):
    self._ensure_format_supported(image)

    # Do nothing if PIL image
    if isinstance(image, PIL.Image.Image):
        return image

    if is_torch_tensor(image):
        image = image.unsqueeze(0)
    else:
        image = np.expand_dims(image, axis=0)
    return image
----------------------------------------------------------------------
d_id: 9,128 | id: 47,487 | n_whitespaces: 238 | n_words: 68 | n_identifiers: 38
path: tests/jobs/test_scheduler_job.py
random_cut: def test_find_executable_task_instances_order_execution_date(self, dag_maker): dag_id_1 = 'SchedulerJobTest.test_find_executable_task_instances_or
commit_message:
Replace usage of `DummyOperator` with `EmptyOperator` (#22974) * Replace usage of `DummyOperator` with `EmptyOperator`
fun_name: test_find_executable_task_instances_order_execution_date | repo: airflow | file_name: test_scheduler_job.py | language: Python
url: https://github.com/apache/airflow.git
commit_id: 49e336ae0302b386a2f47269a6d13988382d975f
ast_levels: 13 | nloc: 22 | complexity: 3 | token_counts: 193 | n_ast_errors: 0 | vocab_size: 48 | n_ast_nodes: 314
documentation:
{ "docstring": "\n Test that task instances follow execution_date order priority. If two dagruns with\n different execution dates are scheduled, tasks with earliest dagrun execution date will first\n be executed\n ", "language": "en", "n_whitespaces": 56, "n_words": 27, "vocab_size": 25 }
code:

def test_find_executable_task_instances_order_execution_date(self, dag_maker):
    dag_id_1 = 'SchedulerJobTest.test_find_executable_task_instances_order_execution_date-a'
    dag_id_2 = 'SchedulerJobTest.test_find_executable_task_instances_order_execution_date-b'
    task_id = 'task-a'
    session = settings.Session()
    with dag_maker(dag_id=dag_id_1, max_active_tasks=16, session=session):
        EmptyOperator(task_id=task_id)
    dr1 = dag_maker.create_dagrun(execution_date=DEFAULT_DATE + timedelta(hours=1))

    with dag_maker(dag_id=dag_id_2, max_active_tasks=16, session=session):
        EmptyOperator(task_id=task_id)
    dr2 = dag_maker.create_dagrun()

    dr1 = session.merge(dr1, load=False)

    self.scheduler_job = SchedulerJob(subdir=os.devnull)

    tis = dr1.task_instances + dr2.task_instances
    for ti in tis:
        ti.state = State.SCHEDULED
        session.merge(ti)
    session.flush()

    res = self.scheduler_job._executable_task_instances_to_queued(max_tis=1, session=session)
    session.flush()
    assert [ti.key for ti in res] == [tis[1].key]
    session.rollback()
----------------------------------------------------------------------
d_id: 18,290 | id: 87,377 | n_whitespaces: 332 | n_words: 89 | n_identifiers: 30
path: src/sentry/web/frontend/base.py
random_cut: def dispatch(self, request, *args, **kwargs): self.determine_active_organization(request, kwargs.get("organization_slug", None)) if self.csrf_protect: if hasattr(self.dispatch.__func__, "csrf_exempt"): delattr(self.dispatch.__func__, "csrf_exempt") response = self.test_csrf(request) if response: return response if self.is_auth_required(request, *args, **kwargs): return self.handle_auth_required(request, *args, **kwargs) if self.is_sudo_required(request, *args, **kwargs): return self.handle_sudo_required(request, *a
commit_message:
chore(hybrid-cloud): Refactor Organization ORM out of views and auth (#40362) For hybrid cloud, the organization and related models will not exist in the control silo, but will be necessary for certain auth related flows. This change is the first of many to make the core auth flows compatible with a split silo world by introducing a service object that captures existing needs for an organization arond the `get_active_organization` method. Behavior should remain identical, except that the pure ORM object is not available in many places. Those places have been updated to use a new thinner model object that corresponds with future control silo's data availability. Co-authored-by: getsantry[bot] <66042841+getsantry[bot]@users.noreply.github.com>
fun_name: dispatch | repo: sentry | file_name: base.py | language: Python
url: https://github.com/getsentry/sentry.git
commit_id: a882713d1b8fc6f30ba7e8717252334d6720caa9
ast_levels: 13 | nloc: 25 | complexity: 10 | token_counts: 268 | n_ast_errors: 0 | vocab_size: 47 | n_ast_nodes: 415
documentation:
{ "docstring": "\n A note on the CSRF protection process.\n\n Because the CSRF decorators don't work well with view subclasses, we\n allow them to control whether a CSRF check is done by setting\n self.csrf_protect. This has a couple of implications:\n\n 1. We need to mark this method as @csrf_exempt so that when the CSRF\n middleware checks it as part of the regular middleware sequence, it\n always passes.\n 2. If self.csrf_protect is set, we will re-run the CSRF check ourselves\n using CsrfViewMiddleware().process_view()\n 3. But first we must remove the csrf_exempt attribute that was set by\n the decorator so that the middleware doesn't shortcut and pass the\n check unconditionally again.\n\n ", "language": "en", "n_whitespaces": 212, "n_words": 105, "vocab_size": 77 }
code:

def dispatch(self, request, *args, **kwargs):
    self.determine_active_organization(request, kwargs.get("organization_slug", None))

    if self.csrf_protect:
        if hasattr(self.dispatch.__func__, "csrf_exempt"):
            delattr(self.dispatch.__func__, "csrf_exempt")
        response = self.test_csrf(request)
        if response:
            return response

    if self.is_auth_required(request, *args, **kwargs):
        return self.handle_auth_required(request, *args, **kwargs)

    if self.is_sudo_required(request, *args, **kwargs):
        return self.handle_sudo_required(request, *args, **kwargs)

    args, kwargs = self.convert_args(request, *args, **kwargs)
    request.access = self.get_access(request, *args, **kwargs)

    if not self.has_permission(request, *args, **kwargs):
        return self.handle_permission_required(request, *args, **kwargs)

    if "organization" in kwargs:
        org = kwargs["organization"]
        if self.is_member_disabled_from_limit(request, org):
            return self.handle_disabled_member(org)
        if self.is_not_2fa_compliant(request, org):
            return self.handle_not_2fa_compliant(request, *args, **kwargs)

    self.request = request
    self.default_context = self.get_context_data(request, *args, **kwargs)

    return self.handle(request, *args, **kwargs)
----------------------------------------------------------------------
d_id: 20,640 | id: 101,220 | n_whitespaces: 32 | n_words: 11 | n_identifiers: 4
path: lib/align/detected_face.py
random_cut: def interpolator(self) -> int: assert self._interpolator is
commit_message:
lib.align updates: - alignments.py - Add typed dicts for imported alignments - Explicitly check for presence of thumb value in alignments dict - linting - detected_face.py - Typing - Linting - Legacy support for pre-aligned face - Update dependencies to new property names
fun_name: interpolator | repo: faceswap | file_name: detected_face.py | language: Python
url: https://github.com/deepfakes/faceswap.git
commit_id: 5e73437be47f2410439a3c6716de96354e6a0c94
ast_levels: 7 | nloc: 4 | complexity: 1 | token_counts: 19 | n_ast_errors: 0 | vocab_size: 10 | n_ast_nodes: 32
documentation:
{ "docstring": " int: The cv2 interpolator required to transpose the mask to a full frame. ", "language": "en", "n_whitespaces": 14, "n_words": 13, "vocab_size": 12 }
code:

def interpolator(self) -> int:
    assert self._interpolator is not None
    return self._interpolator
----------------------------------------------------------------------
d_id: 28,754 | id: 128,590 | n_whitespaces: 29 | n_words: 17 | n_identifiers: 6
path: python/ray/tune/tests/test_cluster.py
random_cut: def test_cluster_interrupt(start_connected_cluster, tmpdir): cluster = start_connected_cluster dirpath = str(tmpdir) # Needs to be in scope for pytest
commit_message:
[tune] Store sync config/checkpoint config in experiment, trial (#29019) This is some clean-up required for future changes to the syncing/checkpointing behavior. At the moment we pass single attributes of these configs to the Experiment class, and then subsequently to the Trial class, from which it is passed on to the trainable. If we extend the configurability in the future (e.g. provide fallback mechanisms in the checkpoint config, or make retry wait times configurable in the sync config), we would have to add more and more attributes to these intermediate classes. Instead, we should just pass and store the full config. As a next follow-up, we can pass these configs to the Trainable. Signed-off-by: Kai Fricke <[email protected]>
fun_name: test_cluster_interrupt | repo: ray | file_name: test_cluster.py | language: Python
url: https://github.com/ray-project/ray.git
commit_id: e142be077f0c727ab11ba51ecaba9a98b7bfe474
ast_levels: 8 | nloc: 75 | complexity: 11 | token_counts: 335 | n_ast_errors: 0 | vocab_size: 16 | n_ast_nodes: 31
documentation:
{ "docstring": "Tests run_experiment on cluster shutdown with actual interrupt.\n\n This is an end-to-end test.\n ", "language": "en", "n_whitespaces": 19, "n_words": 13, "vocab_size": 13 }
code (truncated in the source):

def test_cluster_interrupt(start_connected_cluster, tmpdir):
    cluster = start_connected_cluster
    dirpath = str(tmpdir)

    # Needs to be in scope for pytest
----------------------------------------------------------------------
d_id: 71,656 | id: 247,400 | n_whitespaces: 200 | n_words: 85 | n_identifiers: 8
path: tests/rest/media/v1/test_oembed.py
random_cut: def test_version(self) -> None: for version in ("1.0", 1.0, 1): result = self.parse_response({"version": version, "type": "link"}) # An empty Open Graph response is an error, ensure the URL is included. self.assertIn("og:url", result.open_graph_result) # A missing version should be treated as 1.0. result = self.parse_response({"type": "link"})
commit_message:
Add type hints to `tests/rest` (#12146) * Add type hints to `tests/rest` * newsfile * change import from `SigningKey`
fun_name: test_version | repo: synapse | file_name: test_oembed.py | language: Python
url: https://github.com/matrix-org/synapse.git
commit_id: 7e91107be1a4287873266e588a3c5b415279f4c8
ast_levels: 13 | nloc: 10 | complexity: 3 | token_counts: 119 | n_ast_errors: 0 | vocab_size: 50 | n_ast_nodes: 200
documentation:
{ "docstring": "Accept versions that are similar to 1.0 as a string or int (or missing).", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 14 }
code:

def test_version(self) -> None:
    for version in ("1.0", 1.0, 1):
        result = self.parse_response({"version": version, "type": "link"})
        # An empty Open Graph response is an error, ensure the URL is included.
        self.assertIn("og:url", result.open_graph_result)

    # A missing version should be treated as 1.0.
    result = self.parse_response({"type": "link"})
    self.assertIn("og:url", result.open_graph_result)

    # Invalid versions should be rejected.
    for version in ("2.0", "1", 1.1, 0, None, {}, []):
        result = self.parse_response({"version": version, "type": "link"})
        # An empty Open Graph response is an error, ensure the URL is included.
        self.assertEqual({}, result.open_graph_result)
----------------------------------------------------------------------
d_id: 15,845 | id: 72,190 | n_whitespaces: 100 | n_words: 30 | n_identifiers: 14
path: wagtail/admin/tests/test_userbar.py
random_cut: def test_page_allowing_subpages(self): response = self.client.get( reverse("wagtailadmin_userbar_frontend", args=(self.event_index.id,)) ) # page allows subpages, so the 'add page' button should show expected_url = reverse(
commit_message:
Reformat with black
fun_name: test_page_allowing_subpages | repo: wagtail | file_name: test_userbar.py | language: Python
url: https://github.com/wagtail/wagtail.git
commit_id: d10f15e55806c6944827d801cd9c2d53f5da4186
ast_levels: 14 | nloc: 16 | complexity: 1 | token_counts: 63 | n_ast_errors: 0 | vocab_size: 27 | n_ast_nodes: 106
documentation:
{ "docstring": "\n <a href=\"{expected_url}\" target=\"_parent\" role=\"menuitem\">\n <svg class=\"icon icon-plus wagtail-action-icon\" aria-hidden=\"true\" focusable=\"false\">\n <use href=\"#icon-plus\"></use>\n </svg>\n Add a child page\n </a>\n ", "language": "en", "n_whitespaces": 116, "n_words": 18, "vocab_size": 18 }
code:

def test_page_allowing_subpages(self):
    response = self.client.get(
        reverse("wagtailadmin_userbar_frontend", args=(self.event_index.id,))
    )

    # page allows subpages, so the 'add page' button should show
    expected_url = reverse(
        "wagtailadmin_pages:add_subpage", args=(self.event_index.id,)
    )
    needle = f
    self.assertTagInHTML(needle, str(response.content))
----------------------------------------------------------------------
d_id: 38,944 | id: 161,199 | n_whitespaces: 132 | n_words: 39 | n_identifiers: 21
path: mkgui/app.py
random_cut: def render_output_ui(self, streamlit_app, input) -> None: # type: ignore src, result = self.__root__ streamlit_app.subheader("Synthesized Audio") streamlit_app.audio(result.content, format="audio/wav") fig, ax = plt.subplots() ax.imshow(src.mel, aspect="equal", interpolation="none") ax.set_title("mel spectrogram(Source Audio)") streamlit_app.pyplot(fig) fig, ax = plt.subplots() ax.imshow(result.mel, aspect="equal", interpolation="none") ax.set_title("mel spectrogram(Result
commit_message:
Upgrade to new web service (#529) * Init new GUI * Remove unused codes * Reset layout * Add samples * Make framework to support multiple pages * Add vc mode * Add preprocessing mode * Add training mode * Remove text input in vc mode * Add entry for GUI and revise readme * Move requirement together * Add error raise when no model folder found * Add readme
fun_name: render_output_ui | repo: MockingBird | file_name: app.py | language: Python
url: https://github.com/babysor/MockingBird.git
commit_id: c5d03fb3cbf5105aa45dc131474260cf140b748b
ast_levels: 9 | nloc: 15 | complexity: 1 | token_counts: 111 | n_ast_errors: 0 | vocab_size: 29 | n_ast_nodes: 192
documentation:
{ "docstring": "Custom output UI.\n If this method is implmeneted, it will be used instead of the default Output UI renderer.\n ", "language": "en", "n_whitespaces": 33, "n_words": 19, "vocab_size": 19 }
code:

def render_output_ui(self, streamlit_app, input) -> None:  # type: ignore
    src, result = self.__root__

    streamlit_app.subheader("Synthesized Audio")
    streamlit_app.audio(result.content, format="audio/wav")

    fig, ax = plt.subplots()
    ax.imshow(src.mel, aspect="equal", interpolation="none")
    ax.set_title("mel spectrogram(Source Audio)")
    streamlit_app.pyplot(fig)
    fig, ax = plt.subplots()
    ax.imshow(result.mel, aspect="equal", interpolation="none")
    ax.set_title("mel spectrogram(Result Audio)")
    streamlit_app.pyplot(fig)
----------------------------------------------------------------------
ast_errors: @keras_export("keras.dtensor.experimental.layout_map_scope", v1=[]) @contextlib.contextmanager
d_id: 80,492 | id: 270,593 | n_whitespaces: 21 | n_words: 10 | n_identifiers: 11
path: keras/dtensor/layout_map.py
random_cut: def get_default_mesh(self): return self._default_mesh LayoutMap.get.__doc__ = LayoutMap
commit_message:
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
fun_name: get_default_mesh | repo: keras | file_name: layout_map.py | language: Python
url: https://github.com/keras-team/keras.git
commit_id: 84afc5193d38057e2e2badf9c889ea87d80d8fbf
ast_levels: 8 | nloc: 2 | complexity: 1 | token_counts: 10 | n_ast_errors: 1 | vocab_size: 10 | n_ast_nodes: 60
documentation:
{ "docstring": "Return the default `Mesh` set at instance creation.\n\n The `Mesh` can be used to create default replicated `Layout` when there\n isn't a match of the input string query.\n ", "language": "en", "n_whitespaces": 49, "n_words": 28, "vocab_size": 25 }
code:

def get_default_mesh(self):
    return self._default_mesh


LayoutMap.get.__doc__ = LayoutMap.__getitem__.__doc__


@keras_export("keras.dtensor.experimental.layout_map_scope", v1=[])
@contextlib.contextmanager
----------------------------------------------------------------------
d_id: 35,845 | id: 154,186 | n_whitespaces: 27 | n_words: 12 | n_identifiers: 5
path: modin/pandas/indexing.py
random_cut: def __setitem__(self, key, item): # pragma: no cover raise NotImplementedError("Implemented by subclasses")
commit_message:
REFACTOR-#4730: make Indexers immutable (#4731) Signed-off-by: Brock Mendel <[email protected]>
fun_name: __setitem__ | repo: modin | file_name: indexing.py | language: Python
url: https://github.com/modin-project/modin.git
commit_id: 8e1190c9979a1df26ea570f3ad2ccd822ad54c8e
ast_levels: 8 | nloc: 2 | complexity: 1 | token_counts: 15 | n_ast_errors: 0 | vocab_size: 12 | n_ast_nodes: 28
documentation:
{ "docstring": "\n Assign `item` value to dataset located by `key`.\n\n Parameters\n ----------\n key : callable or tuple\n The global row numbers to assign data to.\n item : modin.pandas.DataFrame, modin.pandas.Series or scalar\n Value that should be assigned to located dataset.\n\n See Also\n --------\n pandas.DataFrame.iloc\n ", "language": "en", "n_whitespaces": 127, "n_words": 41, "vocab_size": 36 }
code:

def __setitem__(self, key, item):  # pragma: no cover
    raise NotImplementedError("Implemented by subclasses")
----------------------------------------------------------------------
d_id: 50,113 | id: 202,407 | n_whitespaces: 130 | n_words: 42 | n_identifiers: 21
path: tests/csrf_tests/tests.py
random_cut: def test_bad_origin_cannot_be_parsed(self): req = self._get_POST_request_with_token() req.META["HTTP_HOST"] = "www.example.com" req.META["HTTP_ORIGIN"] = "https://[" mw = CsrfViewMiddleware(post_form_view) self._check_referer_rejects(mw, req) self.assertIs(mw._origin_verified(req), False) with self.assertLogs("django.security.csrf", "WARNING") as cm: response = mw.process_view(req, post_form_view, (), {}) self.assertEqual(response.status_code, 403) msg = REASON_BAD_ORIGIN % req.META[
commit_message:
Refs #33476 -- Reformatted code with Black.
fun_name: test_bad_origin_cannot_be_parsed | repo: django | file_name: tests.py | language: Python
url: https://github.com/django/django.git
commit_id: 9c19aff7c7561e3a82978a272ecdaad40dda5c00
ast_levels: 11 | nloc: 12 | complexity: 1 | token_counts: 123 | n_ast_errors: 0 | vocab_size: 35 | n_ast_nodes: 210
documentation:
{ "docstring": "\n A POST request with an origin that can't be parsed by urlparse() is\n rejected.\n ", "language": "en", "n_whitespaces": 36, "n_words": 14, "vocab_size": 14 }
code:

def test_bad_origin_cannot_be_parsed(self):
    req = self._get_POST_request_with_token()
    req.META["HTTP_HOST"] = "www.example.com"
    req.META["HTTP_ORIGIN"] = "https://["
    mw = CsrfViewMiddleware(post_form_view)
    self._check_referer_rejects(mw, req)
    self.assertIs(mw._origin_verified(req), False)
    with self.assertLogs("django.security.csrf", "WARNING") as cm:
        response = mw.process_view(req, post_form_view, (), {})
    self.assertEqual(response.status_code, 403)
    msg = REASON_BAD_ORIGIN % req.META["HTTP_ORIGIN"]
    self.assertEqual(cm.records[0].getMessage(), "Forbidden (%s): " % msg)
----------------------------------------------------------------------
d_id: 23,516 | id: 109,299 | n_whitespaces: 18 | n_words: 12 | n_identifiers: 6
path: lib/mpl_toolkits/mplot3d/axis3d.py
random_cut: def move_from_center(coord, centers, deltas, axmask=(True, True, True)): return _move_from_center
commit_message:
Deprecate helper functions in axis3d
fun_name: move_from_center | repo: matplotlib | file_name: axis3d.py | language: Python
url: https://github.com/matplotlib/matplotlib.git
commit_id: b89ed5752c2a3b4eb9c9a3bf57848f543765fd6d
ast_levels: 8 | nloc: 6 | complexity: 1 | token_counts: 33 | n_ast_errors: 0 | vocab_size: 10 | n_ast_nodes: 46
documentation:
{ "docstring": "\n For each coordinate where *axmask* is True, move *coord* away from\n *centers* by *deltas*.\n ", "language": "en", "n_whitespaces": 24, "n_words": 14, "vocab_size": 14 }
code:

def move_from_center(coord, centers, deltas, axmask=(True, True, True)):
    return _move_from_center(coord, centers, deltas, axmask=axmask)
----------------------------------------------------------------------
d_id: 29,113 | id: 130,122 | n_whitespaces: 497 | n_words: 162 | n_identifiers: 17
path: python/ray/_private/function_manager.py
random_cut: def get_execution_info(self, job_id, function_descriptor): function_id = function_descriptor.function_id # If the function has already been loaded, # There's no need to load again if function_id in self._function_execution_info: return self._function_execution_info[function_id] if self._worker.load_code_from_local: # Load function from local code. if not function_descriptor.is_actor_method(): # If the function is not able to be loaded, # try to load it from GCS, # even if load_code_from_local is set True if self._load_function_from_local(function_descriptor) is True: return self._function_execution_info[function_id] # Load function from GCS. # Wait until the function to be executed has actually been # registered on this worker. We will push w
commit_message:
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
fun_name: get_execution_info | repo: ray | file_name: function_manager.py | language: Python
url: https://github.com/ray-project/ray.git
commit_id: 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
ast_levels: 13 | nloc: 21 | complexity: 6 | token_counts: 118 | n_ast_errors: 0 | vocab_size: 99 | n_ast_nodes: 206
documentation:
{ "docstring": "Get the FunctionExecutionInfo of a remote function.\n Args:\n job_id: ID of the job that the function belongs to.\n function_descriptor: The FunctionDescriptor of the function to get.\n Returns:\n A FunctionExecutionInfo object.\n ", "language": "en", "n_whitespaces": 84, "n_words": 30, "vocab_size": 23 }
code:

def get_execution_info(self, job_id, function_descriptor):
    function_id = function_descriptor.function_id
    # If the function has already been loaded,
    # There's no need to load again
    if function_id in self._function_execution_info:
        return self._function_execution_info[function_id]
    if self._worker.load_code_from_local:
        # Load function from local code.
        if not function_descriptor.is_actor_method():
            # If the function is not able to be loaded,
            # try to load it from GCS,
            # even if load_code_from_local is set True
            if self._load_function_from_local(function_descriptor) is True:
                return self._function_execution_info[function_id]
    # Load function from GCS.
    # Wait until the function to be executed has actually been
    # registered on this worker. We will push warnings to the user if
    # we spend too long in this loop.
    # The driver function may not be found in sys.path. Try to load
    # the function from GCS.
    with profiling.profile("wait_for_function"):
        self._wait_for_function(function_descriptor, job_id)
    try:
        function_id = function_descriptor.function_id
        info = self._function_execution_info[function_id]
    except KeyError as e:
        message = (
            "Error occurs in get_execution_info: "
            "job_id: %s, function_descriptor: %s. Message: %s"
            % (job_id, function_descriptor, e)
        )
        raise KeyError(message)
    return info
----------------------------------------------------------------------
d_id: 19,997 | id: 100,533 | n_whitespaces: 26 | n_words: 12 | n_identifiers: 8
path: lib/gpu_stats/_base.py
random_cut: def exclude_all_devices(self) -> bool: return all(idx in _EXCLUDE_DEVICES for idx in range(self._device_count))
commit_message:
Refactor lib.gpu_stats (#1218) * inital gpu_stats refactor * Add dummy CPU Backend * Update Sphinx documentation
fun_name: exclude_all_devices | repo: faceswap | file_name: _base.py | language: Python
url: https://github.com/deepfakes/faceswap.git
commit_id: bdbbad4d310fb606b6f412aa81e9f57ccd994e97
ast_levels: 11 | nloc: 3 | complexity: 2 | token_counts: 24 | n_ast_errors: 0 | vocab_size: 11 | n_ast_nodes: 40
documentation:
{ "docstring": " bool: ``True`` if all GPU devices have been explicitly disabled otherwise ``False`` ", "language": "en", "n_whitespaces": 13, "n_words": 12, "vocab_size": 12 }
code:

def exclude_all_devices(self) -> bool:
    return all(idx in _EXCLUDE_DEVICES for idx in range(self._device_count))
----------------------------------------------------------------------
d_id: 93,467 | id: 294,430 | n_whitespaces: 126 | n_words: 35 | n_identifiers: 9
path: homeassistant/components/alexa/resources.py
random_cut: def serialize_labels(self, resources): labels = [] for label in resources: if label in AlexaGlobalCatalog.__dict__.values(): label = {"@type": "asset", "value": {"assetId": label}} else: label = {"@type": "text", "va
commit_message:
Update pylint to 2.13.0 (#68656)
fun_name: serialize_labels | repo: core | file_name: resources.py | language: Python
url: https://github.com/home-assistant/core.git
commit_id: 53245c65238e3009dd1f3412f7f9bef10385f64e
ast_levels: 16 | nloc: 9 | complexity: 3 | token_counts: 76 | n_ast_errors: 0 | vocab_size: 27 | n_ast_nodes: 141
documentation:
{ "docstring": "Return resource label objects for friendlyNames serialized for an API response.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 10 }
code:

def serialize_labels(self, resources):
    labels = []
    for label in resources:
        if label in AlexaGlobalCatalog.__dict__.values():
            label = {"@type": "asset", "value": {"assetId": label}}
        else:
            label = {"@type": "text", "value": {"text": label, "locale": "en-US"}}

        labels.append(label)

    return {"friendlyNames": labels}
----------------------------------------------------------------------
d_id: 69,953 | id: 243,007 | n_whitespaces: 400 | n_words: 104 | n_identifiers: 24
path: src/PIL/PpmImagePlugin.py
random_cut: def _decode_bitonal(self): data = bytearray() total_bytes = self.state.xsize * self.state.ysize comment_spans = False while len(data) != total_bytes: block = self._read_block() # read next block if not block: # eof break while block and comment_spans: comment_end = self._find_comment_end(block) if comment_end != -1: # comment ends in this block block = block[comment_end + 1 :] # delete tail of previous comment break
commit_message:
Added support for PPM arbitrary maxval in plain formats
fun_name: _decode_bitonal | repo: Pillow | file_name: PpmImagePlugin.py | language: Python
url: https://github.com/python-pillow/Pillow.git
commit_id: c4d51fb2681c2434fd324098d116a66013549de7
ast_levels: 18 | nloc: 23 | complexity: 8 | token_counts: 159 | n_ast_errors: 0 | vocab_size: 65 | n_ast_nodes: 279
documentation:
{ "docstring": "\n This is a separate method because in the plain PBM format, all data tokens are\n exactly one byte, so the inter-token whitespace is optional.\n ", "language": "en", "n_whitespaces": 46, "n_words": 24, "vocab_size": 22 }
code:

def _decode_bitonal(self):
    data = bytearray()
    total_bytes = self.state.xsize * self.state.ysize

    comment_spans = False
    while len(data) != total_bytes:
        block = self._read_block()  # read next block
        if not block:
            # eof
            break

        while block and comment_spans:
            comment_end = self._find_comment_end(block)
            if comment_end != -1:  # comment ends in this block
                block = block[comment_end + 1 :]  # delete tail of previous comment
                break
            else:  # comment spans whole block
                block = self._read_block()

        block, comment_spans = self._ignore_comments(block)

        tokens = b"".join(block.split())
        for token in tokens:
            if token not in (48, 49):
                raise ValueError(f"Invalid token for this mode: {bytes([token])}")
        data = (data + tokens)[:total_bytes]

    invert = bytes.maketrans(b"01", b"\xFF\x00")
    return data.translate(invert)
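The final inversion step relies only on the standard library; a small illustration of what `bytes.maketrans` and `translate` do with the ASCII tokens 48 ('0') and 49 ('1'):

    invert = bytes.maketrans(b"01", b"\xff\x00")
    # ASCII '0' maps to 0xFF (white) and '1' to 0x00 (black), matching the
    # PBM convention where a 1 bit means a black pixel.
    assert b"0110".translate(invert) == b"\xff\x00\x00\xff"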
----------------------------------------------------------------------
d_id: 48,995 | id: 198,543 | n_whitespaces: 200 | n_words: 88 | n_identifiers: 25
path: sympy/solvers/solvers.py
random_cut: def recast_to_symbols(eqs, symbols): if not iterable(eqs) and iterable(symbols): raise ValueError('Both eqs and symbols must be iterable') orig = list(symbols) symbols = list(ordered(symbols)) swap_sym = {} i = 0 for j, s in e
commit_message:
ordered swaps and dict
fun_name: recast_to_symbols | repo: sympy | file_name: solvers.py | language: Python
url: https://github.com/sympy/sympy.git
commit_id: 883f3c95de8eaa79e04a6b78199e07f0d9fbba6c
ast_levels: 14 | nloc: 20 | complexity: 10 | token_counts: 163 | n_ast_errors: 0 | vocab_size: 59 | n_ast_nodes: 262
documentation:
{ "docstring": "\n Return (e, s, d) where e and s are versions of *eqs* and\n *symbols* in which any non-Symbol objects in *symbols* have\n been replaced with generic Dummy symbols and d is a dictionary\n that can be used to restore the original expressions.\n\n Examples\n ========\n\n >>> from sympy.solvers.solvers import recast_to_symbols\n >>> from sympy import symbols, Function\n >>> x, y = symbols('x y')\n >>> fx = Function('f')(x)\n >>> eqs, syms = [fx + 1, x, y], [fx, y]\n >>> e, s, d = recast_to_symbols(eqs, syms); (e, s, d)\n ([_X0 + 1, x, y], [_X0, y], {_X0: f(x)})\n\n The original equations and symbols can be restored using d:\n\n >>> assert [i.xreplace(d) for i in eqs] == eqs\n >>> assert [d.get(i, i) for i in s] == syms\n\n ", "language": "en", "n_whitespaces": 176, "n_words": 124, "vocab_size": 85 }
code:

def recast_to_symbols(eqs, symbols):
    if not iterable(eqs) and iterable(symbols):
        raise ValueError('Both eqs and symbols must be iterable')
    orig = list(symbols)
    symbols = list(ordered(symbols))
    swap_sym = {}
    i = 0
    for j, s in enumerate(symbols):
        if not isinstance(s, Symbol) and s not in swap_sym:
            swap_sym[s] = Dummy('X%d' % i)
            i += 1
    new_f = []
    for i in eqs:
        isubs = getattr(i, 'subs', None)
        if isubs is not None:
            new_f.append(isubs(swap_sym))
        else:
            new_f.append(i)
    restore = {v: k for k, v in swap_sym.items()}
    return new_f, [swap_sym.get(i, i) for i in orig], restore
----------------------------------------------------------------------
d_id: 16,955 | id: 79,676 | n_whitespaces: 466 | n_words: 91 | n_identifiers: 20
path: wagtail/models/reference_index.py
random_cut: def model_is_indexable(cls, model, allow_child_models=False): if getattr(model, "wagtail_reference_index_ignore", False): return False # Don't check any models that have a parental key, references from these will be collected from the parent if not allow_child_models and any( [isinstance(field, ParentalKey) for field in model._meta.get_fields()] ): return False for field in model._meta.get_fields(): if field.is_relation and field.many_to_one: if getattr(field, "wagtail_reference_index_ignore", False): continue if getattr( field.related_model, "wagtail_reference_index_ignore", False ): continue if isinstance(field, (ParentalKey, GenericRel)): continue return True if hasattr(field, "extract_references"): return True if issubclass(model, ClusterableModel): for child_relation in get_all_child_relations(model): if cls.model_is_indexable( child_relation.related_model, allow_child_models=True, ): return True return False
commit_message:
Check field for .extract_references method instead of field type Co-authored-by: Matt Westcott <[email protected]>
fun_name: model_is_indexable | repo: wagtail | file_name: reference_index.py | language: Python
url: https://github.com/wagtail/wagtail.git
commit_id: c8689acb3724dc12fb09a0bfc14d7e4755a1ea0f
ast_levels: 13 | nloc: 28 | complexity: 15 | token_counts: 156 | n_ast_errors: 0 | vocab_size: 59 | n_ast_nodes: 244
documentation:
{ "docstring": "\n Returns True if the given model may have outbound references that we would be interested in recording in the index.\n\n\n Args:\n model (type): a Django model class\n allow_child_models (boolean): Child models are not indexable on their own. If you are looking at\n a child model from the perspective of indexing it through its parent,\n set this to True to disable checking for this. Default False.\n ", "language": "en", "n_whitespaces": 191, "n_words": 65, "vocab_size": 55 }
code:

def model_is_indexable(cls, model, allow_child_models=False):
    if getattr(model, "wagtail_reference_index_ignore", False):
        return False

    # Don't check any models that have a parental key, references from
    # these will be collected from the parent
    if not allow_child_models and any(
        [isinstance(field, ParentalKey) for field in model._meta.get_fields()]
    ):
        return False

    for field in model._meta.get_fields():
        if field.is_relation and field.many_to_one:
            if getattr(field, "wagtail_reference_index_ignore", False):
                continue

            if getattr(
                field.related_model, "wagtail_reference_index_ignore", False
            ):
                continue

            if isinstance(field, (ParentalKey, GenericRel)):
                continue
            return True

        if hasattr(field, "extract_references"):
            return True

    if issubclass(model, ClusterableModel):
        for child_relation in get_all_child_relations(model):
            if cls.model_is_indexable(
                child_relation.related_model,
                allow_child_models=True,
            ):
                return True

    return False
----------------------------------------------------------------------
d_id: 4,204 | id: 22,132 | n_whitespaces: 150 | n_words: 51 | n_identifiers: 13
path: pipenv/patched/pip/_vendor/requests/utils.py
random_cut: def get_encodings_from_content(content): warnings.warn( ( "In requests 3.0, get_encodings_from_content will be removed. For " "more information, please see the discussion on issue #2266. (This" " warning should only appear once.)" ), DeprecationWarning, ) charset_re = re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I
commit_message:
Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.
fun_name: get_encodings_from_content | repo: pipenv | file_name: utils.py | language: Python
url: https://github.com/pypa/pipenv.git
commit_id: cd5a9683be69c86c8f3adcd13385a9bc5db198ec
ast_levels: 10 | nloc: 17 | complexity: 1 | token_counts: 81 | n_ast_errors: 0 | vocab_size: 44 | n_ast_nodes: 135
documentation:
{ "docstring": "Returns encodings from given content string.\n\n :param content: bytestring to extract encodings from.\n ", "language": "en", "n_whitespaces": 19, "n_words": 13, "vocab_size": 12 }
code:

def get_encodings_from_content(content):
    warnings.warn(
        (
            "In requests 3.0, get_encodings_from_content will be removed. For "
            "more information, please see the discussion on issue #2266. (This"
            " warning should only appear once.)"
        ),
        DeprecationWarning,
    )

    charset_re = re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I)
    pragma_re = re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I)
    xml_re = re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]')

    return (
        charset_re.findall(content)
        + pragma_re.findall(content)
        + xml_re.findall(content)
    )
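The three regexes cover the `<meta charset=...>` form, the older `http-equiv` pragma form, and XML declarations; a quick check of the charset pattern with the standard `re` module (the sample HTML below is invented for illustration):

    import re

    charset_re = re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I)
    html = '<html><head><meta charset="utf-8"></head></html>'
    # The lazy capture group grabs just the encoding name.
    assert charset_re.findall(html) == ["utf-8"]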
----------------------------------------------------------------------
d_id: 23,734 | id: 109,752 | n_whitespaces: 177 | n_words: 79 | n_identifiers: 21
path: lib/mpl_toolkits/mplot3d/axes3d.py
random_cut: def _scale_axis_limits(self, scale_x, scale_y, scale_z): # Get the axis limits and centers minx, maxx, miny, maxy, minz, maxz = self.get_w_lims() cx = (maxx + minx)/2 cy = (maxy + miny)/2 cz = (maxz + minz)/2 # Scale the data range dx = (maxx - minx)*scale_x dy = (maxy - miny)*scale_y dz = (maxz - minz)*scale_z # Set the scaled axis limits self.set_xlim3d(cx - dx/2, cx + dx/2) self.set_ylim3d(cy - dy/2, cy + dy/2)
commit_message:
Add pan and zoom toolbar handling to 3D Axes (Replaces PR#22614) (#23449) * ENH: Add pan and zoom toolbar handling to 3D Axes 1) This moves the pan logic that was already in the mouse move handler into the "drag_pan" method to make it available from the toolbar. 2) This expands upon the panning logic to enable a zoom-to-box feature. The zoom-to-box is done relative to the Axes, so it shrinks/expands the box as a fraction of each delta, from lower-left Axes to lower-left zoom-box. Thus, it tries to handle non-centered zooms, which adds more cases to handle versus the current right-click zoom only scaling from the center of the projection. * Rewrite zooming with bounding box * Rewrite 3d panning to work with a roll angle * Whats new for zoom and pan buttons * Make pan button configurable * Do not jump when zooming and mouse goes over other subplot * Rework zooming for 3d plots * Handle x/y lock when zooming and panning * Update tests * Docstrings * Dont assume a scale_z * Limit zoom box * Test zoom pan key modifiers * Save some calculation by saving view axes * Deprecation warnings for Axes3D.eye, .vvec * Remove Axes3D._prepare_view_from_bbox for now * Comments and docstrings * Switch from uvn to uvw * Save aspect to axes * Constrain zooming with mouse when one of the equal aspect ratios is set * Cleanup * Cleanup * Consolidate finding equal aspect axis indices * linting * More intuitive scaling * Box zoom keeps existing aspect ratios * Linting * Code review comments * Revert parameters for view_transformation * Fix new 3d pan/zoom view going on view stack twice * Better clipping * Test 3d toolbar navigation * Privatize helper functions * Deprecations * Code review changes * Deprecation note * Undeprecate proj3d.view_transformation * Undeprecate proj3d.view_transformation * Update doc/api/next_api_changes/deprecations/23449-SS.rst Co-authored-by: Greg Lucas <[email protected]> Co-authored-by: Scott Shambaugh <[email protected]> Co-authored-by: Oscar Gustafsson <[email protected]>
fun_name: _scale_axis_limits | repo: matplotlib | file_name: axes3d.py | language: Python
url: https://github.com/matplotlib/matplotlib.git
commit_id: 4896ec1a2cfb8c454e385632d8df213c915ced52
ast_levels: 9 | nloc: 11 | complexity: 1 | token_counts: 131 | n_ast_errors: 0 | vocab_size: 51 | n_ast_nodes: 201
documentation:
{ "docstring": "\n Keeping the center of the x, y, and z data axes fixed, scale their\n limits by scale factors. A scale factor > 1 zooms out and a scale\n factor < 1 zooms in.\n\n Parameters\n ----------\n scale_x : float\n Scale factor for the x data axis.\n scale_y : float\n Scale factor for the y data axis.\n scale_z : float\n Scale factor for the z data axis.\n ", "language": "en", "n_whitespaces": 162, "n_words": 65, "vocab_size": 37 }
code:

def _scale_axis_limits(self, scale_x, scale_y, scale_z):
    # Get the axis limits and centers
    minx, maxx, miny, maxy, minz, maxz = self.get_w_lims()
    cx = (maxx + minx)/2
    cy = (maxy + miny)/2
    cz = (maxz + minz)/2

    # Scale the data range
    dx = (maxx - minx)*scale_x
    dy = (maxy - miny)*scale_y
    dz = (maxz - minz)*scale_z

    # Set the scaled axis limits
    self.set_xlim3d(cx - dx/2, cx + dx/2)
    self.set_ylim3d(cy - dy/2, cy + dy/2)
    self.set_zlim3d(cz - dz/2, cz + dz/2)
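For intuition: with limits (0, 10) and a scale factor of 2, the center stays at 5 while the range doubles; a tiny check of the arithmetic used above (plain Python, variable names chosen for illustration):

    minx, maxx, scale_x = 0.0, 10.0, 2.0
    cx = (maxx + minx) / 2          # center: 5.0 (held fixed)
    dx = (maxx - minx) * scale_x    # scaled range: 20.0
    assert (cx - dx / 2, cx + dx / 2) == (-5.0, 15.0)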
----------------------------------------------------------------------
d_id: 44,489 | id: 184,113 | n_whitespaces: 93 | n_words: 20 | n_identifiers: 10
path: src/textual/widget.py
random_cut: def layers(self) -> tuple[str, ...]: for node in se
commit_message:
layers and docks
fun_name: layers | repo: textual | file_name: widget.py | language: Python
url: https://github.com/Textualize/textual.git
commit_id: c98e1b96049369f6af013a133f204ae0a286f2c7
ast_levels: 11 | nloc: 12 | complexity: 4 | token_counts: 51 | n_ast_errors: 0 | vocab_size: 18 | n_ast_nodes: 84
documentation:
{ "docstring": "Layers of from parent.\n\n Returns:\n tuple[str, ...]: Tuple of layer names.\n ", "language": "en", "n_whitespaces": 36, "n_words": 11, "vocab_size": 10 }
code:

def layers(self) -> tuple[str, ...]:
    for node in self.ancestors:
        if not isinstance(node, Widget):
            break
        if node.styles.has_rule("layers"):
            return node.styles.layers
    return ("default",)
----------------------------------------------------------------------
d_id: 70,663 | id: 245,114 | n_whitespaces: 57 | n_words: 33 | n_identifiers: 13
path: mmdet/testing/_utils.py
random_cut: def get_roi_head_cfg(fname): config = _get_config_module(fname) model = copy.deepcopy(config.model) roi_head = model.roi_head train_cfg = None if model.train_cfg is None else model.train_cfg.rcnn test_cfg = None if model.test_cfg is None else model.test_cfg.rcnn roi_head.update(dict(train_cfg=train_cfg, test_cfg=test_cfg)) return roi_head
commit_message:
Refactor Double Head, MS, Dynamic, Trident.
fun_name: get_roi_head_cfg | repo: mmdetection | file_name: _utils.py | language: Python
url: https://github.com/open-mmlab/mmdetection.git
commit_id: cd4e9ed8269b0c767e129169b7268b0ced7e60c9
ast_levels: 10 | nloc: 8 | complexity: 3 | token_counts: 74 | n_ast_errors: 0 | vocab_size: 22 | n_ast_nodes: 117
documentation:
{ "docstring": "Grab configs necessary to create a roi_head.\n\n These are deep copied to allow for safe modification of parameters without\n influencing other tests.\n ", "language": "en", "n_whitespaces": 31, "n_words": 22, "vocab_size": 21 }
code:

def get_roi_head_cfg(fname):
    config = _get_config_module(fname)
    model = copy.deepcopy(config.model)

    roi_head = model.roi_head
    train_cfg = None if model.train_cfg is None else model.train_cfg.rcnn
    test_cfg = None if model.test_cfg is None else model.test_cfg.rcnn
    roi_head.update(dict(train_cfg=train_cfg, test_cfg=test_cfg))
    return roi_head
----------------------------------------------------------------------
ast_errors: @contextmanager
d_id: 11,963 | id: 59,935 | n_whitespaces: 95 | n_words: 56 | n_identifiers: 19
path: src/prefect/logging/loggers.py
random_cut: def print_as_log(*args, **kwargs): from prefect.context import FlowRunContext, T
commit_message:
Add `log_prints` option to redirect print to logs (#7580) Co-authored-by: Will Raphaelson <[email protected]> Co-authored-by: Will Raphaelson <[email protected]> Co-authored-by: Nathan Nowack <[email protected]> Co-authored-by: Terrence Dorsey <[email protected]>
fun_name: print_as_log | repo: prefect | file_name: loggers.py | language: Python
url: https://github.com/PrefectHQ/prefect.git
commit_id: 298554b26fa5d866d34fed5f6e8646edb56984a4
ast_levels: 11 | nloc: 10 | complexity: 4 | token_counts: 89 | n_ast_errors: 1 | vocab_size: 43 | n_ast_nodes: 157
documentation:
{ "docstring": "\n A patch for `print` to send printed messages to the Prefect run logger.\n\n If no run is active, `print` will behave as if it were not patched.\n ", "language": "en", "n_whitespaces": 37, "n_words": 27, "vocab_size": 24 }
code:

def print_as_log(*args, **kwargs):
    from prefect.context import FlowRunContext, TaskRunContext

    context = TaskRunContext.get() or FlowRunContext.get()
    if not context or not context.log_prints:
        return print(*args, **kwargs)

    logger = get_run_logger()

    # Print to an in-memory buffer; so we do not need to implement `print`
    buffer = io.StringIO()
    kwargs["file"] = buffer
    print(*args, **kwargs)

    # Remove trailing whitespace to prevent duplicates
    logger.info(buffer.getvalue().rstrip())


@contextmanager
----------------------------------------------------------------------
d_id: 62,617 | id: 230,975 | n_whitespaces: 110 | n_words: 55 | n_identifiers: 23
path: packages/python/plotly/plotly/matplotlylib/mplexporter/tests/test_basic.py
random_cut: def test_image(): # Test fails for mat
commit_message:
Updated distutils.Version to packaging.Version
fun_name: test_image | repo: plotly.py | file_name: test_basic.py | language: Python
url: https://github.com/plotly/plotly.py.git
commit_id: 1d82b8822120db088bfeb6c8eae7ec8df9703783
ast_levels: 11 | nloc: 18 | complexity: 2 | token_counts: 94 | n_ast_errors: 0 | vocab_size: 45 | n_ast_nodes: 159
documentation:
{ "docstring": "\n opening figure\n opening axes\n draw image of size {image_size} \n closing axes\n closing figure\n ", "language": "en", "n_whitespaces": 159, "n_words": 13, "vocab_size": 9 }
code:

def test_image():
    # Test fails for matplotlib 1.5+ because the size of the image
    # generated by matplotlib has changed.
    if Version(matplotlib.__version__) == Version("3.4.1"):
        image_size = 432
    else:
        pytest.skip("Test fails for older matplotlib")
    np.random.seed(0)  # image size depends on the seed
    fig, ax = plt.subplots(figsize=(2, 2))
    ax.imshow(np.random.random((10, 10)), cmap=plt.cm.jet, interpolation="nearest")
    _assert_output_equal(
        fake_renderer_output(fig, FakeRenderer),
        f,
    )
----------------------------------------------------------------------
d_id: 54,361 | id: 216,055 | n_whitespaces: 112 | n_words: 48 | n_identifiers: 17
path: salt/cloud/clouds/proxmox.py
random_cut: def ignore_cidr(vm_, ip): from ipaddress import ip_address, ip_network cidrs = config.get_cloud_config_value( "ignore_cidr", vm_, __opts__, default=[], search_global=False ) if cidrs and isinstance(cidrs, str): cidrs = [cidrs] for cidr in cidrs or []: if ip_address(ip) in ip_network(cidr): log.warning("IP %r found within %r; ignoring it.", ip, cidr) return True retur
commit_message:
Add support for get IP-address from agent
fun_name: ignore_cidr | repo: salt | file_name: proxmox.py | language: Python
url: https://github.com/saltstack/salt.git
commit_id: a5679caf65c7c79cd72841b6e5793b9b693744c9
ast_levels: 12 | nloc: 12 | complexity: 6 | token_counts: 83 | n_ast_errors: 0 | vocab_size: 41 | n_ast_nodes: 131
documentation:
{ "docstring": "\n Return True if we are to ignore the specified IP.\n ", "language": "en", "n_whitespaces": 17, "n_words": 10, "vocab_size": 10 }
code:

def ignore_cidr(vm_, ip):
    from ipaddress import ip_address, ip_network

    cidrs = config.get_cloud_config_value(
        "ignore_cidr", vm_, __opts__, default=[], search_global=False
    )
    if cidrs and isinstance(cidrs, str):
        cidrs = [cidrs]
    for cidr in cidrs or []:
        if ip_address(ip) in ip_network(cidr):
            log.warning("IP %r found within %r; ignoring it.", ip, cidr)
            return True

    return False
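The containment test itself is pure standard library; a short illustration of the `ipaddress` membership check used above:

    from ipaddress import ip_address, ip_network

    # An address inside the network matches; one outside does not.
    assert ip_address("10.0.0.5") in ip_network("10.0.0.0/24")
    assert ip_address("192.168.1.5") not in ip_network("10.0.0.0/24")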
----------------------------------------------------------------------
d_id: 21,883 | id: 104,508 | n_whitespaces: 824 | n_words: 270 | n_identifiers: 29
path: src/datasets/features/features.py
random_cut: def encode_nested_example(schema, obj): # Nested structures: we allow dict, list/tuples, sequences if isinstance(schema, dict): return {k: encode_nested_example(sub_schema, sub_obj) for k, (sub_schema, sub_obj) in zip_dict(schema, obj)} elif isinstance(schema, (list, tuple)): sub_schema = schema[0] if obj is None: return None else: if len(obj) > 0: for first_elmt in obj: if _check_non_null_non_empty_recursive(first_elmt, sub_schema): break if encode_nested_example(sub_schema, first_elmt) != first_elmt: return [encode_nested_example(sub_schema, o) for o in obj] return list(obj) elif isinstance(schema, Sequence): # We allow to reverse list of dict => dict of list for compatiblity with tfds if isinstance(schema.feature, dict): # dict of list to fill list_dict = {} if isinstance(obj, (list, tuple)): # obj is a list of dict for k, dict_tuples in zip_dict(schema.feature, *obj): list_dict[k] = [encode_nested_example(dict_tuples[0], o) for o in dict_tuples[1:]] return list_dict else: # obj is
commit_message:
Module namespace cleanup for v2.0 (#3875) * Imports cleaning * Small change * Remove unused methods * Small fix * Additional fix * Final fix * Fix benchmark test * Fix benchmark test #2
fun_name: encode_nested_example | repo: datasets | file_name: features.py | language: Python
url: https://github.com/huggingface/datasets.git
commit_id: ba4d30c42e0702bd894c36777d7d2c0adf74516c
ast_levels: 19 | nloc: 41 | complexity: 27 | token_counts: 356 | n_ast_errors: 0 | vocab_size: 134 | n_ast_nodes: 541
documentation:
{ "docstring": "Encode a nested example.\n This is used since some features (in particular ClassLabel) have some logic during encoding.\n\n To avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be encoded.\n If the first element needs to be encoded, then all the elements of the list will be encoded, otherwise they'll stay the same.\n ", "language": "en", "n_whitespaces": 83, "n_words": 71, "vocab_size": 55 }
code:

def encode_nested_example(schema, obj):
    # Nested structures: we allow dict, list/tuples, sequences
    if isinstance(schema, dict):
        return {k: encode_nested_example(sub_schema, sub_obj) for k, (sub_schema, sub_obj) in zip_dict(schema, obj)}
    elif isinstance(schema, (list, tuple)):
        sub_schema = schema[0]
        if obj is None:
            return None
        else:
            if len(obj) > 0:
                for first_elmt in obj:
                    if _check_non_null_non_empty_recursive(first_elmt, sub_schema):
                        break
                if encode_nested_example(sub_schema, first_elmt) != first_elmt:
                    return [encode_nested_example(sub_schema, o) for o in obj]
            return list(obj)
    elif isinstance(schema, Sequence):
        # We allow to reverse list of dict => dict of list for compatiblity with tfds
        if isinstance(schema.feature, dict):
            # dict of list to fill
            list_dict = {}
            if isinstance(obj, (list, tuple)):
                # obj is a list of dict
                for k, dict_tuples in zip_dict(schema.feature, *obj):
                    list_dict[k] = [encode_nested_example(dict_tuples[0], o) for o in dict_tuples[1:]]
                return list_dict
            else:
                # obj is a single dict
                for k, (sub_schema, sub_objs) in zip_dict(schema.feature, obj):
                    list_dict[k] = [encode_nested_example(sub_schema, o) for o in sub_objs]
                return list_dict
        # schema.feature is not a dict
        if isinstance(obj, str):  # don't interpret a string as a list
            raise ValueError(f"Got a string but expected a list instead: '{obj}'")
        if obj is None:
            return None
        else:
            if len(obj) > 0:
                for first_elmt in obj:
                    if _check_non_null_non_empty_recursive(first_elmt, schema.feature):
                        break
                # be careful when comparing tensors here
                if not isinstance(first_elmt, list) or encode_nested_example(schema.feature, first_elmt) != first_elmt:
                    return [encode_nested_example(schema.feature, o) for o in obj]
            return list(obj)
    # Object with special encoding:
    # ClassLabel will convert from string to int, TranslationVariableLanguages does some checks
    elif isinstance(schema, (Audio, Image, ClassLabel, TranslationVariableLanguages, Value, _ArrayXD)):
        return schema.encode_example(obj) if obj is not None else None
    # Other object should be directly convertible to a native Arrow type (like Translation and Translation)
    return obj
20,490
101,053
73
scripts/train.py
38
4
def _configure_matplotlib(cls): rcParams["keymap.fullscreen"] = [k for k in rcParams["keymap.fullscreen"] if k != "f"
bugfix: Stop preview window from stealing focus
_configure_matplotlib
c8122bc499afba4fcb99030e42e08bfb8d3a75e1
faceswap
train.py
10
5
https://github.com/deepfakes/faceswap.git
7
69
0
17
123
Python
{ "docstring": " Remove `F`, 'S' and 'R' from their default bindings and stop Matplotlib from stealing\n focus ", "language": "en", "n_whitespaces": 23, "n_words": 15, "vocab_size": 13 }
def _configure_matplotlib(cls): rcParams["keymap.fullscreen"] = [k for k in rcParams["keymap.fullscreen"] if k != "f"] rcParams["keymap.save"] = [k for k in rcParams["keymap.save"] if k != "s"] rcParams["keymap.home"] = [k for k in rcParams["keymap.home"] if k != "r"] rcParams["figure.raise_window"] = False
17,337
82,284
68
cms/cache/permissions.py
27
13
def set_permission_cache(user, key, value): from django.core.cache import cache # store this key, so we can clean it when required cache_ke
Enabled isort workflow (#7200) * Ran isort * Enabled isort workflow Co-authored-by: Vinit Kumar <[email protected]>
set_permission_cache
a3110e1ff24085373898c7d2a85f628abeb8518d
django-cms
permissions.py
11
6
https://github.com/django-cms/django-cms.git
1
48
0
26
77
Python
{ "docstring": "\n Helper method for storing values in cache. Stores used keys so\n all of them can be cleaned when clean_permission_cache gets called.\n ", "language": "en", "n_whitespaces": 31, "n_words": 21, "vocab_size": 21 }
def set_permission_cache(user, key, value): from django.core.cache import cache # store this key, so we can clean it when required cache_key = get_cache_key(user, key) cache.set(cache_key, value, get_cms_setting('CACHE_DURATIONS')['permissions'], version=get_cache_permission_version())
73,611
251,155
55
mitmproxy/http.py
12
10
def cookies(self) -> multidict.MultiDictView[str, tuple[str, multidict.MultiDict[str, Optional[str]]]]: return multidict.MultiDict
`pyupgrade --py39-plus **/*.py`
cookies
e83ec8390ad6be6a86cfcfc57bce14cb8861bf32
mitmproxy
http.py
8
14
https://github.com/mitmproxy/mitmproxy.git
1
43
0
12
63
Python
{ "docstring": "\n The response cookies. A possibly empty `MultiDictView`, where the keys are cookie\n name strings, and values are `(cookie value, attributes)` tuples. Within\n attributes, unary attributes (e.g. `HTTPOnly`) are indicated by a `None` value.\n Modifications to the MultiDictView update `Response.headers`, and vice versa.\n\n *Warning:* Changes to `attributes` will not be picked up unless you also reassign\n the `(cookie value, attributes)` tuple directly in the `MultiDictView`.\n ", "language": "en", "n_whitespaces": 114, "n_words": 64, "vocab_size": 54 }
def cookies(self) -> multidict.MultiDictView[str, tuple[str, multidict.MultiDict[str, Optional[str]]]]: return multidict.MultiDictView( self._get_cookies, self._set_cookies )
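A hedged usage sketch of the property above, assuming mitmproxy is installed and that `Response.make()` accepts a headers mapping (true for recent releases). It shows the `(value, attributes)` tuples and the `None` marker for unary attributes described in the docstring.

```python
from mitmproxy import http

resp = http.Response.make(200, b"", {"Set-Cookie": "session=abc; Path=/; HttpOnly"})
value, attrs = resp.cookies["session"]
print(value)              # abc
print(attrs["HttpOnly"])  # None -> unary attribute, per the docstring
# Per the warning above, reassign the whole tuple so the headers update:
resp.cookies["session"] = ("xyz", attrs)
print(resp.headers["Set-Cookie"])  # Set-Cookie now carries session=xyz
```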
34,221
148,285
11
python/ray/_private/thirdparty/pathspec/util.py
16
7
def _normalize_entries(entries, separators=None): norm_files = {} for entry in entries: norm_files[normalize_file(entry.path, separators=separators)] = entry return norm_files
[Bugfix] fix invalid excluding of Black (#24042) - We should use `--force-exclude` when we pass code path explicitly https://black.readthedocs.io/en/stable/usage_and_configuration/the_basics.html?highlight=--force-exclude#command-line-options - Recover the files in `python/ray/_private/thirdparty` which has been formatted in the PR https://github.com/ray-project/ray/pull/21975 by mistake.
_normalize_entries
0e6c042e29cbbe429d81c9c1af3c75c261f00980
ray
util.py
12
5
https://github.com/ray-project/ray.git
2
36
0
13
57
Python
{ "docstring": "\n\tNormalizes the entry paths to use the POSIX path separator.\n\n\t*entries* (:class:`~collections.abc.Iterable` of :class:`.TreeEntry`)\n\tcontains the entries to be normalized.\n\n\t*separators* (:class:`~collections.abc.Collection` of :class:`str`; or\n\t:data:`None`) optionally contains the path separators to normalize.\n\tSee :func:`normalize_file` for more information.\n\n\tReturns a :class:`dict` mapping the each normalized file path (:class:`str`)\n\tto the entry (:class:`.TreeEntry`)\n\t", "language": "en", "n_whitespaces": 44, "n_words": 52, "vocab_size": 39 }
def _normalize_entries(entries, separators=None): norm_files = {} for entry in entries: norm_files[normalize_file(entry.path, separators=separators)] = entry return norm_files
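A self-contained sketch of the same idea; `Entry` mimics TreeEntry's `.path` attribute and `normalize_file` is a simplified stand-in for pathspec's helper, both assumptions made purely for illustration.

```python
class Entry:
    """Minimal stand-in for pathspec's TreeEntry."""
    def __init__(self, path):
        self.path = path

def normalize_file(path, separators=None):
    # simplified: rewrite each separator to the POSIX "/"
    for sep in separators or ["\\"]:
        path = path.replace(sep, "/")
    return path

def _normalize_entries(entries, separators=None):
    return {normalize_file(e.path, separators=separators): e for e in entries}

norm = _normalize_entries([Entry("src\\main.py"), Entry("docs/index.md")])
print(sorted(norm))  # ['docs/index.md', 'src/main.py']
```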
51,528
206,460
200
django/test/testcases.py
45
13
def assertXMLNotEqual(self, xml1, xml2, msg=None): try: result = compare_xml(xml1, xml2) except Exception as e: standardMsg = "First or second argument is not valid XML\n%s" %
Refs #33476 -- Reformatted code with Black.
assertXMLNotEqual
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
testcases.py
15
13
https://github.com/django/django.git
3
85
0
38
137
Python
{ "docstring": "\n Assert that two XML snippets are not semantically equivalent.\n Whitespace in most cases is ignored and attribute ordering is not\n significant. The arguments must be valid XML.\n ", "language": "en", "n_whitespaces": 56, "n_words": 27, "vocab_size": 25 }
def assertXMLNotEqual(self, xml1, xml2, msg=None): try: result = compare_xml(xml1, xml2) except Exception as e: standardMsg = "First or second argument is not valid XML\n%s" % e self.fail(self._formatMessage(msg, standardMsg)) else: if result: standardMsg = "%s == %s" % ( safe_repr(xml1, True), safe_repr(xml2, True), ) self.fail(self._formatMessage(msg, standardMsg))
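A hedged usage sketch inside a Django test case (assumes it runs within a project's configured test suite); attribute order is ignored, so only a genuinely different document makes the assertion pass.

```python
from django.test import SimpleTestCase

class XMLComparisonTests(SimpleTestCase):
    def test_semantically_different(self):
        # the same attributes in a different order would compare equal;
        # the extra d='3' makes the documents semantically different
        self.assertXMLNotEqual("<a b='1' c='2'/>", "<a c='2' b='1' d='3'/>")
```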
51,399
206,187
334
django/template/base.py
95
13
def token_kwargs(bits, parser, support_legacy=False): if not bits: return {} match = kwarg_re.match(bits[0]) kwarg_format = match and match[1] if not kwarg_format: if not support_legacy: return {} if len(bits) < 3 or bits[1] != "as": return {} kwargs = {} while bits: if kwarg_format: match = kwarg_re.match(bits[0]) if not match or not match[1]: return kwargs key, value = match.groups() del bits[:1] else: if len(bits) < 3 or bits[1] != "as": return kwargs key, value = bits[2], bits[0] del bits[:3] kwargs[key] = parser.compile_filter(value) if bits and not kwarg_format: if bits[0] != "and": return kwargs del bits[:1] return kwargs
Refs #33476 -- Reformatted code with Black.
token_kwargs
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
base.py
14
29
https://github.com/django/django.git
16
188
0
40
303
Python
{ "docstring": "\n Parse token keyword arguments and return a dictionary of the arguments\n retrieved from the ``bits`` token list.\n\n `bits` is a list containing the remainder of the token (split by spaces)\n that is to be checked for arguments. Valid arguments are removed from this\n list.\n\n `support_legacy` - if True, the legacy format ``1 as foo`` is accepted.\n Otherwise, only the standard ``foo=1`` format is allowed.\n\n There is no requirement for all remaining token ``bits`` to be keyword\n arguments, so return the dictionary as soon as an invalid argument format\n is reached.\n ", "language": "en", "n_whitespaces": 124, "n_words": 90, "vocab_size": 59 }
def token_kwargs(bits, parser, support_legacy=False): if not bits: return {} match = kwarg_re.match(bits[0]) kwarg_format = match and match[1] if not kwarg_format: if not support_legacy: return {} if len(bits) < 3 or bits[1] != "as": return {} kwargs = {} while bits: if kwarg_format: match = kwarg_re.match(bits[0]) if not match or not match[1]: return kwargs key, value = match.groups() del bits[:1] else: if len(bits) < 3 or bits[1] != "as": return kwargs key, value = bits[2], bits[0] del bits[:3] kwargs[key] = parser.compile_filter(value) if bits and not kwarg_format: if bits[0] != "and": return kwargs del bits[:1] return kwargs
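A sketch showing how the bits list is consumed in place, using a stub parser whose `compile_filter` is the identity (real callers pass a `django.template.base.Parser`); assumes Django is importable.

```python
from django.template.base import token_kwargs

class StubParser:
    def compile_filter(self, value):
        return value  # identity; the real Parser builds a FilterExpression

bits = ["foo=1", "bar=2", "rest", "of", "tag"]
print(token_kwargs(bits, StubParser()))  # {'foo': '1', 'bar': '2'}
print(bits)  # ['rest', 'of', 'tag'] -- parsing stopped at the first non-kwarg
```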
13,924
65,547
4
erpnext/buying/doctype/supplier_scorecard_variable/supplier_scorecard_variable.py
6
4
def get_cost_of_delayed_shipments(scorecard): return get_total_cost_of_shipments(scorecard) - get_cost_of_on_time_shipments(scorec
style: format code with black
get_cost_of_delayed_shipments
494bd9ef78313436f0424b918f200dab8fc7c20b
erpnext
supplier_scorecard_variable.py
8
2
https://github.com/frappe/erpnext.git
1
16
0
6
29
Python
{ "docstring": "Gets the total cost of all delayed shipments in the period (based on Purchase Receipts - POs)", "language": "en", "n_whitespaces": 16, "n_words": 17, "vocab_size": 16 }
def get_cost_of_delayed_shipments(scorecard): return get_total_cost_of_shipments(scorecard) - get_cost_of_on_time_shipments(scorecard)
70,253
244,126
230
mmdet/models/losses/utils.py
112
12
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): # if weight is specified, apply element-wise weight if weight is not None: loss = loss * weight # if avg_factor is not specified, just reduce the loss if avg_factor is None: loss = reduce_loss(loss, reduction) els
[Fix] Fix reduction=mean in CELoss. (#7449) * [Fix] Fix ignore in CELoss. * add ut * fix and add comments * add avg_non_ignore option * bce avg * fix lint
weight_reduce_loss
3b2e9655631a2edd28bb94c640bd6a74c0bfad55
mmdetection
utils.py
15
12
https://github.com/open-mmlab/mmdetection.git
5
86
0
69
150
Python
{ "docstring": "Apply element-wise weight and reduce loss.\n\n Args:\n loss (Tensor): Element-wise loss.\n weight (Tensor): Element-wise weights.\n reduction (str): Same as built-in losses of PyTorch.\n avg_factor (float): Average factor when computing the mean of losses.\n\n Returns:\n Tensor: Processed loss values.\n ", "language": "en", "n_whitespaces": 82, "n_words": 38, "vocab_size": 32 }
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): # if weight is specified, apply element-wise weight if weight is not None: loss = loss * weight # if avg_factor is not specified, just reduce the loss if avg_factor is None: loss = reduce_loss(loss, reduction) else: # if reduction is mean, then average the loss by avg_factor if reduction == 'mean': # Avoid causing ZeroDivisionError when avg_factor is 0.0, # i.e., all labels of an image belong to ignore index. eps = torch.finfo(torch.float32).eps loss = loss.sum() / (avg_factor + eps) # if reduction is 'none', then do nothing, otherwise raise an error elif reduction != 'none': raise ValueError('avg_factor can not be used with reduction="sum"') return loss
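A hedged usage sketch, assuming PyTorch and the function above (with its `reduce_loss` helper) in scope; with `avg_factor` given and `reduction='mean'`, the weighted sum is divided by `avg_factor + eps`.

```python
import torch

loss = torch.tensor([1.0, 2.0, 3.0, 4.0])
weight = torch.tensor([1.0, 1.0, 0.0, 0.0])  # mask out the last two elements
out = weight_reduce_loss(loss, weight=weight, reduction='mean', avg_factor=2)
print(out)  # tensor(1.5000) -> (1.0 + 2.0) / (2 + eps)
```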
@pytest.mark.filterwarnings("ignore:The problem size")
69,727
241,889
62
scipy/sparse/linalg/_eigen/lobpcg/tests/test_lobpcg.py
35
22
def test_failure_to_run_iterations(): rnd = np.random.RandomState(0) X = rnd.standard_normal((100, 10)) A = X @ X.T Q = rnd.standard_normal((X.shape[0], 4)) with pytest.warns(UserWarning,
Update test_lobpcg.py copy/paste from #15280
test_failure_to_run_iterations
e477bab940324648c6f6e2fb53f0932dff19b11b
scipy
test_lobpcg.py
11
8
https://github.com/scipy/scipy.git
1
88
1
30
158
Python
{ "docstring": "Check that the code exists gracefully without breaking. Issue #10974.\n ", "language": "en", "n_whitespaces": 13, "n_words": 10, "vocab_size": 10 }
def test_failure_to_run_iterations(): rnd = np.random.RandomState(0) X = rnd.standard_normal((100, 10)) A = X @ X.T Q = rnd.standard_normal((X.shape[0], 4)) with pytest.warns(UserWarning, match="Exited at iteration"): eigenvalues, _ = lobpcg(A, Q, maxiter=20) assert(np.max(eigenvalues) > 0) @pytest.mark.filterwarnings("ignore:The problem size")
25,675
116,154
298
tests/unit/test_executor.py
82
22
def test_predictor_tableau_header(self, mock_handler): df = pd.DataFrame([ {'a': 1, 'b': 'one'}, {'a': 2, 'b': 'two'}, {'a': 1, 'b': 'three'}, ]) self.set_handler(mock_handler, name='pg', tables={'tasks': df}) # --- use predictor --- predicted_value = 5 predictor = { 'name': 'task_model', 'predict': 'p', 'dtypes': { 'p': dtype.float, 'a': dtype.integer, 'b': dtype.categorical }, 'predicted_value': predicted_value } self.set_predictor(predictor) ret = self.command_executor.execute_command(parse_sql(f, dialect='mindsdb')) # second column is having last value of 'b' # 3: count rows, 4: sum of 'a', 5 max of pre
executor tests
test_predictor_tableau_header
02a831997cdffafca7cb160eb1938e72020ee049
mindsdb
test_executor.py
12
32
https://github.com/mindsdb/mindsdb.git
1
143
0
64
250
Python
{ "docstring": "\n SELECT \n SUM(1) AS `cnt__0B4A4E8BD11C48FFB4730D4D2C32191A_ok`,\n sum(`Custom SQL Query`.`a`) AS `sum_height_ok`,\n max(`Custom SQL Query`.`p`) AS `sum_length1_ok`\n FROM (\n SELECT res.a, res.p \n FROM pg.tasks as source\n JOIN mindsdb.task_model as res\n ) `Custom SQL Query`\n HAVING (COUNT(1) > 0)\n ", "language": "en", "n_whitespaces": 176, "n_words": 35, "vocab_size": 28 }
def test_predictor_tableau_header(self, mock_handler): df = pd.DataFrame([ {'a': 1, 'b': 'one'}, {'a': 2, 'b': 'two'}, {'a': 1, 'b': 'three'}, ]) self.set_handler(mock_handler, name='pg', tables={'tasks': df}) # --- use predictor --- predicted_value = 5 predictor = { 'name': 'task_model', 'predict': 'p', 'dtypes': { 'p': dtype.float, 'a': dtype.integer, 'b': dtype.categorical }, 'predicted_value': predicted_value } self.set_predictor(predictor) ret = self.command_executor.execute_command(parse_sql(f, dialect='mindsdb')) # second column is having last value of 'b' # 3: count rows, 4: sum of 'a', 5 max of prediction assert ret.data[0] == [3, 4, 5]
9,584
48,733
41
tests/test_routers.py
9
10
def test_conflicting_specified_basename_different_models(self): self.router.register(r'notes', NoteViewSet) with pytest.raises(ImproperlyConfigured): self.router.register(r'notes_basename', BasenameViewSet, basename='routertestmodel')
raise ImproperlyConfigured exception if `basename` is not unique (#8438) * raise ImproperlyConfigured if basename already exists * rename already_registered function; return True/False * additional basename tests * additional basename tests * Update rest_framework/routers.py Co-authored-by: David Graves <[email protected]> Co-authored-by: Asif Saif Uddin <[email protected]>
test_conflicting_specified_basename_different_models
48a21aa0eb3a95d32456c2a927eff9552a04231e
django-rest-framework
test_routers.py
11
4
https://github.com/encode/django-rest-framework.git
1
40
0
9
69
Python
{ "docstring": "\n Ensure 2 routers with different models, and a conflicting basename specified\n throws an exception\n ", "language": "en", "n_whitespaces": 36, "n_words": 14, "vocab_size": 14 }
def test_conflicting_specified_basename_different_models(self): self.router.register(r'notes', NoteViewSet) with pytest.raises(ImproperlyConfigured): self.router.register(r'notes_basename', BasenameViewSet, basename='routertestmodel')
18,525
89,279
220
src/sentry/dynamic_sampling/latest_release_booster.py
31
18
def _get_boosted_releases(self) -> BoostedReleases: boosted_releases = BoostedReleases() for boosted_release_cache_key, timestamp in self.redis_client.hgetall( self._generate_cache_key_for_boosted_releases_hash() ).items(): extracted_data = self._extr
ref(metrics): Change implementation of latest release [TET-555] (#41757)
_get_boosted_releases
16b946cef6e851e40d552e1f9a9d44d0f7d31509
sentry
latest_release_booster.py
14
21
https://github.com/getsentry/sentry.git
3
77
0
27
123
Python
{ "docstring": "\n Returns all the boosted releases and parses them based on key and value data.\n\n This method should not be called directly as the boosted releases are not extended, thus they contain only a\n subset of information.\n ", "language": "en", "n_whitespaces": 65, "n_words": 36, "vocab_size": 31 }
def _get_boosted_releases(self) -> BoostedReleases: boosted_releases = BoostedReleases() for boosted_release_cache_key, timestamp in self.redis_client.hgetall( self._generate_cache_key_for_boosted_releases_hash() ).items(): extracted_data = self._extract_data_from_cache_key(boosted_release_cache_key) if extracted_data: release_id, environment = extracted_data boosted_releases.add_release( cache_key=boosted_release_cache_key, id=release_id, timestamp=float(timestamp), environment=environment, ) return boosted_releases
29,350
130,772
800
python/ray/internal/internal_api.py
194
20
def store_stats_summary(reply): store_summary = "--- Aggregate object store stats across all nodes ---\n" # TODO(ekl) it would be nice if we could provide a full memory usage # breakdown by type (e.g., pinned by worker, primar
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
store_stats_summary
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
ray
internal_api.py
17
56
https://github.com/ray-project/ray.git
6
272
0
101
438
Python
{ "docstring": "Returns formatted string describing object store stats in all nodes.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
def store_stats_summary(reply): store_summary = "--- Aggregate object store stats across all nodes ---\n" # TODO(ekl) it would be nice if we could provide a full memory usage # breakdown by type (e.g., pinned by worker, primary, etc.) store_summary += ( "Plasma memory usage {} MiB, {} objects, {}% full, {}% " "needed\n".format( int(reply.store_stats.object_store_bytes_used / (1024 * 1024)), reply.store_stats.num_local_objects, round( 100 * reply.store_stats.object_store_bytes_used / reply.store_stats.object_store_bytes_avail, 2, ), round( 100 * reply.store_stats.object_store_bytes_primary_copy / reply.store_stats.object_store_bytes_avail, 2, ), ) ) if reply.store_stats.object_store_bytes_fallback > 0: store_summary += "Plasma filesystem mmap usage: {} MiB\n".format( int(reply.store_stats.object_store_bytes_fallback / (1024 * 1024)) ) if reply.store_stats.spill_time_total_s > 0: store_summary += ( "Spilled {} MiB, {} objects, avg write throughput {} MiB/s\n".format( int(reply.store_stats.spilled_bytes_total / (1024 * 1024)), reply.store_stats.spilled_objects_total, int( reply.store_stats.spilled_bytes_total / (1024 * 1024) / reply.store_stats.spill_time_total_s ), ) ) if reply.store_stats.restore_time_total_s > 0: store_summary += ( "Restored {} MiB, {} objects, avg read throughput {} MiB/s\n".format( int(reply.store_stats.restored_bytes_total / (1024 * 1024)), reply.store_stats.restored_objects_total, int( reply.store_stats.restored_bytes_total / (1024 * 1024) / reply.store_stats.restore_time_total_s ), ) ) if reply.store_stats.consumed_bytes > 0: store_summary += "Objects consumed by Ray tasks: {} MiB.\n".format( int(reply.store_stats.consumed_bytes / (1024 * 1024)) ) if reply.store_stats.object_pulls_queued: store_summary += "Object fetches queued, waiting for available memory." return store_summary
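A sketch that drives the formatter with a tiny stand-in for the node-stats reply object (attribute names taken from the code above; the `SimpleNamespace` stand-in is an assumption, real callers pass a protobuf reply).

```python
from types import SimpleNamespace

stats = SimpleNamespace(
    object_store_bytes_used=512 * 1024 * 1024,
    num_local_objects=100,
    object_store_bytes_avail=1024 * 1024 * 1024,
    object_store_bytes_primary_copy=256 * 1024 * 1024,
    object_store_bytes_fallback=0,
    spill_time_total_s=0,
    restore_time_total_s=0,
    consumed_bytes=0,
    object_pulls_queued=False,
)
print(store_stats_summary(SimpleNamespace(store_stats=stats)))
# --- Aggregate object store stats across all nodes ---
# Plasma memory usage 512 MiB, 100 objects, 50.0% full, 25.0% needed
```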
73,416
250,395
20
tests/handlers/test_register.py
6
7
def test_spam_checker_deny(self) -> None: self.get_failure(self.handler.register_user(localpart="user"), SynapseError)
Add missing type hints to tests.handlers. (#14680) And do not allow untyped defs in tests.handlers.
test_spam_checker_deny
652d1669c5a103b1c20478770c4aaf18849c09a3
synapse
test_register.py
11
3
https://github.com/matrix-org/synapse.git
1
25
0
6
44
Python
{ "docstring": "A spam checker can deny registration, which results in an error.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
def test_spam_checker_deny(self) -> None: self.get_failure(self.handler.register_user(localpart="user"), SynapseError)
21,073
101,668
71
lib/align/aligned_face.py
28
19
def _get_pitch_yaw_roll(self) -> None: proj_matrix = np.zeros((3, 4), dtype="float32") proj_matrix[:3,
sort tool: Add sort by roll
_get_pitch_yaw_roll
a7d0898f64adc9816427c4923074c7955ce95ac8
faceswap
aligned_face.py
12
7
https://github.com/deepfakes/faceswap.git
1
90
0
25
143
Python
{ "docstring": " Obtain the yaw, roll and pitch from the :attr:`_rotation` in eular angles. ", "language": "en", "n_whitespaces": 13, "n_words": 12, "vocab_size": 11 }
def _get_pitch_yaw_roll(self) -> None: proj_matrix = np.zeros((3, 4), dtype="float32") proj_matrix[:3, :3] = cv2.Rodrigues(self._rotation)[0] euler = cv2.decomposeProjectionMatrix(proj_matrix)[-1] self._pitch_yaw_roll = cast(Tuple[float, float, float], tuple(euler.squeeze())) logger.trace("yaw_pitch: %s", self._pitch_yaw_roll) # type: ignore
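A self-contained sketch of the same OpenCV calls (assumes opencv-python and numpy); a zero rotation vector yields the identity rotation, so all three angles come back 0.

```python
import cv2
import numpy as np

rotation = np.zeros(3, dtype="float32")        # stand-in for self._rotation
proj_matrix = np.zeros((3, 4), dtype="float32")
proj_matrix[:3, :3] = cv2.Rodrigues(rotation)[0]
euler = cv2.decomposeProjectionMatrix(proj_matrix)[-1]
print(euler.squeeze())  # [0. 0. 0.] -> pitch, yaw, roll in degrees
```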
91,989
292,922
36
tests/components/dlna_dmr/test_data.py
13
6
def aiohttp_notify_servers_mock() -> Iterable[Mock]: with patch( "homeassistant.
Bump async-upnp-client to 0.25.0 (#66414) Co-authored-by: J. Nick Koston <[email protected]>
aiohttp_notify_servers_mock
dbbb5655e5df0d72ca6b5534af624b54027cbb6d
core
test_data.py
11
17
https://github.com/home-assistant/core.git
2
50
0
13
44
Python
{ "docstring": "Construct mock AiohttpNotifyServer on demand, eliminating network use.\n\n This fixture provides a list of the constructed servers.\n ", "language": "en", "n_whitespaces": 23, "n_words": 17, "vocab_size": 17 }
def aiohttp_notify_servers_mock() -> Iterable[Mock]: with patch( "homeassistant.components.dlna_dmr.data.AiohttpNotifyServer" ) as mock_constructor: servers = []
23,823
109,916
25
lib/mpl_toolkits/mplot3d/art3d.py
13
10
def line_collection_2d_to_3d(col, zs=0, zdir='z'): segments3d = _paths_to_3d_segments(col.get_p
Improve mpl_toolkit documentation
line_collection_2d_to_3d
df6f95703b60348e01603f98a439b133da2938a0
matplotlib
art3d.py
10
4
https://github.com/matplotlib/matplotlib.git
1
39
0
12
64
Python
{ "docstring": "Convert a `.LineCollection` to a `.Line3DCollection` object.", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 6 }
def line_collection_2d_to_3d(col, zs=0, zdir='z'): segments3d = _paths_to_3d_segments(col.get_paths(), zs, zdir) col.__class__ = Line3DCollection col.set_segments(segments3d)
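A hedged usage sketch: the public entry point is `Axes3D.add_collection3d`, which routes a `LineCollection` through this helper (assumes matplotlib with the mplot3d toolkit).

```python
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection

fig = plt.figure()
ax = fig.add_subplot(projection="3d")
col = LineCollection([[(0, 0), (1, 1)], [(0, 1), (1, 0)]])
# internally calls line_collection_2d_to_3d(col, zs=0.5, zdir="y"),
# placing the segments in the plane y = 0.5
ax.add_collection3d(col, zs=0.5, zdir="y")
ax.set(xlim=(0, 1), ylim=(0, 1), zlim=(0, 1))
```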
13,581
64,235
259
erpnext/patches/v13_0/convert_to_website_item_in_item_card_group_template.py
69
23
def execute(): frappe.reload_doc("e_commerce", "web_template", "item_card_group") blocks = frappe.db.get_all( "Web Page Block", filters={"web_template": "Item Card Group"}, fields=["parent", "web_template_values", "name"] ) fields = generate_fields_to_edit() for block in blocks: web_template_value = json.loads(block.get('web_template_values')) for field in fields: item = web_template_value.get(field) if not item: continue if frappe.db.exists("Website Item", {"item_code": item}): website_item = frappe.db.get_value("Website Item", {"item_c
fix: Convert Item links to Website Item links in Item Card Group template data - Changed link option to Website Item in Item card group template - patch to convert pre-existing data
execute
456f27724c975685c2d5a92c20296737b11c084d
erpnext
convert_to_website_item_in_item_card_group_template.py
17
22
https://github.com/frappe/erpnext.git
6
159
0
51
275
Python
{ "docstring": "\n Convert all Item links to Website Item link values in\n exisitng 'Item Card Group' Web Page Block data.\n ", "language": "en", "n_whitespaces": 36, "n_words": 18, "vocab_size": 17 }
def execute(): frappe.reload_doc("e_commerce", "web_template", "item_card_group") blocks = frappe.db.get_all( "Web Page Block", filters={"web_template": "Item Card Group"}, fields=["parent", "web_template_values", "name"] ) fields = generate_fields_to_edit() for block in blocks: web_template_value = json.loads(block.get('web_template_values')) for field in fields: item = web_template_value.get(field) if not item: continue if frappe.db.exists("Website Item", {"item_code": item}): website_item = frappe.db.get_value("Website Item", {"item_code": item}) else: website_item = make_new_website_item(item, web_template_value, field) continue if website_item: web_template_value[field] = website_item frappe.db.set_value("Web Page Block", block.name, "web_template_values", json.dumps(web_template_value))
23,452
109,136
60
lib/matplotlib/lines.py
25
9
def set_pickradius(self, pickradius): if not isinstance(pickradius, Number) or pickradius < 0: raise ValueError("pick radius should be a distance") self._pickradius = pickradius pickradius = property(ge
Unify set_pickradius argument
set_pickradius
91f47d6eff63187f582c395c007d0152980be6b3
matplotlib
lines.py
10
4
https://github.com/matplotlib/matplotlib.git
3
31
0
22
65
Python
{ "docstring": "\n Set the pick radius used for containment tests.\n\n See `.contains` for more details.\n\n Parameters\n ----------\n pickradius : float\n Pick radius, in points.\n ", "language": "en", "n_whitespaces": 76, "n_words": 22, "vocab_size": 21 }
def set_pickradius(self, pickradius): if not isinstance(pickradius, Number) or pickradius < 0: raise ValueError("pick radius should be a distance") self._pickradius = pickradius pickradius = property(get_pickradius, set_pickradius)
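A short sketch of the validation this setter adds (assumes matplotlib):

```python
import matplotlib.lines as mlines

line = mlines.Line2D([0, 1], [0, 1])
line.set_pickradius(5)
print(line.pickradius)  # 5 -- the property round-trips through the setter
try:
    line.set_pickradius(-1)
except ValueError as err:
    print(err)  # pick radius should be a distance
```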
19,977
100,509
292
tools/preview/preview.py
57
18
def _predict(self): with self._lock: self._predicted_images = [] for frame in self._input_images: sel
bugfix: Preview Tool, ensure all config items are written
_predict
71c20252c2e747f692289cdefe80ad0d5a456ea6
faceswap
preview.py
14
17
https://github.com/deepfakes/faceswap.git
5
117
0
40
198
Python
{ "docstring": " Predict from the loaded frames.\n\n With a threading lock (to prevent stacking), run the selected faces through the Faceswap\n model predict function and add the output to :attr:`predicted`\n ", "language": "en", "n_whitespaces": 50, "n_words": 28, "vocab_size": 25 }
def _predict(self): with self._lock: self._predicted_images = [] for frame in self._input_images: self._predictor.in_queue.put(frame) idx = 0 while idx < self._sample_size: logger.debug("Predicting face %s of %s", idx + 1, self._sample_size) items = self._predictor.out_queue.get() if items == "EOF": logger.debug("Received EOF") break for item in items: self._predicted_images.append(item) logger.debug("Predicted face %s of %s", idx + 1, self._sample_size) idx += 1 logger.debug("Predicted faces")
48,907
198,396
50
sympy/polys/polytools.py
22
10
def exclude(f): J, new = f.rep.exclude() gens = [gen for j, gen in enumer
Cleanup loops and ranges
exclude
7d773eb18daaef3c54f34d1ac6cbc5b83a5bb16c
sympy
polytools.py
11
4
https://github.com/sympy/sympy.git
3
49
0
20
78
Python
{ "docstring": "\n Remove unnecessary generators from ``f``.\n\n Examples\n ========\n\n >>> from sympy import Poly\n >>> from sympy.abc import a, b, c, d, x\n\n >>> Poly(a + x, a, b, c, d, x).exclude()\n Poly(a + x, a, x, domain='ZZ')\n\n ", "language": "en", "n_whitespaces": 93, "n_words": 36, "vocab_size": 22 }
def exclude(f): J, new = f.rep.exclude() gens = [gen for j, gen in enumerate(f.gens) if j not in J] return f.per(new, gens=gens)
22,741
107,424
60
lib/matplotlib/axis.py
21
9
def set_ticks(self, ticks, labels=None, *, minor=False, **kwargs): result = self._set_tick_locations(ticks, minor=minor) if labels is not None: self.set_ticklabels(label
Expanded documentation of Axis.set_ticks as per discussion in issue #22262 (#22270) * Expanded documentation of Axis.set_ticks() * Fix flake8 W293 (blank line contains whitespace) warnings * Expanded the documentation even more based on discussion in issue #22262 * Update lib/matplotlib/axis.py - @jklymak rewording Co-authored-by: Jody Klymak <[email protected]> * Reduced verbosity of doc by @jklymak 's suggestion. * On second thought, the previous wording could be seen as very ambiguous. * Update set_ticks docstring by @timhoffm compromise suggestion Co-authored-by: Tim Hoffmann <[email protected]> * Removed extra sentence as per @timhoffm review * Blank line whitespace issue crept up again * Update lib/matplotlib/axis.py as per correction by @timhoffm Co-authored-by: Tim Hoffmann <[email protected]> Co-authored-by: unknown <> Co-authored-by: Jody Klymak <[email protected]> Co-authored-by: Tim Hoffmann <[email protected]>
set_ticks
695bc25c7a9b198e00c54496a8eed56a8a908cbf
matplotlib
axis.py
10
5
https://github.com/matplotlib/matplotlib.git
2
54
0
20
81
Python
{ "docstring": "\n Set this Axis' tick locations and optionally labels.\n\n If necessary, the view limits of the Axis are expanded so that all\n given ticks are visible.\n\n Parameters\n ----------\n ticks : list of floats\n List of tick locations. The axis `.Locator` is replaced by a\n `~.ticker.FixedLocator`.\n\n Some tick formatters will not label arbitrary tick positions;\n e.g. log formatters only label decade ticks by default. In\n such a case you can set a formatter explicitly on the axis\n using `.Axis.set_major_formatter` or provide formatted\n *labels* yourself.\n labels : list of str, optional\n List of tick labels. If not set, the labels are generated with\n the axis tick `.Formatter`.\n minor : bool, default: False\n If ``False``, set the major ticks; if ``True``, the minor ticks.\n **kwargs\n `.Text` properties for the labels. These take effect only if you\n pass *labels*. In other cases, please use `~.Axes.tick_params`.\n\n Notes\n -----\n The mandatory expansion of the view limits is an intentional design\n choice to prevent the surprise of a non-visible tick. If you need\n other limits, you should set the limits explicitly after setting the\n ticks.\n ", "language": "en", "n_whitespaces": 423, "n_words": 177, "vocab_size": 115 }
def set_ticks(self, ticks, labels=None, *, minor=False, **kwargs): result = self._set_tick_locations(ticks, minor=minor) if labels is not None: self.set_ticklabels(labels, minor=minor, **kwargs) return result
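A hedged usage sketch (assumes matplotlib >= 3.5, where `labels` is accepted here):

```python
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
# major ticks with explicit labels; **kwargs would style the Text objects
ax.xaxis.set_ticks([0.0, 0.5, 1.0], labels=["low", "mid", "high"])
# minor ticks keep the default formatter when labels are omitted
ax.xaxis.set_ticks([0.25, 0.75], minor=True)
```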
@frappe.whitelist()
14,558
67,567
45
erpnext/startup/leaderboard.py
73
22
def get_all_customers(date_range, company, field, limit=None): if field == "outstanding_amount": filters = [["docstatus", "=", "1"], ["company", "=", company]] if date_range: date_range = frappe.parse_json(date_range) filters.append(["posting_date", ">=", "between", [date_range[0], date_range[1]]]) return frappe.db.get_all( "Sales Invoice", fields=["customer as name", "sum(outstanding_amount) as value"], filters=filters, group_by="customer", order_by="value desc", limit=limit, ) else: if field == "total_sales_amount": select_field = "sum(so_item.base_net_amount)" elif field == "total_qty_sold": select_field = "sum(so_item.stock_qty)" date_condition = get_date_condition(date_range, "so.transaction_date") return frappe.db.sql( .format( select_field, dat
style: format code with black
get_all_customers
494bd9ef78313436f0424b918f200dab8fc7c20b
erpnext
leaderboard.py
14
35
https://github.com/frappe/erpnext.git
5
162
1
57
280
Python
{ "docstring": "\n\t\t\tselect so.customer as name, {0} as value\n\t\t\tFROM `tabSales Order` as so JOIN `tabSales Order Item` as so_item\n\t\t\t\tON so.name = so_item.parent\n\t\t\twhere so.docstatus = 1 {1} and so.company = %s\n\t\t\tgroup by so.customer\n\t\t\torder by value DESC\n\t\t\tlimit %s\n\t\t", "language": "en", "n_whitespaces": 33, "n_words": 40, "vocab_size": 30 }
def get_all_customers(date_range, company, field, limit=None): if field == "outstanding_amount": filters = [["docstatus", "=", "1"], ["company", "=", company]] if date_range: date_range = frappe.parse_json(date_range) filters.append(["posting_date", ">=", "between", [date_range[0], date_range[1]]]) return frappe.db.get_all( "Sales Invoice", fields=["customer as name", "sum(outstanding_amount) as value"], filters=filters, group_by="customer", order_by="value desc", limit=limit, ) else: if field == "total_sales_amount": select_field = "sum(so_item.base_net_amount)" elif field == "total_qty_sold": select_field = "sum(so_item.stock_qty)" date_condition = get_date_condition(date_range, "so.transaction_date") return frappe.db.sql( .format( select_field, date_condition ), (company, cint(limit)), as_dict=1, ) @frappe.whitelist()
103,871
305,079
189
homeassistant/components/zha/config_flow.py
45
20
async def _async_create_radio_entity(self) -> FlowResult: assert self._title is not None assert self._radio_type is not None assert self._device_path is not None assert self._device_settings is not None device_settings = self._device_settings.copy() device_settings[CONF_DEVICE_PATH] = await self.hass.async_add_executor_job( usb.get_serial_by_id, self._device_path ) return self.async_create_entry( title=self._title, data={ CONF_DEVICE: device_settings, CONF_RADIO_TYPE: self._radio_type.name, }, )
ZHA backup/restore config flow (#77044)
_async_create_radio_entity
f78b39bdbfbe151e8bab72610b6fe03afc8c0747
core
config_flow.py
12
17
https://github.com/home-assistant/core.git
1
94
0
30
143
Python
{ "docstring": "Create a config entity with the current flow state.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
async def _async_create_radio_entity(self) -> FlowResult: assert self._title is not None assert self._radio_type is not None assert self._device_path is not None assert self._device_settings is not None device_settings = self._device_settings.copy() device_settings[CONF_DEVICE_PATH] = await self.hass.async_add_executor_job( usb.get_serial_by_id, self._device_path ) return self.async_create_entry( title=self._title, data={ CONF_DEVICE: device_settings, CONF_RADIO_TYPE: self._radio_type.name, }, )
53,905
215,279
52
salt/transport/zeromq.py
17
14
def publish_daemon(self, publish_payload, *args, **kwargs): context = zmq.Context(1) ioloop = salt.ext.tornado.ioloop.IOLoo
Refactor into transports and channels
publish_daemon
d4e6111086ff713eb6609dc6c98cec98aded2564
salt
zeromq.py
11
9
https://github.com/saltstack/salt.git
1
68
0
15
67
Python
{ "docstring": "\n Bind to the interface specified in the configuration file\n ", "language": "en", "n_whitespaces": 24, "n_words": 9, "vocab_size": 8 }
def publish_daemon(self, publish_payload, *args, **kwargs): context = zmq.Context(1) ioloop = salt.ext.tornado.ioloop.IOLoop() ioloop.make_current() # Set up the context
47,917
196,417
362
sympy/printing/str.py
124
23
def _print_Pow(self, expr, rational=False): PREC = precedence(expr) if expr.exp is S.Half and not rational: return "sqrt(%s)" % self._print(expr.base) if expr.is_commutative: if -expr.exp is S.Half and
Moved imports to higher level
_print_Pow
59d22b6bb7287613d598611027f640d068ca5748
sympy
str.py
17
15
https://github.com/sympy/sympy.git
11
218
0
84
347
Python
{ "docstring": "Printing helper function for ``Pow``\n\n Parameters\n ==========\n\n rational : bool, optional\n If ``True``, it will not attempt printing ``sqrt(x)`` or\n ``x**S.Half`` as ``sqrt``, and will use ``x**(1/2)``\n instead.\n\n See examples for additional details\n\n Examples\n ========\n\n >>> from sympy import sqrt, StrPrinter\n >>> from sympy.abc import x\n\n How ``rational`` keyword works with ``sqrt``:\n\n >>> printer = StrPrinter()\n >>> printer._print_Pow(sqrt(x), rational=True)\n 'x**(1/2)'\n >>> printer._print_Pow(sqrt(x), rational=False)\n 'sqrt(x)'\n >>> printer._print_Pow(1/sqrt(x), rational=True)\n 'x**(-1/2)'\n >>> printer._print_Pow(1/sqrt(x), rational=False)\n '1/sqrt(x)'\n\n Notes\n =====\n\n ``sqrt(x)`` is canonicalized as ``Pow(x, S.Half)`` in SymPy,\n so there is no need of defining a separate printer for ``sqrt``.\n Instead, it should be handled here as well.\n ", "language": "en", "n_whitespaces": 307, "n_words": 102, "vocab_size": 81 }
def _print_Pow(self, expr, rational=False): PREC = precedence(expr) if expr.exp is S.Half and not rational: return "sqrt(%s)" % self._print(expr.base) if expr.is_commutative: if -expr.exp is S.Half and not rational: # Note: Don't test "expr.exp == -S.Half" here, because that will # match -0.5, which we don't want. return "%s/sqrt(%s)" % tuple(map(lambda arg: self._print(arg), (S.One, expr.base))) if expr.exp is -S.One: # Similarly to the S.Half case, don't test with "==" here. return '%s/%s' % (self._print(S.One), self.parenthesize(expr.base, PREC, strict=False)) e = self.parenthesize(expr.exp, PREC, strict=False) if self.printmethod == '_sympyrepr' and expr.exp.is_Rational and expr.exp.q != 1: # the parenthesized exp should be '(Rational(a, b))' so strip parens, # but just check to be sure. if e.startswith('(Rational'): return '%s**%s' % (self.parenthesize(expr.base, PREC, strict=False), e[1:-1]) return '%s**%s' % (self.parenthesize(expr.base, PREC, strict=False), e)
117,386
320,838
279
qutebrowser/misc/sessions.py
70
21
def _save_tab(self, tab, active, minimal=False): data: _JsonType = {'history': []} if active: data['active'] = True if minimal: history = [tab.history.current_item()] else: history = tab.history for idx, item in enumerate(history): qtutils.ensure_valid(item) item_data = self._save_tab_item(tab, idx, item) if item.url().scheme() == 'qute' and item.url().host() == 'back': # don't add qute://back to the session file if item_data.get('active', False) and data['history']: # mark entry before qute://back as active data['history'][-1]
Add --minimal option to session-save command Currently the session-save command makes a dump of all tabs' history and stores it in the session file. The --minimal flag adds the option to store only the last item of the history. Signed-off-by: shirenn <[email protected]>
_save_tab
4026854f45b63ec71bdbef42d71831beb5f10714
qutebrowser
sessions.py
16
17
https://github.com/qutebrowser/qutebrowser.git
8
148
0
54
253
Python
{ "docstring": "Get a dict with data for a single tab.\n\n Args:\n tab: The WebView to save.\n active: Whether the tab is currently active.\n ", "language": "en", "n_whitespaces": 58, "n_words": 22, "vocab_size": 21 }
def _save_tab(self, tab, active, minimal=False): data: _JsonType = {'history': []} if active: data['active'] = True if minimal: history = [tab.history.current_item()] else: history = tab.history for idx, item in enumerate(history): qtutils.ensure_valid(item) item_data = self._save_tab_item(tab, idx, item) if item.url().scheme() == 'qute' and item.url().host() == 'back': # don't add qute://back to the session file if item_data.get('active', False) and data['history']: # mark entry before qute://back as active data['history'][-1]['active'] = True else: data['history'].append(item_data) return data
29,751
132,415
141
python/ray/tune/tests/test_checkpoint_manager.py
38
18
def testBestCheckpoints(self): keep_checkpoints_num = 4 checkpoint_manager = self.checkpoint_manager(keep_checkpoints_num) checkpoints = [ Checkpoint(Checkpoint.PERSISTENT, i, self.mock_result(i)) for i in range(16) ] random.shuffle(checkpoints) for checkpoint in checkpoints: checkpoint_manager.on_checkpoint(checkpoint) best_checkpoints = checkpoint_manager.best_checkpoints() self.assertEqual(len(best_checkpoints), keep_checkpoints_num) for i in range(len(best_checkpoints)): self.assertEqual(best_checkpoints[i].val
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
testBestCheckpoints
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
ray
test_checkpoint_manager.py
11
13
https://github.com/ray-project/ray.git
4
104
0
29
164
Python
{ "docstring": "\n Tests that the best checkpoints are tracked and ordered correctly.\n ", "language": "en", "n_whitespaces": 25, "n_words": 10, "vocab_size": 10 }
def testBestCheckpoints(self): keep_checkpoints_num = 4 checkpoint_manager = self.checkpoint_manager(keep_checkpoints_num) checkpoints = [ Checkpoint(Checkpoint.PERSISTENT, i, self.mock_result(i)) for i in range(16) ] random.shuffle(checkpoints) for checkpoint in checkpoints: checkpoint_manager.on_checkpoint(checkpoint) best_checkpoints = checkpoint_manager.best_checkpoints() self.assertEqual(len(best_checkpoints), keep_checkpoints_num) for i in range(len(best_checkpoints)): self.assertEqual(best_checkpoints[i].value, i + 12)
40,839
173,342
182
cps/config_sql.py
40
20
def save(self): s = self._read_from_storage() # type: _Settings for k, v in self.__dict__.items(): if k[0] == '_': continue if hasattr(s, k): setattr(s, k, v) log.debug("_ConfigSQL updating storage") self._session.merge(s) try: self._sessi
Code cosmetics
save
4ea80e9810a14ca3617f08a4ae5cfa6b50482e9a
calibre-web
config_sql.py
11
15
https://github.com/janeczku/calibre-web.git
5
99
0
38
170
Python
{ "docstring": "Apply all configuration values to the underlying storage.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
def save(self): s = self._read_from_storage() # type: _Settings for k, v in self.__dict__.items(): if k[0] == '_': continue if hasattr(s, k): setattr(s, k, v) log.debug("_ConfigSQL updating storage") self._session.merge(s) try: self._session.commit() except OperationalError as e: log.error('Database error: %s', e) self._session.rollback() self.load()
18,538
89,408
636
tests/sentry/rules/history/test_preview.py
153
36
def test_transactions(self): prev_hour = timezone.now() - timedelta(hours=1) event = self.transaction_data.copy() event.update( { "start_timestamp": iso_format(prev_hour - timedelta(minutes=1)), "timestamp": iso_format(prev_hour), "tags": {"foo": "bar"}, "transaction": "this is where a transaction's 'message' is stored", } ) transaction = self.store_event(project_id=self.project.id, data=event) perf_issue = transaction.groups[0] perf_issue.update(first_seen=prev_hour) Activity.objects.create( project=self.project, group=perf_issue, type=ActivityType.SET_REGRESSION.value, datetime=prev_hour, data={"event_id": transaction.event_id}, ) conditions = [{"id": "sentry.rules.conditions.regression_event.RegressionEventCondition"}] filters = [ { "id": "sentry.rules.filters.tagged_event.TaggedEventFilter", "key": "foo", "match": "eq", "value": "bar", } ] result = preview(self.project, conditions, filters, "all", "all", 0) assert perf_issue.id in result filters[0]["value"] = "baz" result = preview(self.project, conditions, filters, "all", "all", 0) assert perf_issue.id not in result filters = [ { "id": "sentry.rules.filters.event_attribute.EventAttributeFilter", "attribute": "message", "match": "eq", "value": "this is where a transaction's 'message' is stored", } ] result = preview(self.project, conditions, filters, "all", "all", 0) assert perf_issue.id in result filters[0]["value"] = "wrong message" result = preview(self.project, conditions, filters, "all", "all", 0)
feat(alert-preview): last triggered (#42098) Attaches `last_triggered` to group info. `preview` now returns a mapping of group_ids to triggers, updated tests to reflect that.
test_transactions
583a7ec15744b2ca8a9c56df484516111dbf783d
sentry
test_preview.py
15
59
https://github.com/getsentry/sentry.git
1
311
0
81
524
Python
{ "docstring": "\n conditions = [{\"id\": \"sentry.rules.conditions.first_seen_event.FirstSeenEventCondition\"}]\n filters = [{\n \"id\": \"sentry.rules.filters.tagged_event.TaggedEventFilter\",\n \"key\": \"foo\",\n \"match\": \"eq\",\n \"value\": \"bar\",\n }]\n result = preview(self.project, conditions, filters, \"all\", \"all\", 0)\n assert perf_issue.id in result\n ", "language": "en", "n_whitespaces": 115, "n_words": 28, "vocab_size": 24 }
def test_transactions(self): prev_hour = timezone.now() - timedelta(hours=1) event = self.transaction_data.copy() event.update( { "start_timestamp": iso_format(prev_hour - timedelta(minutes=1)), "timestamp": iso_format(prev_hour), "tags": {"foo": "bar"}, "transaction": "this is where a transaction's 'message' is stored", } ) transaction = self.store_event(project_id=self.project.id, data=event) perf_issue = transaction.groups[0] perf_issue.update(first_seen=prev_hour) Activity.objects.create( project=self.project, group=perf_issue, type=ActivityType.SET_REGRESSION.value, datetime=prev_hour, data={"event_id": transaction.event_id}, ) conditions = [{"id": "sentry.rules.conditions.regression_event.RegressionEventCondition"}] filters = [ { "id": "sentry.rules.filters.tagged_event.TaggedEventFilter", "key": "foo", "match": "eq", "value": "bar", } ] result = preview(self.project, conditions, filters, "all", "all", 0) assert perf_issue.id in result filters[0]["value"] = "baz" result = preview(self.project, conditions, filters, "all", "all", 0) assert perf_issue.id not in result filters = [ { "id": "sentry.rules.filters.event_attribute.EventAttributeFilter", "attribute": "message", "match": "eq", "value": "this is where a transaction's 'message' is stored", } ] result = preview(self.project, conditions, filters, "all", "all", 0) assert perf_issue.id in result filters[0]["value"] = "wrong message" result = preview(self.project, conditions, filters, "all", "all", 0) assert perf_issue.id not in result # this can be tested when SNS-1891 is fixed
19,271
96,067
81
tests/sentry/models/test_release.py
12
9
def test_follows_semver_all_releases_semver_and_missing_package_semver_release_version(self): assert ( follows_semver_versioning_scheme( org_id=self.org.id, project_id=self.proj_1.id, release_version="2.0.0" ) is False )
fix(semver): Fixes semver check bug (#31567) Fixes bug that considers a release to be following semver even if the release does not have a package
test_follows_semver_all_releases_semver_and_missing_package_semver_release_version
8e70206e59a81fba2f9a833aed8aa762848c335c
sentry
test_release.py
12
7
https://github.com/getsentry/sentry.git
1
33
0
11
54
Python
{ "docstring": "\n Test that ensures that even if a project is following semver, then if the release_version\n supplied lacks a package, then for that specific release we opt the project out of being\n considered a semver project\n ", "language": "en", "n_whitespaces": 64, "n_words": 35, "vocab_size": 26 }
def test_follows_semver_all_releases_semver_and_missing_package_semver_release_version(self): assert ( follows_semver_versioning_scheme( org_id=self.org.id, project_id=self.proj_1.id, release_version="2.0.0" ) is False )
48,612
197,534
22
sympy/stats/joint_rv_types.py
18
7
def MultivariateT(syms, mu, sigma, v): return multivariate_rv(Mu
Improved some documentation in the stats module
MultivariateT
7fe8e027ae1d7f683243c0229b961671a6cbb4c5
sympy
joint_rv_types.py
7
2
https://github.com/sympy/sympy.git
1
25
0
16
37
Python
{ "docstring": "\n Creates a joint random variable with multivariate T-distribution.\n\n Parameters\n ==========\n\n syms : A symbol/str\n For identifying the random variable.\n mu : A list/matrix\n Representing the location vector\n sigma : The shape matrix for the distribution\n\n Examples\n ========\n\n >>> from sympy.stats import density, MultivariateT\n >>> from sympy import Symbol\n\n >>> x = Symbol(\"x\")\n >>> X = MultivariateT(\"x\", [1, 1], [[1, 0], [0, 1]], 2)\n\n >>> density(X)(1, 2)\n 2/(9*pi)\n\n Returns\n =======\n\n RandomSymbol\n\n ", "language": "en", "n_whitespaces": 139, "n_words": 70, "vocab_size": 56 }
def MultivariateT(syms, mu, sigma, v): return multivariate_rv(MultivariateTDistribution, syms, mu, sigma, v) #------------------------------------------------------------------------------- # Multivariate Normal Gamma distribution ---------------------------------------
39,678
165,559
49
pandas/core/indexes/base.py
17
8
def _can_hold_identifiers_and_holds_name(self, name) -> bool: if self.is_object() or is_string_dtype(self.dtype) or self.is_categorical(): return name in self return False
BUG: DataFrame.getattribute raising if columns have dtype string (#46301)
_can_hold_identifiers_and_holds_name
3aec1d5756f363e25062914dbb82bd8b25b399ce
pandas
base.py
10
12
https://github.com/pandas-dev/pandas.git
4
36
0
15
60
Python
{ "docstring": "\n Faster check for ``name in self`` when we know `name` is a Python\n identifier (e.g. in NDFrame.__getattr__, which hits this to support\n . key lookup). For indexes that can't hold identifiers (everything\n but object & categorical) we just return False.\n\n https://github.com/pandas-dev/pandas/issues/19764\n ", "language": "en", "n_whitespaces": 84, "n_words": 41, "vocab_size": 39 }
def _can_hold_identifiers_and_holds_name(self, name) -> bool: if self.is_object() or is_string_dtype(self.dtype) or self.is_categorical(): return name in self return False
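A sketch of the behavior (assumes pandas; note this is a private helper, called here purely for illustration):

```python
import pandas as pd

obj_idx = pd.Index(["a", "b"])
int_idx = pd.Index([1, 2])
print(obj_idx._can_hold_identifiers_and_holds_name("a"))  # True -> df.a attribute access can work
print(int_idx._can_hold_identifiers_and_holds_name("a"))  # False -> fast negative, no membership scan
```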
71,773
247,605
282
tests/handlers/test_directory.py
63
15
def test_remove_other_alias(self) -> None: # Create a second alias. other_test_alias = "#test2:test" other_room_alias = self._add_alias(other_test_alias) # Set the alias as the canonical alias for this room. self._set_canonical_alias( { "alias": self.test_alias, "alt_aliases": [self.test_alias, other_test_alias], } ) data = self._get_canonical_alias() self.assertEqual(data["content"]["alias"], self.test_alias) self.assertEqual( data["content"]["alt_aliases"], [self.test_alias, other_test_alias] ) # Delete the second alia
Add type hints to some tests/handlers files. (#12224)
test_remove_other_alias
5dd949bee6158a8b651db9f2ae417a62c8184bfd
synapse
test_directory.py
12
23
https://github.com/matrix-org/synapse.git
1
146
0
44
247
Python
{ "docstring": "Removing an alias listed as in alt_aliases should remove it there too.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
def test_remove_other_alias(self) -> None: # Create a second alias. other_test_alias = "#test2:test" other_room_alias = self._add_alias(other_test_alias) # Set the alias as the canonical alias for this room. self._set_canonical_alias( { "alias": self.test_alias, "alt_aliases": [self.test_alias, other_test_alias], } ) data = self._get_canonical_alias() self.assertEqual(data["content"]["alias"], self.test_alias) self.assertEqual( data["content"]["alt_aliases"], [self.test_alias, other_test_alias] ) # Delete the second alias. self.get_success( self.handler.delete_association( create_requester(self.admin_user), other_room_alias ) ) data = self._get_canonical_alias() self.assertEqual(data["content"]["alias"], self.test_alias) self.assertEqual(data["content"]["alt_aliases"], [self.test_alias])
117,026
319,960
60
src/documents/tests/test_api.py
10
8
def test_get_comments_no_doc(self): response = self.client.get( "/api/documents/500/comments/", format="js
Starts on implementing tests for the new API
test_get_comments_no_doc
6d5d308d6c7b7e359ba72964a300634e1065ace9
paperless-ngx
test_api.py
10
6
https://github.com/paperless-ngx/paperless-ngx.git
1
31
0
10
54
Python
{ "docstring": "\n GIVEN:\n - A request to get comments from a non-existent document\n WHEN:\n - API request for document comments is made\n THEN:\n - HTTP 404 is returned\n ", "language": "en", "n_whitespaces": 88, "n_words": 26, "vocab_size": 20 }
def test_get_comments_no_doc(self): response = self.client.get( "/api/documents/500/comments/", format="json", ) self.assertEqual(response.status_code, 404)
117,678
321,351
62
tests/unit/keyinput/test_basekeyparser.py
20
20
def test_mapping_keypad(self, config_stub, keyparser): config_stub.val.bindings.commands = {'normal': {'a': 'nop'}} config_stub.val.bindings.key_
Run scripts/dev/rewrite_enums.py
test_mapping_keypad
0877fb0d78635692e481c8bde224fac5ad0dd430
qutebrowser
test_basekeyparser.py
11
6
https://github.com/qutebrowser/qutebrowser.git
1
78
0
18
134
Python
{ "docstring": "Make sure falling back to non-numpad keys works with mappings.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
def test_mapping_keypad(self, config_stub, keyparser): config_stub.val.bindings.commands = {'normal': {'a': 'nop'}} config_stub.val.bindings.key_mappings = {'1': 'a'} info = keyutils.KeyInfo(Qt.Key.Key_1, Qt.KeyboardModifier.KeypadModifier) keyparser.handle(info.to_event()) keyparser.execute.assert_called_once_with('nop', None)
826
5,799
71
instapy/like_util.py
32
14
def verify_liked_image(browser, logger): browser.refresh() unlike_xpath = read_xpath(like_image.__name__, "un
PR - Fix `extract_text_from_element()` and `find_element*()` to `find_element()` (#6438) * Updated getUserData() and find_element* Signed-off-by: elulcao <[email protected]> Thanks @breuerfelix for reviewing, 🚀 People in this thread please let me know if something is not OK, IG changed a lot these days. 🤗 @her
verify_liked_image
2a157d452611d37cf50ccb7d56ff1a06e9790ecb
InstaPy
like_util.py
11
9
https://github.com/InstaPy/InstaPy.git
2
55
0
30
94
Python
{ "docstring": "Check for a ban on likes using the last liked image", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
def verify_liked_image(browser, logger): browser.refresh() unlike_xpath = read_xpath(like_image.__name__, "unlike") like_elem = browser.find_elements(By.XPATH, unlike_xpath) if len(like_elem) == 1: return True else: logger.warning("--> Image was NOT liked! You have a BLOCK on likes!") return False
70,811
245,505
41
mmdet/structures/mask/structures.py
13
8
def get_bboxes(self, dst_type='hbb'): from ..bbox import get_box_type
[Refactor] Refactor pipelines with boxlist. (#8562) * Refactor pipelines and data_preprocesser by boxlist * Refactor browse_dataset.py * Update * Update * Update * Update * update * Update * Change with_box_wrapped to with_boxlist * Fix comments * Fix commits * Update UT
get_bboxes
af063a6f25ddae4de90646f86b2db824f3d00138
mmdetection
structures.py
8
4
https://github.com/open-mmlab/mmdetection.git
1
31
0
13
55
Python
{ "docstring": "Get the certain type boxes from masks.\n\n Please refer to ``mmdet.structures.bbox.box_type`` for more details of\n the box type.\n\n Args:\n dst_type: Destination box type.\n\n Returns:\n :obj:`BaseBoxes`: Certain type boxes.\n ", "language": "en", "n_whitespaces": 85, "n_words": 28, "vocab_size": 24 }
def get_bboxes(self, dst_type='hbb'): from ..bbox import get_box_type _, box_type_cls = get_box_type(dst_type) return box_type_cls.from_instance_masks(self)
48,349
197,116
41
sympy/tensor/tensor.py
8
5
def deprecate_call(): sympy_deprecation_warning( , deprecated_since_version="1.5", active_deprecations_target="deprecated-tensor-
Update the various tensor deprecations
deprecate_call
cba899d4137b0b65f6850120ee42cd4fcd4f9dbf
sympy
tensor.py
9
10
https://github.com/sympy/sympy.git
1
21
0
8
37
Python
{ "docstring": "\n Calling a tensor like Tensor(*indices) is deprecated. Use\n Tensor.substitute_indices() instead.\n ", "language": "en", "n_whitespaces": 32, "n_words": 10, "vocab_size": 10 }
def deprecate_call(): sympy_deprecation_warning( , deprecated_since_version="1.5", active_deprecations_target="deprecated-tensor-fun-eval", stacklevel=4, )
12,059
60,271
194
code/deep/BJMMD/caffe/python/caffe/net_spec.py
59
19
def assign_proto(proto, name, val): is_repeated_field = hasattr(getattr(proto, name), 'extend') if is_repeated_field and not isinstance(val, list): val = [val] if isinstance(val, list): if isinstance(val[0], dict): for item in val: proto_item = getattr(proto, name).add() for k, v in six.iteritems(item):
Balanced joint maximum mean discrepancy for deep transfer learning
assign_proto
cc4d0564756ca067516f71718a3d135996525909
transferlearning
net_spec.py
16
17
https://github.com/jindongwang/transferlearning.git
9
151
0
37
230
Python
{ "docstring": "Assign a Python object to a protobuf message, based on the Python\n type (in recursive fashion). Lists become repeated fields/messages, dicts\n become messages, and other types are assigned directly. For convenience,\n repeated fields whose values are not lists are converted to single-element\n lists; e.g., `my_repeated_int_field=3` is converted to\n `my_repeated_int_field=[3]`.", "language": "en", "n_whitespaces": 63, "n_words": 49, "vocab_size": 40 }
def assign_proto(proto, name, val): is_repeated_field = hasattr(getattr(proto, name), 'extend') if is_repeated_field and not isinstance(val, list): val = [val] if isinstance(val, list): if isinstance(val[0], dict): for item in val: proto_item = getattr(proto, name).add() for k, v in six.iteritems(item): assign_proto(proto_item, k, v) else: getattr(proto, name).extend(val) elif isinstance(val, dict): for k, v in six.iteritems(val): assign_proto(getattr(proto, name), k, v) else: setattr(proto, name, val)
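A sketch with a tiny stand-in object, assuming the function above is in scope; real callers pass protobuf messages, whose repeated fields expose `.extend()` just like the plain list used here.

```python
class FakeMsg:
    def __init__(self):
        self.tags = []   # plays the role of a repeated field (.extend works)
        self.name = ""   # plays the role of a scalar field

m = FakeMsg()
assign_proto(m, "tags", "conv1")             # scalar wrapped into a one-element list
assign_proto(m, "tags", ["relu1", "pool1"])  # list extends the repeated field
assign_proto(m, "name", "layer1")            # scalar assigned directly
print(m.tags, m.name)  # ['conv1', 'relu1', 'pool1'] layer1
```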
48,155
196,759
66
sympy/assumptions/handlers/common.py
13
8
def __new__(cls, *args, **kwargs): sympy_deprecation_warning( , depr
Update the AskHandler deprecation warnings n.b., the issue number in the original warning message was wrong. It should have been #20837.
__new__
ad766d1c02943e86f50559abfd0c72e582c9ca6a
sympy
common.py
9
10
https://github.com/sympy/sympy.git
1
39
0
12
64
Python
{ "docstring": "\n The AskHandler system is deprecated. The AskHandler class should\n be replaced with the multipledispatch handler of Predicate\n ", "language": "en", "n_whitespaces": 51, "n_words": 17, "vocab_size": 15 }
def __new__(cls, *args, **kwargs): sympy_deprecation_warning( , deprecated_since_version="1.8", active_deprecations_target='deprecated-askhandler', ) return super().__new__(cls, *args, **kwargs)
76,613
260,996
308
sklearn/utils/sparsefuncs.py
121
22
def incr_mean_variance_axis(X, *, axis, last_mean, last_var, last_n, weights=None): _raise_error_wrong_axis(axis) if not isinstance(X, (sp.csr_matrix, sp.csc_matrix)): _raise_typeerror(X) if np.size(last_n) == 1: last_n = np.full(last_mean.shape, last_n, dtype=last_mean.dtype) if not (np.size(last_mean) == np.size(last_var) == np.size(last_n)): raise ValueError("last_mean, last_var, last_n do not have the same shapes.")
DOC Ensures that incr_mean_variance_axis passes numpydoc validation (#24477)
incr_mean_variance_axis
02e36b4d866d7c7b14397ab291cb3e97d1105d5c
scikit-learn
sparsefuncs.py
17
26
https://github.com/scikit-learn/scikit-learn.git
9
206
0
74
366
Python
{ "docstring": "Compute incremental mean and variance along an axis on a CSR or CSC matrix.\n\n last_mean, last_var are the statistics computed at the last step by this\n function. Both must be initialized to 0-arrays of the proper size, i.e.\n the number of features in X. last_n is the number of samples encountered\n until now.\n\n Parameters\n ----------\n X : CSR or CSC sparse matrix of shape (n_samples, n_features)\n Input data.\n\n axis : {0, 1}\n Axis along which the axis should be computed.\n\n last_mean : ndarray of shape (n_features,) or (n_samples,), dtype=floating\n Array of means to update with the new data X.\n Should be of shape (n_features,) if axis=0 or (n_samples,) if axis=1.\n\n last_var : ndarray of shape (n_features,) or (n_samples,), dtype=floating\n Array of variances to update with the new data X.\n Should be of shape (n_features,) if axis=0 or (n_samples,) if axis=1.\n\n last_n : float or ndarray of shape (n_features,) or (n_samples,), \\\n dtype=floating\n Sum of the weights seen so far, excluding the current weights\n If not float, it should be of shape (n_features,) if\n axis=0 or (n_samples,) if axis=1. If float it corresponds to\n having same weights for all samples (or features).\n\n weights : ndarray of shape (n_samples,) or (n_features,), default=None\n If axis is set to 0 shape is (n_samples,) or\n if axis is set to 1 shape is (n_features,).\n If it is set to None, then samples are equally weighted.\n\n .. versionadded:: 0.24\n\n Returns\n -------\n means : ndarray of shape (n_features,) or (n_samples,), dtype=floating\n Updated feature-wise means if axis = 0 or\n sample-wise means if axis = 1.\n\n variances : ndarray of shape (n_features,) or (n_samples,), dtype=floating\n Updated feature-wise variances if axis = 0 or\n sample-wise variances if axis = 1.\n\n n : ndarray of shape (n_features,) or (n_samples,), dtype=integral\n Updated number of seen samples per feature if axis=0\n or number of seen features per sample if axis=1.\n\n If weights is not None, n is a sum of the weights of the seen\n samples or features instead of the actual number of seen\n samples or features.\n\n Notes\n -----\n NaNs are ignored in the algorithm.\n ", "language": "en", "n_whitespaces": 579, "n_words": 344, "vocab_size": 134 }
def incr_mean_variance_axis(X, *, axis, last_mean, last_var, last_n, weights=None): _raise_error_wrong_axis(axis) if not isinstance(X, (sp.csr_matrix, sp.csc_matrix)): _raise_typeerror(X) if np.size(last_n) == 1: last_n = np.full(last_mean.shape, last_n, dtype=last_mean.dtype) if not (np.size(last_mean) == np.size(last_var) == np.size(last_n)): raise ValueError("last_mean, last_var, last_n do not have the same shapes.") if axis == 1: if np.size(last_mean) != X.shape[0]: raise ValueError( "If axis=1, then last_mean, last_n, last_var should be of " f"size n_samples {X.shape[0]} (Got {np.size(last_mean)})." ) else: # axis == 0 if np.size(last_mean) != X.shape[1]: raise ValueError( "If axis=0, then last_mean, last_n, last_var should be of " f"size n_features {X.shape[1]} (Got {np.size(last_mean)})." ) X = X.T if axis == 1 else X if weights is not None: weights = _check_sample_weight(weights, X, dtype=X.dtype) return _incr_mean_var_axis0( X, last_mean=last_mean, last_var=last_var, last_n=last_n, weights=weights )
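A minimal usage sketch for incr_mean_variance_axis, assuming scikit-learn and SciPy are installed; the toy matrices are illustrative:

import numpy as np
import scipy.sparse as sp
from sklearn.utils.sparsefuncs import incr_mean_variance_axis

# First batch: two samples, two features.
X1 = sp.csr_matrix(np.array([[0.0, 1.0], [2.0, 3.0]]))
mean, var, n = incr_mean_variance_axis(
    X1, axis=0, last_mean=np.zeros(2), last_var=np.zeros(2), last_n=0
)
# mean == [1., 2.], var == [1., 1.], n == [2., 2.]

# A second batch updates the running statistics without revisiting X1.
X2 = sp.csr_matrix(np.array([[4.0, 5.0]]))
mean, var, n = incr_mean_variance_axis(
    X2, axis=0, last_mean=mean, last_var=var, last_n=n
)
# mean == [2., 3.] over all three samples; n == [3., 3.]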
106,866
308,105
346
tests/components/homekit/test_type_thermostats.py
118
38
async def test_thermostat_with_no_off_after_recheck(hass, hk_driver, events): entity_id = "climate.test" # support_auto = True hass.states.async_set( entity_id, HVACMode.COOL, { ATTR_SUPPORTED_FEATURES: SUPPORT_TARGET_TEMPERATURE | SUPPORT_TARGET_TEMPERATURE_RANGE, ATTR_HVAC_MODES: [], }, ) await hass.async_block_till_done() acc = Thermostat(hass, hk_driver, "Climate", entity_id, 1, None) hk_driver.add_accessory(acc) await acc.run() await hass.async_block_till_done() assert acc.char_cooling_thresh_temp.value == 23.0 assert acc.char_heating_thresh_temp.value == 19.0 assert acc.char_cooling_thresh_temp.properties[PROP_MAX_VALUE] == DEFAULT_MAX_TEMP assert acc.char_cooling_thresh_temp.properties[PROP_MIN_VALUE] == 7.0 assert acc.char_cooling_thresh_temp.properties[PROP_MIN_STEP] == 0.1 assert acc.char_heating_thresh_temp.properties[PROP_MAX_VALUE] == DEFAULT_MAX_TEMP assert acc.char_heating_thresh_temp.properties[PROP_MIN_VALUE] == 7.0 assert acc.char_heating_thresh_temp.properties[PROP_MIN_STEP] == 0.1 assert acc.char_target_heat_cool.value == 2
Cleanup HVACAction and HVACMode in tests (#78813)
test_thermostat_with_no_off_after_recheck
f453726b1862d1d247f6aefdd5f23455b87c11cf
core
test_type_thermostats.py
11
43
https://github.com/home-assistant/core.git
1
294
0
69
406
Python
{ "docstring": "Test if a thermostat that is not ready when we first see it that actually does not have off.", "language": "en", "n_whitespaces": 18, "n_words": 19, "vocab_size": 17 }
async def test_thermostat_with_no_off_after_recheck(hass, hk_driver, events): entity_id = "climate.test" # support_auto = True hass.states.async_set( entity_id, HVACMode.COOL, { ATTR_SUPPORTED_FEATURES: SUPPORT_TARGET_TEMPERATURE | SUPPORT_TARGET_TEMPERATURE_RANGE, ATTR_HVAC_MODES: [], }, ) await hass.async_block_till_done() acc = Thermostat(hass, hk_driver, "Climate", entity_id, 1, None) hk_driver.add_accessory(acc) await acc.run() await hass.async_block_till_done() assert acc.char_cooling_thresh_temp.value == 23.0 assert acc.char_heating_thresh_temp.value == 19.0 assert acc.char_cooling_thresh_temp.properties[PROP_MAX_VALUE] == DEFAULT_MAX_TEMP assert acc.char_cooling_thresh_temp.properties[PROP_MIN_VALUE] == 7.0 assert acc.char_cooling_thresh_temp.properties[PROP_MIN_STEP] == 0.1 assert acc.char_heating_thresh_temp.properties[PROP_MAX_VALUE] == DEFAULT_MAX_TEMP assert acc.char_heating_thresh_temp.properties[PROP_MIN_VALUE] == 7.0 assert acc.char_heating_thresh_temp.properties[PROP_MIN_STEP] == 0.1 assert acc.char_target_heat_cool.value == 2 hass.states.async_set( entity_id, HVACMode.HEAT_COOL, { ATTR_TARGET_TEMP_HIGH: 22.0, ATTR_TARGET_TEMP_LOW: 20.0, ATTR_CURRENT_TEMPERATURE: 18.0, ATTR_HVAC_ACTION: HVACAction.HEATING, ATTR_HVAC_MODES: [HVACMode.HEAT_COOL, HVACMode.AUTO], }, ) await hass.async_block_till_done() assert acc.char_heating_thresh_temp.value == 20.0 assert acc.char_cooling_thresh_temp.value == 22.0 assert acc.char_current_heat_cool.value == 1 assert acc.char_target_heat_cool.value == 3 assert acc.char_current_temp.value == 18.0 assert acc.char_display_units.value == 0
16,548
76,592
69
wagtail/contrib/forms/models.py
19
10
def save(self, *args, **kwargs): is_new = self.pk is None
AbstractFormField.save - add to the docstring about clean name
save
10f8e8d21640f019eeb22e91ba3ee1c5284c4574
wagtail
models.py
11
6
https://github.com/wagtail/wagtail.git
2
47
0
16
78
Python
{ "docstring": "\n When new fields are created, generate a template safe ascii name to use as the\n JSON storage reference for this field. Previously created fields will be updated\n to use the legacy unidecode method via checks & _migrate_legacy_clean_name.\n We do not want to update the clean name on any subsequent changes to the label\n as this would invalidate any previously submitted data.\n ", "language": "en", "n_whitespaces": 104, "n_words": 61, "vocab_size": 49 }
def save(self, *args, **kwargs): is_new = self.pk is None if is_new: clean_name = get_field_clean_name(self.label) self.clean_name = clean_name super().save(*args, **kwargs)
29,934
133,135
162
python/ray/util/dask/scheduler.py
77
17
def dask_task_wrapper(func, repack, key, ray_pretask_cbs, ray_posttask_cbs, *args): if ray_pretask_cbs is not None: pre_states = [ cb(key, args) if cb is not None else None for cb in ray_pretask_cbs ] repacked_args, repacked_deps = repack(args) # Recursively execute Dask-inlined tasks. actual_args = [_execute_task(a, repacked_deps) for a in repacked_args] # Execute the actual underlying Dask task. result = func(*actual_args) if ray_posttask_cbs is not None: for cb, pre_state in zip(ray_posttask_cbs, pre_states): if cb is not None: cb(key, result, pre_state) return result
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
dask_task_wrapper
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
ray
scheduler.py
13
13
https://github.com/ray-project/ray.git
8
107
0
52
159
Python
{ "docstring": "\n A Ray remote function acting as a Dask task wrapper. This function will\n repackage the given flat `args` into its original data structures using\n `repack`, execute any Dask subtasks within the repackaged arguments\n (inlined by Dask's optimization pass), and then pass the concrete task\n arguments to the provide Dask task function, `func`.\n\n Args:\n func (callable): The Dask task function to execute.\n repack (callable): A function that repackages the provided args into\n the original (possibly nested) Python objects.\n key (str): The Dask key for this task.\n ray_pretask_cbs (callable): Pre-task execution callbacks.\n ray_posttask_cbs (callable): Post-task execution callback.\n *args (ObjectRef): Ray object references representing the Dask task's\n arguments.\n\n Returns:\n The output of the Dask task. In the context of Ray, a\n dask_task_wrapper.remote() invocation will return a Ray object\n reference representing the Ray task's result.\n ", "language": "en", "n_whitespaces": 241, "n_words": 131, "vocab_size": 87 }
def dask_task_wrapper(func, repack, key, ray_pretask_cbs, ray_posttask_cbs, *args): if ray_pretask_cbs is not None: pre_states = [ cb(key, args) if cb is not None else None for cb in ray_pretask_cbs ] repacked_args, repacked_deps = repack(args) # Recursively execute Dask-inlined tasks. actual_args = [_execute_task(a, repacked_deps) for a in repacked_args] # Execute the actual underlying Dask task. result = func(*actual_args) if ray_posttask_cbs is not None: for cb, pre_state in zip(ray_posttask_cbs, pre_states): if cb is not None: cb(key, result, pre_state) return result
51,651
206,716
114
django/utils/lorem_ipsum.py
42
13
def words(count, common=True): word_list = list(COMMON_WORDS) if common else [] c = len(word_list) if count > c: count -= c while count > 0: c = min(count, len(WORDS))
Refs #33476 -- Reformatted code with Black.
words
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
lorem_ipsum.py
14
12
https://github.com/django/django.git
4
80
0
28
131
Python
{ "docstring": "\n Return a string of `count` lorem ipsum words separated by a single space.\n\n If `common` is True, then the first 19 words will be the standard\n 'lorem ipsum' words. Otherwise, all words will be selected randomly.\n ", "language": "en", "n_whitespaces": 49, "n_words": 36, "vocab_size": 30 }
def words(count, common=True): word_list = list(COMMON_WORDS) if common else [] c = len(word_list) if count > c: count -= c while count > 0: c = min(count, len(WORDS)) count -= c word_list += random.sample(WORDS, c) else: word_list = word_list[:count] return " ".join(word_list)
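A quick usage sketch, assuming a Django environment; django.utils.lorem_ipsum is the module this record comes from:

from django.utils.lorem_ipsum import words

print(words(5))                # 'lorem ipsum dolor sit amet' (first five common words)
print(words(3, common=False))  # three randomly sampled words; output varies per call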
12,969
62,402
68
.venv/lib/python3.8/site-packages/pip/_vendor/html5lib/_inputstream.py
18
9
def jumpTo(self, bytes):
upd; format
jumpTo
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
transferlearning
_inputstream.py
13
6
https://github.com/jindongwang/transferlearning.git
2
38
0
18
62
Python
{ "docstring": "Look for the next sequence of bytes matching a given sequence. If\n a match is found advance the position to the last byte of the match", "language": "en", "n_whitespaces": 32, "n_words": 26, "vocab_size": 20 }
def jumpTo(self, bytes): try: self._position = self.index(bytes, self.position) + len(bytes) - 1 except ValueError: raise StopIteration return True
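jumpTo belongs to html5lib's internal EncodingBytes type (a bytes subclass with a position cursor). The stand-in class below is a hypothetical sketch of the same pattern, find a subsequence at or after the cursor and park the cursor on the last byte of the match; it simplifies the real class, which starts its cursor at -1:

class Cursor(bytes):  # minimal stand-in, not html5lib's real EncodingBytes
    def __new__(cls, value):
        self = bytes.__new__(cls, value)
        self._position = 0  # simplified starting point
        return self

    @property
    def position(self):
        return self._position

    def jumpTo(self, needle):
        try:
            # Park the cursor on the last byte of the next match.
            self._position = self.index(needle, self._position) + len(needle) - 1
        except ValueError:
            raise StopIteration  # html5lib drives its scanning loop via the iterator protocol
        return True

buf = Cursor(b"<meta charset=utf-8>")
buf.jumpTo(b"charset")
print(buf.position)  # 12, the index of the final 't' in 'charset'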
89,301
290,182
78
homeassistant/components/mqtt/binary_sensor.py
31
11
def available(self) -> bool: expire_after: int | None = self._config.get(CONF_EXPIRE_AFTER)
Improve MQTT type hints part 1 (#80523) * Improve typing alarm_control_panel * Improve typing binary_sensor * Improve typing button * Add misssed annotation * Move CONF_EXPIRE_AFTER to _setup_from_config * Use CALL_BACK type * Remove assert, improve code style
available
b4ad03784f1d02995da39f3094c80adb4a60492b
core
binary_sensor.py
10
6
https://github.com/home-assistant/core.git
3
42
0
29
71
Python
{ "docstring": "Return true if the device is available and value has not expired.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
def available(self) -> bool: expire_after: int | None = self._config.get(CONF_EXPIRE_AFTER) # mypy doesn't know about fget: https://github.com/python/mypy/issues/6185 return MqttAvailability.available.fget(self) and ( # type: ignore[attr-defined] expire_after is None or not self._expired )
48,109
196,691
18
sympy/stats/crv_types.py
15
6
def Logistic(name, mu, s): return rv(name, LogisticDistribution, (mu, s)) #-----------
Documentation cleanup 5
Logistic
9ad8ab9fe58051cf11626ba6654852fcfec60147
sympy
crv_types.py
8
49
https://github.com/sympy/sympy.git
1
24
0
15
36
Python
{ "docstring": "\n Create a continuous random variable with a logistic distribution.\n\n Explanation\n ===========\n\n The density of the logistic distribution is given by\n\n .. math::\n f(x) := \\frac{e^{-(x-\\mu)/s}} {s\\left(1+e^{-(x-\\mu)/s}\\right)^2}\n\n Parameters\n ==========\n\n mu : Real number, the location (mean)\n s : Real number, `s > 0`, a scale\n\n Returns\n =======\n\n RandomSymbol\n\n Examples\n ========\n\n >>> from sympy.stats import Logistic, density, cdf\n >>> from sympy import Symbol\n\n >>> mu = Symbol(\"mu\", real=True)\n >>> s = Symbol(\"s\", positive=True)\n >>> z = Symbol(\"z\")\n\n >>> X = Logistic(\"x\", mu, s)\n\n >>> density(X)(z)\n exp((mu - z)/s)/(s*(exp((mu - z)/s) + 1)**2)\n\n >>> cdf(X)(z)\n 1/(exp((mu - z)/s) + 1)\n\n References\n ==========\n\n .. [1] https://en.wikipedia.org/wiki/Logistic_distribution\n .. [2] http://mathworld.wolfram.com/LogisticDistribution.html\n\n ", "language": "en", "n_whitespaces": 200, "n_words": 105, "vocab_size": 77 }
def Logistic(name, mu, s): return rv(name, LogisticDistribution, (mu, s)) #------------------------------------------------------------------------------- # Log-logistic distribution --------------------------------------------------------
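Beyond the density and cdf examples already in the docstring, a small hedged check of the distribution's moments; exact output formatting may vary by SymPy version:

from sympy import Symbol, pi, simplify
from sympy.stats import Logistic, E, variance

mu = Symbol("mu", real=True)
s = Symbol("s", positive=True)
X = Logistic("x", mu, s)

print(E(X))                                      # expected: mu
print(simplify(variance(X) - pi**2 * s**2 / 3))  # expected: 0 (variance is (pi*s)**2 / 3)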
4,867
25,205
845
ppocr/modeling/heads/local_graph.py
146
58
def __call__(self, feat_maps, comp_attribs): assert isinstance(feat_maps, paddle.Tensor) assert comp_attribs.ndim == 3 assert comp_attribs.shape[2] == 8 sorted_dist_inds_batch = [] local_graph_batch = [] knn_batch = [] node_feat_batch = [] node_label_batch = [] for batch_ind in range(comp_attribs.shape[0]): num_comps = int(comp_attribs[batch_ind, 0, 0]) comp_geo_attribs = comp_attribs[batch_ind, :num_comps, 1:7] node_labels = comp_attribs[batch_ind, :num_comps, 7].astype( np.int32) comp_centers = comp_geo_attribs[:, 0:2] distance_matrix = euclidean_distance_matrix(comp_centers, comp_centers)
add drrg
__call__
1f9400dd7374ce9cc47981372e324ff412e53ba3
PaddleOCR
local_graph.py
14
48
https://github.com/PaddlePaddle/PaddleOCR.git
2
406
0
103
607
Python
{ "docstring": "Generate local graphs as GCN input.\n\n Args:\n feat_maps (Tensor): The feature maps to extract the content\n features of text components.\n comp_attribs (ndarray): The text component attributes.\n\n Returns:\n local_graphs_node_feat (Tensor): The node features of graph.\n adjacent_matrices (Tensor): The adjacent matrices of local graphs.\n pivots_knn_inds (Tensor): The k-nearest neighbor indices in local\n graph.\n gt_linkage (Tensor): The surpervision signal of GCN for linkage\n prediction.\n ", "language": "en", "n_whitespaces": 193, "n_words": 61, "vocab_size": 43 }
def __call__(self, feat_maps, comp_attribs): assert isinstance(feat_maps, paddle.Tensor) assert comp_attribs.ndim == 3 assert comp_attribs.shape[2] == 8 sorted_dist_inds_batch = [] local_graph_batch = [] knn_batch = [] node_feat_batch = [] node_label_batch = [] for batch_ind in range(comp_attribs.shape[0]): num_comps = int(comp_attribs[batch_ind, 0, 0]) comp_geo_attribs = comp_attribs[batch_ind, :num_comps, 1:7] node_labels = comp_attribs[batch_ind, :num_comps, 7].astype( np.int32) comp_centers = comp_geo_attribs[:, 0:2] distance_matrix = euclidean_distance_matrix(comp_centers, comp_centers) batch_id = np.zeros( (comp_geo_attribs.shape[0], 1), dtype=np.float32) * batch_ind comp_geo_attribs[:, -2] = np.clip(comp_geo_attribs[:, -2], -1, 1) angle = np.arccos(comp_geo_attribs[:, -2]) * np.sign( comp_geo_attribs[:, -1]) angle = angle.reshape((-1, 1)) rotated_rois = np.hstack( [batch_id, comp_geo_attribs[:, :-2], angle]) rois = paddle.to_tensor(rotated_rois) content_feats = self.pooling(feat_maps[batch_ind].unsqueeze(0), rois) content_feats = content_feats.reshape([content_feats.shape[0], -1]) geo_feats = feature_embedding(comp_geo_attribs, self.node_geo_feat_dim) geo_feats = paddle.to_tensor(geo_feats) node_feats = paddle.concat([content_feats, geo_feats], axis=-1) sorted_dist_inds = np.argsort(distance_matrix, axis=1) pivot_local_graphs, pivot_knns = self.generate_local_graphs( sorted_dist_inds, node_labels) node_feat_batch.append(node_feats) node_label_batch.append(node_labels) local_graph_batch.append(pivot_local_graphs) knn_batch.append(pivot_knns) sorted_dist_inds_batch.append(sorted_dist_inds) (node_feats, adjacent_matrices, knn_inds, gt_linkage) = \ self.generate_gcn_input(node_feat_batch, node_label_batch, local_graph_batch, knn_batch, sorted_dist_inds_batch) return node_feats, adjacent_matrices, knn_inds, gt_linkage
35,525
153,659
244
modin/experimental/core/execution/native/implementations/omnisci_on_native/exchange/dataframe_protocol/dataframe.py
85
16
def _is_zero_copy_arrow_op(cls, op) -> bool: is_zero_copy_op = False if isinstance(op, (FrameNode, TransformNode, UnionNode)): # - FrameNode: already materialized PyArrow table # - TransformNode: select certain columns of the table, implemented zero-copy (``df._arrow_select``) # - UnionNode: concatenate PyArrow tables, implemented zero-copy (``df._arrow_concat``) is_zero_copy_op = True elif isinstance(op, Mas
FEAT-#4244: Implement dataframe exchange protocol for OmniSci (#4269) Co-authored-by: Yaroslav Igoshev <[email protected]> Co-authored-by: Vasily Litvinov <[email protected]> Signed-off-by: Dmitry Chigarev <[email protected]>
_is_zero_copy_arrow_op
0c1a2129df64cf45bf1ff49c8ed92c510fdb1c82
modin
dataframe.py
12
23
https://github.com/modin-project/modin.git
7
83
0
64
133
Python
{ "docstring": "\n Check whether the passed node of the delayed computation tree could be executed zero-copy via PyArrow execution.\n\n Parameters\n ----------\n op : DFAlgNode\n\n Returns\n -------\n bool\n ", "language": "en", "n_whitespaces": 82, "n_words": 25, "vocab_size": 24 }
def _is_zero_copy_arrow_op(cls, op) -> bool: is_zero_copy_op = False if isinstance(op, (FrameNode, TransformNode, UnionNode)): # - FrameNode: already materialized PyArrow table # - TransformNode: select certain columns of the table, implemented zero-copy (``df._arrow_select``) # - UnionNode: concatenate PyArrow tables, implemented zero-copy (``df._arrow_concat``) is_zero_copy_op = True elif isinstance(op, MaskNode) and ( isinstance(op.row_positions, slice) or is_range_like(op.row_positions) ): # Can select rows zero-copy if indexer is a slice-like (``df._arrow_row_slice``) is_zero_copy_op = True return is_zero_copy_op and all( # Walk the computation tree cls._is_zero_copy_arrow_op(_op) for _op in getattr(op, "inputs", []) )
@register
53,129
211,688
29
ppdet/modeling/assigners/uniform_assigner.py
18
13
def batch_p_dist(x, y, p=2): x = x.unsqueeze(1) diff = x - y return paddle
support YOLOF (#7336)
batch_p_dist
41d8be66e84d066d98cfabbe13d4c7a5877cb009
PaddleDetection
uniform_assigner.py
14
4
https://github.com/PaddlePaddle/PaddleDetection.git
1
52
1
16
85
Python
{ "docstring": "\n calculate pairwise p_dist, the first index of x and y are batch\n return [x.shape[0], y.shape[0]]\n ", "language": "en", "n_whitespaces": 25, "n_words": 15, "vocab_size": 15 }
def batch_p_dist(x, y, p=2): x = x.unsqueeze(1) diff = x - y return paddle.norm(diff, p=p, axis=list(range(2, diff.dim()))) @register
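A hedged usage sketch for batch_p_dist, assuming PaddlePaddle is installed; shapes follow the docstring:

import paddle

x = paddle.rand([4, 3])  # batch of 4 points, feature dim 3
y = paddle.rand([5, 3])  # batch of 5 points
d = batch_p_dist(x, y, p=2)
print(d.shape)  # expected: [4, 5], pairwise Euclidean distances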
14,737
68,200
67
erpnext/hr/doctype/shift_assignment/shift_assignment.py
82
16
def get_employee_shift(employee, for_timestamp=None, consider_default_shift=False, next_shift_direction=None): if for_timestamp is None: for_timestamp = now_datetime() shift_details = get_shift_for_timestamp(employee, for_timestamp) # if shift assignment is not found, consider default shift default_shift = frappe.db.get_value('Employee', employee, 'default_shift') if not shift_details and consider_default_shift: shift_details = get_shift_details(default_shift, for_timestamp.date()) # if its a holiday, reset if
refactor: consider timeslots in `get_employee_shift`
get_employee_shift
625a9f69f592be8c50c9b1bd1a16e0b7b9157988
erpnext
shift_assignment.py
12
12
https://github.com/frappe/erpnext.git
8
103
0
51
164
Python
{ "docstring": "Returns a Shift Type for the given employee on the given date. (excluding the holidays)\n\n\t:param employee: Employee for which shift is required.\n\t:param for_timestamp: DateTime on which shift is required\n\t:param consider_default_shift: If set to true, default shift is taken when no shift assignment is found.\n\t:param next_shift_direction: One of: None, 'forward', 'reverse'. Direction to look for next shift if shift not found on given date.\n\t", "language": "en", "n_whitespaces": 62, "n_words": 67, "vocab_size": 45 }
def get_employee_shift(employee, for_timestamp=None, consider_default_shift=False, next_shift_direction=None): if for_timestamp is None: for_timestamp = now_datetime() shift_details = get_shift_for_timestamp(employee, for_timestamp) # if shift assignment is not found, consider default shift default_shift = frappe.db.get_value('Employee', employee, 'default_shift') if not shift_details and consider_default_shift: shift_details = get_shift_details(default_shift, for_timestamp.date()) # if it's a holiday, reset if shift_details and is_holiday_date(employee, shift_details): shift_details = None # if no shift is found, find next or prev shift based on direction if not shift_details and next_shift_direction: shift_details = get_prev_or_next_shift(employee, for_timestamp, consider_default_shift, default_shift, next_shift_direction) return shift_details
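A hedged call sketch, usable only inside a Frappe/ERPNext context; the employee ID is hypothetical:

from frappe.utils import now_datetime

# Fall back to the employee's default shift and, failing that (for example
# on a holiday), search forward for the next applicable shift.
shift = get_employee_shift(
    "HR-EMP-00001",  # hypothetical employee ID
    for_timestamp=now_datetime(),
    consider_default_shift=True,
    next_shift_direction="forward",
)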
15,530
70,602
62
wagtail/admin/views/workflows.py
12
5
def get_create_form_class(self): self.create_model = self.get_create_model() if self.create_model: return ge
Split out data retrieval methods from BaseTaskChooserView.dispatch. This ensures that we don't do redundant setup for sub-views that don't need it, e.g. setting up creation forms for the results-only view.
get_create_form_class
fb48f9863d8ba1856e0697552fb454de162281b8
wagtail
workflows.py
10
6
https://github.com/wagtail/wagtail.git
2
31
0
11
54
Python
{ "docstring": "\n To be called after dispatch(); returns the form class for creating a new task\n ", "language": "en", "n_whitespaces": 29, "n_words": 14, "vocab_size": 14 }
def get_create_form_class(self): self.create_model = self.get_create_model() if self.create_model: return get_task_form_class(self.create_model) else: return None
5,151
27,995
40
saleor/thumbnail/utils.py
12
9
def retrieve_image(self): image = self.s
Better media thumbnails including WebP support (#9988) * Add thumbnail app * Update get_thumbnail_size method and add tests * Add logic for creating thumbnails * Update logic for getting thumbnail * Allow defining format for thumbnail generation * Clear handle_thumbnail views * Add prepare_image_proxy_url method * Use ImageField for user avatar * Allow defining thumbnail format when querying user avatar * Use ImageField for category background_image * Use ImageField for Collection background_image * Use ImageField for ProductMedia image * Ensure that thumbnails are deleted when category background_image is changed or deleted * Ensure that thumbnails are deleted when collection background_image is changed or deleted * Update product media deletion task and failing tests * Delete thumbnail from storage when thumbnail object is deleted * Fix import in product test_bulk_delete * Drop create_thumbnails command * Update Product.thumbnail resolver * Update OrderLine thumbnail resolver * Add missing ADDED_IN_35 and PREVIEW_FEATURE labels * Update account and product signals - ensure the image is deleted from storage * Refactor product_images methods * Add signal for product media image delete * Drop create_thumbnails method and no longer valid settings fields * Clean the ProcessedImage class * Drop versatileimagefield from INSTALLED_APPS * Update changelog * Drop comments from ThumbnailFormat * Add get_image_or_proxy_url method * Apply review suggestions - add ThumbnailField and use get_image_or_proxy_url when it's possible * Update changelog * Replace ADDED_IN_35 with ADDED_IN_36 label * Update changelog Co-authored-by: Marcin Gębala <[email protected]>
retrieve_image
5d1a36b9aaf408016957db04f86397b2e53c2500
saleor
utils.py
9
4
https://github.com/saleor/saleor.git
1
39
0
11
65
Python
{ "docstring": "Return a PIL Image instance stored at `image_path`.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
def retrieve_image(self): image = self.storage.open(self.image_path, "rb") image_format = self.get_image_metadata_from_file(image) return (Image.open(image), image_format)
38,408
159,724
224
numpy/polynomial/chebyshev.py
87
27
def chebval(x, c, tensor=True): c = np.array(c, ndmin=1, copy=True) if c.dtype.char in '?bBhHiIlLqQpP': c = c.astype(np.double) if isinstance(x, (tuple, list)): x = np.asarray(x) if isinstance(x, np.ndarray) and tensor: c = c.reshape(c.shape + (1,)*x.ndim) if len(c) == 1: c0 = c[0] c1 = 0 elif len(c) == 2: c0 = c[0] c1 = c[1] else:
MAINT, DOC: discard repeated words
chebval
58dbe260a2e41c31f1ab03e1abdb1f01da4c1edc
numpy
chebyshev.py
14
23
https://github.com/numpy/numpy.git
8
196
0
50
305
Python
{ "docstring": "\n Evaluate a Chebyshev series at points x.\n\n If `c` is of length `n + 1`, this function returns the value:\n\n .. math:: p(x) = c_0 * T_0(x) + c_1 * T_1(x) + ... + c_n * T_n(x)\n\n The parameter `x` is converted to an array only if it is a tuple or a\n list, otherwise it is treated as a scalar. In either case, either `x`\n or its elements must support multiplication and addition both with\n themselves and with the elements of `c`.\n\n If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If\n `c` is multidimensional, then the shape of the result depends on the\n value of `tensor`. If `tensor` is true the shape will be c.shape[1:] +\n x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that\n scalars have shape (,).\n\n Trailing zeros in the coefficients will be used in the evaluation, so\n they should be avoided if efficiency is a concern.\n\n Parameters\n ----------\n x : array_like, compatible object\n If `x` is a list or tuple, it is converted to an ndarray, otherwise\n it is left unchanged and treated as a scalar. In either case, `x`\n or its elements must support addition and multiplication with\n themselves and with the elements of `c`.\n c : array_like\n Array of coefficients ordered so that the coefficients for terms of\n degree n are contained in c[n]. If `c` is multidimensional the\n remaining indices enumerate multiple polynomials. In the two\n dimensional case the coefficients may be thought of as stored in\n the columns of `c`.\n tensor : boolean, optional\n If True, the shape of the coefficient array is extended with ones\n on the right, one for each dimension of `x`. Scalars have dimension 0\n for this action. The result is that every column of coefficients in\n `c` is evaluated for every element of `x`. If False, `x` is broadcast\n over the columns of `c` for the evaluation. This keyword is useful\n when `c` is multidimensional. The default value is True.\n\n .. versionadded:: 1.7.0\n\n Returns\n -------\n values : ndarray, algebra_like\n The shape of the return value is described above.\n\n See Also\n --------\n chebval2d, chebgrid2d, chebval3d, chebgrid3d\n\n Notes\n -----\n The evaluation uses Clenshaw recursion, aka synthetic division.\n\n ", "language": "en", "n_whitespaces": 578, "n_words": 369, "vocab_size": 191 }
def chebval(x, c, tensor=True): c = np.array(c, ndmin=1, copy=True) if c.dtype.char in '?bBhHiIlLqQpP': c = c.astype(np.double) if isinstance(x, (tuple, list)): x = np.asarray(x) if isinstance(x, np.ndarray) and tensor: c = c.reshape(c.shape + (1,)*x.ndim) if len(c) == 1: c0 = c[0] c1 = 0 elif len(c) == 2: c0 = c[0] c1 = c[1] else: x2 = 2*x c0 = c[-2] c1 = c[-1] for i in range(3, len(c) + 1): tmp = c0 c0 = c[-i] - c1 c1 = tmp + c1*x2 return c0 + c1*x
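A quick numeric sanity check of the Clenshaw evaluation; numpy.polynomial.chebyshev.chebval is the public API this record documents:

import numpy as np
from numpy.polynomial.chebyshev import chebval

# p(x) = 1*T0(x) + 2*T1(x) + 3*T2(x); every T_n(1) == 1, so p(1) == 6.
print(chebval(1.0, [1, 2, 3]))         # 6.0
# At x = 0: T0(0) == 1, T1(0) == 0, T2(0) == -1, so p(0) == -2.
print(chebval([0.0, 1.0], [1, 2, 3]))  # [-2.  6.]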
8,767
46,033
63
airflow/www/views.py
21
11
def dagrun_queued(self): dag_i
Add queue button to click-on-DagRun interface. (#21555) * Initial implementation of adding Queue button to DagRun interface * Implement the test cases * FIX Add all required MyPy ignores * FIX import * Update airflow/www/views.py FIX Documentation Co-authored-by: Brent Bovenzi <[email protected]> * update modal UI Co-authored-by: Brent Bovenzi <[email protected]>
dagrun_queued
afd3c135c7d1815c56578d020625a33dc27fe640
airflow
views.py
11
6
https://github.com/apache/airflow.git
1
64
0
18
112
Python
{ "docstring": "Queue DagRun so tasks that haven't run yet can be started.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
def dagrun_queued(self): dag_id = request.form.get('dag_id') dag_run_id = request.form.get('dag_run_id') confirmed = request.form.get('confirmed') == 'true' origin = get_safe_url(request.form.get('origin')) return self._mark_dagrun_state_as_queued(dag_id, dag_run_id, confirmed, origin)
@pytest.mark.django_db @pytest.mark.job_runtime_vars
17,255
81,752
136
awx/main/tests/functional/api/test_job_runtime_params.py
70
20
def data_to_internal(data): internal = data.copy() if 'extra_vars' in data: internal['extra_vars'] = json.loads(data['extra_vars']) if 'credentials' in data: internal['credentials'] = set(Cr
JT param everything (#12646) * Making almost all fields promptable on job templates and config models * Adding EE, IG and label access checks * Changing jobs preferred instance group function to handle the new IG cache field * Adding new ask fields to job template modules * Address unit/functional tests * Adding migration file
data_to_internal
33c0fb79d66f56374d7c042ba79887faa85e2885
awx
test_job_runtime_params.py
13
15
https://github.com/ansible/awx.git
10
168
1
41
314
Python
{ "docstring": "\n returns internal representation, model objects, dictionaries, etc\n as opposed to integer primary keys and JSON strings\n ", "language": "en", "n_whitespaces": 26, "n_words": 16, "vocab_size": 16 }
def data_to_internal(data): internal = data.copy() if 'extra_vars' in data: internal['extra_vars'] = json.loads(data['extra_vars']) if 'credentials' in data: internal['credentials'] = set(Credential.objects.get(pk=_id) for _id in data['credentials']) if 'inventory' in data: internal['inventory'] = Inventory.objects.get(pk=data['inventory']) if 'execution_environment' in data: internal['execution_environment'] = ExecutionEnvironment.objects.get(pk=data['execution_environment']) if 'labels' in data: internal['labels'] = [Label.objects.get(pk=_id) for _id in data['labels']] if 'instance_groups' in data: internal['instance_groups'] = [InstanceGroup.objects.get(pk=_id) for _id in data['instance_groups']] return internal # End of setup, tests start here @pytest.mark.django_db @pytest.mark.job_runtime_vars
28,411
127,299
27
python/ray/tune/progress_reporter.py
11
5
def _generate_sys_info_str(*sys_info) -> str: if sys_info: return "<br>".join(sys_info).replace("\n", "<br>") return ""
[Tune] Add rich output for ray tune progress updates in notebooks (#26263) These changes are part of a series intended to improve integration with notebooks. This PR modifies the tune progress status shown to the user if tuning is run from a notebook. Previously, part of the trial progress was reported in an HTML table; now, all progress is displayed in an organized HTML template. Signed-off-by: pdmurray <[email protected]>
_generate_sys_info_str
ffe12a5f103b9f06d728429fc0d930b76523726f
ray
progress_reporter.py
12
10
https://github.com/ray-project/ray.git
2
28
0
10
56
Python
{ "docstring": "Format system info into a string.\n *sys_info: System info strings to be included.\n\n Returns:\n Formatted string containing system information.\n ", "language": "en", "n_whitespaces": 39, "n_words": 19, "vocab_size": 17 }
def _generate_sys_info_str(*sys_info) -> str: if sys_info: return "<br>".join(sys_info).replace("\n", "<br>") return ""
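Since this is a pure string helper, its behavior can be shown directly; a sketch assuming the function is importable from ray.tune.progress_reporter:

print(_generate_sys_info_str("Ray v2.0", "Memory:\n10 GB"))
# 'Ray v2.0<br>Memory:<br>10 GB' (newlines inside each string are normalized to <br>)
print(repr(_generate_sys_info_str()))  # '' when no info strings are given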
82,758
278,926
66
keras/saving/saved_model/json_utils.py
23
10
def default(self, obj): if isinstance(obj, tf.TensorShape): items = obj.as_list() i
Remove pylint comments. PiperOrigin-RevId: 452353044
default
3613c3defc39c236fb1592c4f7ba1a9cc887343a
keras
json_utils.py
11
5
https://github.com/keras-team/keras.git
3
49
0
20
82
Python
{ "docstring": "Encodes objects for types that aren't handled by the default\n encoder.", "language": "en", "n_whitespaces": 17, "n_words": 11, "vocab_size": 11 }
def default(self, obj): if isinstance(obj, tf.TensorShape): items = obj.as_list() if obj.rank is not None else None return {"class_name": "TensorShape", "items": items} return get_json_type(obj)
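A hedged sketch of what this encoder hook produces for a TensorShape, assuming TensorFlow is available; the snippet mirrors the hook's logic rather than invoking Keras' enclosing encoder class:

import tensorflow as tf

shape = tf.TensorShape([None, 3])  # rank known, first dimension unknown
items = shape.as_list() if shape.rank is not None else None
print({"class_name": "TensorShape", "items": items})
# {'class_name': 'TensorShape', 'items': [None, 3]}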
async def _pause_and_wait_for_callback(self): """Send pause and wait for the pause callback to be received.""" self._pause_requested = True await self.async_media_pause() try:
106,437
307,669
37
homeassistant/components/forked_daapd/media_player.py
9
4
async def _pause_and_wait_for_callback(self): self._pause_requested = True await self.async_media_pause() try:
Use async_timeout in forked_daapd (#78451)
_pause_and_wait_for_callback
26251895295d74fcd2c73e37804c23675c433247
core
media_player.py
7
9
https://github.com/home-assistant/core.git
2
53
1
9
34
Python
{ "docstring": "Send pause and wait for the pause callback to be received.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 10 }
async def _pause_and_wait_for_callback(self): self._pause_requested = True await self.async_media_pause() try:
29,817
132,825
249
python/ray/tune/trainable.py
56
22
def delete_checkpoint(self, checkpoint_path): # Ensure TrialCheckpoints are converted if isinstance(checkpoint_path, TrialCheckpoint): checkpoint_path = checkpoint_path.local_path try: checkpoint_dir = TrainableUtil.find_checkpoint_dir(checkpoint_path) except FileNotFoundError: # The checkpoint won't exist locally if the # trial was rescheduled to another worker. logger.debug(
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
delete_checkpoint
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
ray
trainable.py
14
16
https://github.com/ray-project/ray.git
5
80
0
49
148
Python
{ "docstring": "Deletes local copy of checkpoint.\n\n Args:\n checkpoint_path (str): Path to checkpoint.\n ", "language": "en", "n_whitespaces": 36, "n_words": 11, "vocab_size": 10 }
def delete_checkpoint(self, checkpoint_path): # Ensure TrialCheckpoints are converted if isinstance(checkpoint_path, TrialCheckpoint): checkpoint_path = checkpoint_path.local_path try: checkpoint_dir = TrainableUtil.find_checkpoint_dir(checkpoint_path) except FileNotFoundError: # The checkpoint won't exist locally if the # trial was rescheduled to another worker. logger.debug( f"Local checkpoint not found during garbage collection: " f"{self.trial_id} - {checkpoint_path}" ) return else: if self.uses_cloud_checkpointing: self.storage_client.delete(self._storage_path(checkpoint_dir)) if os.path.exists(checkpoint_dir): shutil.rmtree(checkpoint_dir)
29,987
133,356
585
python/ray/util/sgd/torch/torch_trainer.py
119
33
def _resize_worker_group(self, state_dict, max_retries=10): old_workers = self.worker_group.num_workers self.worker_group.reset() time.sleep(1) for i in range(max_retries): new_workers = self.worker_group.new_workers_size() if new_workers: self._last_resize = time.time() startup_success = self._start_workers(int(new_workers)) if not startup_success: logger.info( f"Worker startup failed. Retrying " f"{max_retries-i-1} more times." ) self.worker_group.reset() continue self.load_state_dict(state_dict, blocking=True) if self.use_local and new_workers == 1 and old_workers > 1: # Major hack. If we go from LocalDistributedRunner to a # standard TorchRunner we have to manually reset the # dummy actor handle global vars. # TODO(amog): Refactor LocalDistributedTorchRunner to # not use global variables for resource reservation. ray.util.sgd.torch.distributed_torch
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
_resize_worker_group
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
ray
torch_trainer.py
18
26
https://github.com/ray-project/ray.git
7
169
0
92
294
Python
{ "docstring": "Resizes the number of remote workers based on available resources.\n Total number of workers will never exceed `num_workers` amount.\n\n Args:\n state_dict (dict): The state dict to load to all workers.\n max_retries (int): How many times to attempt to resize workers\n before failing.\n ", "language": "en", "n_whitespaces": 100, "n_words": 42, "vocab_size": 35 }
def _resize_worker_group(self, state_dict, max_retries=10): old_workers = self.worker_group.num_workers self.worker_group.reset() time.sleep(1) for i in range(max_retries): new_workers = self.worker_group.new_workers_size() if new_workers: self._last_resize = time.time() startup_success = self._start_workers(int(new_workers)) if not startup_success: logger.info( f"Worker startup failed. Retrying " f"{max_retries-i-1} more times." ) self.worker_group.reset() continue self.load_state_dict(state_dict, blocking=True) if self.use_local and new_workers == 1 and old_workers > 1: # Major hack. If we go from LocalDistributedRunner to a # standard TorchRunner we have to manually reset the # dummy actor handle global vars. # TODO(amog): Refactor LocalDistributedTorchRunner to # not use global variables for resource reservation. ray.util.sgd.torch.distributed_torch_runner._dummy_cuda_actor = None ray.util.sgd.torch.distributed_torch_runner._dummy_cpu_actor = None return else: delay = 2 ** i logger.warning("No new workers found. Retrying in %d sec." % delay) time.sleep(delay) raise RuntimeError("Exceeded max_retries for relaunching workers.")