Columns in this dataset, with their dtypes and the observed value ranges (for string columns the range is the string length):

| column | dtype | min | max |
| --- | --- | --- | --- |
| id | int64 | 20 | 338k |
| vocab_size | int64 | 2 | 671 |
| ast_levels | int64 | 4 | 32 |
| nloc | int64 | 1 | 451 |
| n_ast_nodes | int64 | 12 | 5.6k |
| n_identifiers | int64 | 1 | 186 |
| n_ast_errors | int64 | 0 | 10 |
| n_words | int64 | 2 | 2.17k |
| n_whitespaces | int64 | 2 | 13.8k |
| fun_name | string (length) | 2 | 73 |
| commit_message | string (length) | 51 | 15.3k |
| url | string (length) | 31 | 59 |
| code | string (length) | 51 | 31k |
| ast_errors | string (length) | 0 | 1.46k |
| token_counts | int64 | 6 | 3.32k |
| file_name | string (length) | 5 | 56 |
| language | string (classes) | 1 distinct value | |
| path | string (length) | 7 | 134 |
| commit_id | string (length) | 40 | 40 |
| repo | string (length) | 3 | 28 |
| complexity | int64 | 1 | 153 |
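Each record below is one function extracted from a public repository, together with the commit that touched it and a set of static counters over the function body. The column names nloc, token_counts and complexity resemble what a per-function complexity analyzer such as lizard reports; the card does not say which tool produced these numbers, so the following is only an illustrative sketch of how such counters could be recomputed, not the pipeline that built the dataset. The file name passed to the analyzer is arbitrary.

```python
# Minimal sketch, assuming the `lizard` package (pip install lizard); illustrative only.
import lizard

# Source taken from the netbox `title` record further down in this card.
code = (
    "def title(value):\n"
    "    return ' '.join([w[0].upper() + w[1:] for w in str(value).split()])\n"
)

report = lizard.analyze_file.analyze_source_code("utils.py", code)
for fn in report.function_list:
    # The dataset lists nloc=2, token_counts=39 and complexity=2 for this function;
    # exact agreement depends on the analyzer and its version.
    print(fn.name, fn.nloc, fn.token_count, fn.cyclomatic_complexity)
```

If the dataset is published on the Hugging Face Hub, the records themselves can be loaded with `datasets.load_dataset(<dataset id>)`; the dataset id is not given on this card.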
id: 259,364 | vocab_size: 62 | ast_levels: 15 | nloc: 19 | n_ast_nodes: 288 | n_identifiers: 20 | n_ast_errors: 0 | n_words: 96 | n_whitespaces: 347
fun_name: _make_estimator
commit_message:
API Deprecate max_feature=`auto` for tree classes (#22476) Co-authored-by: Thomas J. Fan <[email protected]> Co-authored-by: Jérémie du Boisberranger <[email protected]>
https://github.com/scikit-learn/scikit-learn.git
code:
def _make_estimator(self, append=True, random_state=None):
    estimator = clone(self.base_estimator_)
    estimator.set_params(**{p: getattr(self, p) for p in self.estimator_params})

    # TODO: Remove in v1.2
    # criterion "mse" and "mae" would cause warnings in every call to
    # DecisionTreeRegressor.fit(..)
    if isinstance(estimator, (DecisionTreeRegressor, ExtraTreeRegressor)):
        if getattr(estimator, "criterion", None) == "mse":
            estimator.set_params(criterion="squared_error")
        elif getattr(estimator, "criterion", None) == "mae":
            estimator.set_params(criterion="absolute_error")

    # TODO(1.3): Remove
    # max_features = 'auto' would cause warnings in every call to Tree.fit(..)
    if isinstance(estimator, BaseDecisionTree):
        if getattr(estimator, "max_features", None) == "auto":
            if isinstance(estimator, DecisionTreeClassifier):
                estimator.set_params(max_features="sqrt")
            elif isinstance(estimator, DecisionTreeRegressor):
                estimator.set_params(max_features=1.0)

    if random_state is not None:
        _set_random_states(estimator, random_state)

    if append:
        self.estimators_.append(estimator)

    return estimator
token_counts: 175 | file_name: _base.py | language: Python | path: sklearn/ensemble/_base.py | commit_id: e5736afb316038c43301d2c53ce39f9a89b64495 | repo: scikit-learn | complexity: 11
id: 28,221 | vocab_size: 2 | ast_levels: 6 | nloc: 9 | n_ast_nodes: 12 | n_identifiers: 1 | n_ast_errors: 0 | n_words: 2 | n_whitespaces: 5
fun_name: test_flat_concat_drop_exceeding_count_no_silently_fail
commit_message:
Limit the maximum number of search vectors to generate per index (#10284) This fixes a crash when multiple thousand values are to be indexed due to PostgreSQL rejecting the statement. Such crash would look like this: ``` django.db.utils.OperationalError: stack depth limit exceeded HINT: Increase the configuration parameter "max_stack_depth" (currently 2048kB), after ensuring the platform's stack depth limit is adequate. ```
https://github.com/saleor/saleor.git
code:
def test_flat_concat_drop_exceeding_count_no_silently_fail():
token_counts: 85 | file_name: test_postgresql_search.py | language: Python | path: saleor/core/tests/test_postgresql_search.py | commit_id: e5873b338db3a24afee0a7ae7bd7fffe09397ee4 | repo: saleor | complexity: 1
id: 51,540 | vocab_size: 25 | ast_levels: 14 | nloc: 11 | n_ast_nodes: 138 | n_identifiers: 17 | n_ast_errors: 0 | n_words: 33 | n_whitespaces: 102
fun_name: postprocess
commit_message:
update efficientnetb0_imagenet (#2041) * update efficientnetb0_imagenet * remove unused print
https://github.com/PaddlePaddle/PaddleHub.git
code:
def postprocess(data_out, label_list, top_k):
    output = []
    for result in data_out:
        result_i = softmax(result)
        output_i = {}
        indexs = np.argsort(result_i)[::-1][0:top_k]
        for index in indexs:
            label = label_list[index].split(',')[0]
            output_i[label] = float(result_i[index])
        output.append(output_i)
    return output
token_counts: 86 | file_name: processor.py | language: Python | path: modules/image/classification/efficientnetb0_imagenet/processor.py | commit_id: 7cd67aba38c19a835c3229d9b4be21798c5c8673 | repo: PaddleHub | complexity: 3
id: 259,004 | vocab_size: 175 | ast_levels: 16 | nloc: 68 | n_ast_nodes: 694 | n_identifiers: 53 | n_ast_errors: 0 | n_words: 329 | n_whitespaces: 1,288
fun_name: fit
commit_message:
ENH Allow prefit in stacking (#22215) Co-authored-by: Thomas J. Fan <[email protected]> Co-authored-by: Guillaume Lemaitre <[email protected]> Co-authored-by: Siqi He <[email protected]>
https://github.com/scikit-learn/scikit-learn.git
code:
def fit(self, X, y, sample_weight=None):
    # all_estimators contains all estimators, the one to be fitted and the 'drop' string.
    names, all_estimators = self._validate_estimators()
    self._validate_final_estimator()

    stack_method = [self.stack_method] * len(all_estimators)

    if self.cv == "prefit":
        self.estimators_ = []
        for estimator in all_estimators:
            if estimator != "drop":
                check_is_fitted(estimator)
                self.estimators_.append(estimator)
    else:
        # Fit the base estimators on the whole training data. Those
        # base estimators will be used in transform, predict, and
        # predict_proba. They are exposed publicly.
        self.estimators_ = Parallel(n_jobs=self.n_jobs)(
            delayed(_fit_single_estimator)(clone(est), X, y, sample_weight)
            for est in all_estimators
            if est != "drop"
        )

    self.named_estimators_ = Bunch()
    est_fitted_idx = 0
    for name_est, org_est in zip(names, all_estimators):
        if org_est != "drop":
            current_estimator = self.estimators_[est_fitted_idx]
            self.named_estimators_[name_est] = current_estimator
            est_fitted_idx += 1
            if hasattr(current_estimator, "feature_names_in_"):
                self.feature_names_in_ = current_estimator.feature_names_in_
        else:
            self.named_estimators_[name_est] = "drop"

    self.stack_method_ = [
        self._method_name(name, est, meth)
        for name, est, meth in zip(names, all_estimators, stack_method)
    ]

    if self.cv == "prefit":
        # Generate predictions from prefit models
        predictions = [
            getattr(estimator, predict_method)(X)
            for estimator, predict_method in zip(all_estimators, self.stack_method_)
            if estimator != "drop"
        ]
    else:
        # To train the meta-classifier using the most data as possible, we use
        # a cross-validation to obtain the output of the stacked estimators.
        # To ensure that the data provided to each estimator are the same,
        # we need to set the random state of the cv if there is one and we
        # need to take a copy.
        cv = check_cv(self.cv, y=y, classifier=is_classifier(self))
        if hasattr(cv, "random_state") and cv.random_state is None:
            cv.random_state = np.random.RandomState()

        fit_params = (
            {"sample_weight": sample_weight} if sample_weight is not None else None
        )
        predictions = Parallel(n_jobs=self.n_jobs)(
            delayed(cross_val_predict)(
                clone(est),
                X,
                y,
                cv=deepcopy(cv),
                method=meth,
                n_jobs=self.n_jobs,
                fit_params=fit_params,
                verbose=self.verbose,
            )
            for est, meth in zip(all_estimators, self.stack_method_)
            if est != "drop"
        )

    # Only not None or not 'drop' estimators will be used in transform.
    # Remove the None from the method as well.
    self.stack_method_ = [
        meth
        for (meth, est) in zip(self.stack_method_, all_estimators)
        if est != "drop"
    ]

    X_meta = self._concatenate_predictions(X, predictions)
    _fit_single_estimator(
        self.final_estimator_, X_meta, y, sample_weight=sample_weight
    )
    return self
token_counts: 439 | file_name: _stacking.py | language: Python | path: sklearn/ensemble/_stacking.py | commit_id: 691972a7cf04e7a8918b907556b4e9904f82bd0c | repo: scikit-learn | complexity: 20
id: 31,489 | vocab_size: 19 | ast_levels: 12 | nloc: 9 | n_ast_nodes: 61 | n_identifiers: 7 | n_ast_errors: 0 | n_words: 22 | n_whitespaces: 80
fun_name: unk_token
commit_message:
Fix properties of unset special tokens in non verbose mode (#17797) Co-authored-by: SaulLu <[email protected]>
https://github.com/huggingface/transformers.git
code:
def unk_token(self) -> str:
    if self._unk_token is None:
        if self.verbose:
            logger.error("Using unk_token, but it is not set yet.")
        return None
    return str(self._unk_token)
token_counts: 35 | file_name: tokenization_utils_base.py | language: Python | path: src/transformers/tokenization_utils_base.py | commit_id: 3eed5530ec74bb60ad9f8f612717d0f6ccf820f2 | repo: transformers | complexity: 3
id: 278,518 | vocab_size: 15 | ast_levels: 13 | nloc: 10 | n_ast_nodes: 102 | n_identifiers: 14 | n_ast_errors: 0 | n_words: 21 | n_whitespaces: 98
fun_name: distributions_and_v1_optimizers
commit_message:
let the linter ignore certain lines, prepare to enforce line length
https://github.com/keras-team/keras.git
code:
def distributions_and_v1_optimizers():
    return tf.__internal__.test.combinations.combine(
        distribution=[
            tf.__internal__.distribute.combinations.one_device_strategy,
            tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu,  # noqa: E501
            tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus,  # noqa: E501
            tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus_no_merge_call,  # noqa: E501
        ],
        optimizer_fn=optimizers_v1,
    )
token_counts: 66 | file_name: optimizer_combinations.py | language: Python | path: keras/distribute/optimizer_combinations.py | commit_id: 7c46f914413fafe472e0c577ecb10e310543cd50 | repo: keras | complexity: 1
id: 296,195 | vocab_size: 20 | ast_levels: 9 | nloc: 8 | n_ast_nodes: 88 | n_identifiers: 12 | n_ast_errors: 0 | n_words: 24 | n_whitespaces: 81
fun_name: frontend_stream_type
commit_message:
Replace Camera STREAM_ constants with StreamType enum (#69871)
https://github.com/home-assistant/core.git
code:
def frontend_stream_type(self) -> StreamType | None:
    if CameraLiveStreamTrait.NAME not in self._device.traits:
        return None
    trait = self._device.traits[CameraLiveStreamTrait.NAME]
    if StreamingProtocol.WEB_RTC in trait.supported_protocols:
        return StreamType.WEB_RTC
    return super().frontend_stream_type
token_counts: 55 | file_name: camera_sdm.py | language: Python | path: homeassistant/components/nest/camera_sdm.py | commit_id: c93c7e8eff466624aa492011f157e64e50fed728 | repo: core | complexity: 3
id: 140,169 | vocab_size: 11 | ast_levels: 9 | nloc: 10 | n_ast_nodes: 50 | n_identifiers: 9 | n_ast_errors: 0 | n_words: 11 | n_whitespaces: 43
fun_name: _execute_impl
commit_message:
[Serve][Deployment Graph][Perf] Add minimal executor DAGNode (#24754) closes #24475 Current deployment graph has big perf issues compare with using plain deployment handle, mostly because overhead of DAGNode traversal mechanism. We need this mechanism to empower DAG API, specially deeply nested objects in args where we rely on pickling; But meanwhile the nature of each execution becomes re-creating and replacing every `DAGNode` instances involved upon each execution, that incurs overhead. Some overhead is inevitable due to pickling and executing DAGNode python code, but they could be quite minimal. As I profiled earlier, pickling itself is quite fast for our benchmarks at magnitude of microseconds. Meanwhile the elephant in the room is DeploymentNode and its relatives are doing too much work in constructor that's beyond necessary, thus slowing everything down. So the fix is as simple as 1) Introduce a new set of executor dag node types that contains absolute minimal information that only preserves the DAG structure with traversal mechanism, and ability to call relevant deployment handles. 2) Add a simple new pass in our build() that generates and replaces nodes with executor dag to produce a final executor dag to run the graph. Current ray dag -> serve dag mixed a lot of stuff related to deployment generation and init args, in longer term we should remove them but our correctness depends on it so i rather leave it as separate PR. ### Current 10 node chain with deployment graph `.bind()` ``` chain_length: 10, num_clients: 1 latency_mean_ms: 41.05, latency_std_ms: 15.18 throughput_mean_tps: 27.5, throughput_std_tps: 3.2 ``` ### Using raw deployment handle without dag overhead ``` chain_length: 10, num_clients: 1 latency_mean_ms: 20.39, latency_std_ms: 4.57 throughput_mean_tps: 51.9, throughput_std_tps: 1.04 ``` ### After this PR: ``` chain_length: 10, num_clients: 1 latency_mean_ms: 20.35, latency_std_ms: 0.87 throughput_mean_tps: 48.4, throughput_std_tps: 1.43 ```
https://github.com/ray-project/ray.git
code:
def _execute_impl(self, *args, **kwargs) -> ObjectRef:
    return self._deployment_function_handle.remote(
        *self._bound_args, **self._bound_kwargs
    )
token_counts: 31 | file_name: deployment_function_executor_node.py | language: Python | path: python/ray/serve/deployment_function_executor_node.py | commit_id: f27e85cd7df5ca2873ef6231200a1530e16ac35d | repo: ray | complexity: 1
id: 37,288 | vocab_size: 6 | ast_levels: 6 | nloc: 8 | n_ast_nodes: 22 | n_identifiers: 5 | n_ast_errors: 0 | n_words: 6 | n_whitespaces: 20
fun_name: default_num_choices
commit_message:
Add onnx export of models with a multiple choice classification head (#16758) * Add export of models with a multiple-choice classification head
https://github.com/huggingface/transformers.git
code:
def default_num_choices(self) -> int:
    return OnnxConfig.default_fixed_num_choices
token_counts: 12 | file_name: config.py | language: Python | path: src/transformers/onnx/config.py | commit_id: 77de8d6c31dbf756e7b2495e272efc0b92927fc3 | repo: transformers | complexity: 1
id: 100,394 | vocab_size: 31 | ast_levels: 13 | nloc: 14 | n_ast_nodes: 193 | n_identifiers: 16 | n_ast_errors: 0 | n_words: 43 | n_whitespaces: 161
fun_name: compile_timelapse_sample
commit_message:
Update code to support Tensorflow versions up to 2.8 (#1213) * Update maximum tf version in setup + requirements * - bump max version of tf version in launcher - standardise tf version check * update keras get_custom_objects for tf>2.6 * bugfix: force black text in GUI file dialogs (linux) * dssim loss - Move to stock tf.ssim function * Update optimizer imports for compatibility * fix logging for tf2.8 * Fix GUI graphing for TF2.8 * update tests * bump requirements.txt versions * Remove limit on nvidia-ml-py * Graphing bugfixes - Prevent live graph from displaying if data not yet available * bugfix: Live graph. Collect loss labels correctly * fix: live graph - swallow inconsistent loss errors * Bugfix: Prevent live graph from clearing during training * Fix graphing for AMD
https://github.com/deepfakes/faceswap.git
code:
def compile_timelapse_sample(self):
    batchsizes = []
    samples = {}
    images = {}
    masks = {}
    for side in ("a", "b"):
        batch = next(self._display_feeds["timelapse"][side])
        batchsizes.append(len(batch["samples"]))
        samples[side] = batch["samples"]
        images[side] = batch["targets"][-1]
        masks[side] = batch["masks"]
    batchsize = min(batchsizes)
    sample = self.compile_sample(batchsize, samples=samples, images=images, masks=masks)
    return sample
token_counts: 116 | file_name: _base.py | language: Python | path: plugins/train/trainer/_base.py | commit_id: c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf | repo: faceswap | complexity: 2
id: 181,873 | vocab_size: 27 | ast_levels: 15 | nloc: 11 | n_ast_nodes: 152 | n_identifiers: 17 | n_ast_errors: 0 | n_words: 33 | n_whitespaces: 110
fun_name: auto_select_categorical_features
commit_message:
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
https://github.com/EpistasisLab/tpot.git
code:
def auto_select_categorical_features(X, threshold=10):
    feature_mask = []
    for column in range(X.shape[1]):
        if sparse.issparse(X):
            indptr_start = X.indptr[column]
            indptr_end = X.indptr[column + 1]
            unique = np.unique(X.data[indptr_start:indptr_end])
        else:
            unique = np.unique(X[:, column])
        feature_mask.append(len(unique) <= threshold)
    return feature_mask
token_counts: 96 | file_name: one_hot_encoder.py | language: Python | path: tpot/builtins/one_hot_encoder.py | commit_id: 388616b6247ca4ea8de4e2f340d6206aee523541 | repo: tpot | complexity: 3
id: 90,226 | vocab_size: 47 | ast_levels: 14 | nloc: 15 | n_ast_nodes: 144 | n_identifiers: 24 | n_ast_errors: 0 | n_words: 66 | n_whitespaces: 249
fun_name: auto_add_recovery_codes
commit_message:
ref(hybrid-cloud): Additional test annotations: auth_index (#42425) Extends the hybrid cloud auth service to be usable in many more places ( TY @corps) Annotate 30+ more api endpoint tests Co-authored-by: Mike Ihbe <[email protected]> Co-authored-by: Zachary Collins <[email protected]> Co-authored-by: Zach Collins <[email protected]>
https://github.com/getsentry/sentry.git
code:
def auto_add_recovery_codes(self, user, force=False):
    from sentry.auth.authenticators.recovery_code import RecoveryCodeInterface

    has_authenticators = False

    # If we're not forcing, check for a backup interface already setup
    # or if it's missing, we'll need to set it.
    if not force:
        for authenticator in Authenticator.objects.filter(
            user_id=user.id, type__in=[a.type for a in available_authenticators()]
        ):
            iface = authenticator.interface
            if iface.is_backup_interface:
                return
            has_authenticators = True

    if has_authenticators or force:
        interface = RecoveryCodeInterface()
        interface.enroll(user)
        return interface
token_counts: 91 | file_name: authenticator.py | language: Python | path: src/sentry/models/authenticator.py | commit_id: 17644550024d6a2eb01356ee48ec0d3ef95c043d | repo: sentry | complexity: 7
id: 169,356 | vocab_size: 18 | ast_levels: 8 | nloc: 5 | n_ast_nodes: 33 | n_identifiers: 6 | n_ast_errors: 0 | n_words: 21 | n_whitespaces: 51
fun_name: size
commit_message:
TYP: Fix typing errors caused by new numpy (#48850)
https://github.com/pandas-dev/pandas.git
code:
def size(self) -> int:
    # error: Incompatible return value type (got "signedinteger[_64Bit]",
    # expected "int")  [return-value]
    return np.prod(self.shape)  # type: ignore[return-value]
token_counts: 17 | file_name: base.py | language: Python | path: pandas/core/arrays/base.py | commit_id: 336896907748389e2cd0d57504508475d425348e | repo: pandas | complexity: 1
id: 38,881 | vocab_size: 97 | ast_levels: 20 | nloc: 47 | n_ast_nodes: 638 | n_identifiers: 36 | n_ast_errors: 0 | n_words: 163 | n_whitespaces: 1,029
fun_name: test_batch_encode_dynamic_overflowing
commit_message:
Add LayoutLMv3 (#17060) * Make forward pass work * More improvements * Remove unused imports * Remove timm dependency * Improve loss calculation of token classifier * Fix most tests * Add docs * Add model integration test * Make all tests pass * Add LayoutLMv3FeatureExtractor * Improve integration test + make fixup * Add example script * Fix style * Add LayoutLMv3Processor * Fix style * Add option to add visual labels * Make more tokenizer tests pass * Fix more tests * Make more tests pass * Fix bug and improve docs * Fix import of processors * Improve docstrings * Fix toctree and improve docs * Fix auto tokenizer * Move tests to model folder * Move tests to model folder * change default behavior add_prefix_space * add prefix space for fast * add_prefix_spcae set to True for Fast * no space before `unique_no_split` token * add test to hightligh special treatment of added tokens * fix `test_batch_encode_dynamic_overflowing` by building a long enough example * fix `test_full_tokenizer` with add_prefix_token * Fix tokenizer integration test * Make the code more readable * Add tests for LayoutLMv3Processor * Fix style * Add model to README and update init * Apply suggestions from code review * Replace asserts by value errors * Add suggestion by @ducviet00 * Add model to doc tests * Simplify script * Improve README * a step ahead to fix * Update pair_input_test * Make all tokenizer tests pass - phew * Make style * Add LayoutLMv3 to CI job * Fix auto mapping * Fix CI job name * Make all processor tests pass * Make tests of LayoutLMv2 and LayoutXLM consistent * Add copied from statements to fast tokenizer * Add copied from statements to slow tokenizer * Remove add_visual_labels attribute * Fix tests * Add link to notebooks * Improve docs of LayoutLMv3Processor * Fix reference to section Co-authored-by: SaulLu <[email protected]> Co-authored-by: Niels Rogge <[email protected]>
https://github.com/huggingface/transformers.git
code:
def test_batch_encode_dynamic_overflowing(self):
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        tokenizer = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
        with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name}, {tokenizer.__class__.__name__})"):
            if is_torch_available():
                returned_tensor = "pt"
            elif is_tf_available():
                returned_tensor = "tf"
            else:
                returned_tensor = "jax"

            # Single example
            words = ["HuggingFace", "is", "solving", "NLP", "one", "commit", "at", "a", "time"]
            boxes = [[i, i, i, i] for i in range(len(words))]
            tokens = tokenizer.encode_plus(
                words,
                boxes=boxes,
                max_length=6,
                padding=True,
                truncation=True,
                return_tensors=returned_tensor,
                return_overflowing_tokens=True,
            )

            for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()):
                if key != "bbox":
                    self.assertEqual(len(tokens[key].shape), 2)
                else:
                    self.assertEqual(len(tokens[key].shape), 3)

            # Batch of examples
            # For these 2 examples, 3 training examples will be created
            words_batched = [
                ["HuggingFace", "is", "solving", "NLP", "one", "commit", "at", "a", "time"],
                ["Very", "tiny", "input"],
            ]
            boxes_batched = [[[i, i, i, i] for i in range(len(words_item))] for words_item in words_batched]
            tokens = tokenizer.batch_encode_plus(
                words_batched,
                boxes=boxes_batched,
                max_length=6,
                padding=True,
                truncation="only_first",
                return_tensors=returned_tensor,
                return_overflowing_tokens=True,
            )

            for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()):
                if key != "bbox":
                    self.assertEqual(len(tokens[key].shape), 2)
                    self.assertEqual(tokens[key].shape[-1], 6)
                else:
                    self.assertEqual(len(tokens[key].shape), 3)
                    self.assertEqual(tokens[key].shape[-1], 4)
token_counts: 383 | file_name: test_tokenization_layoutlmv3.py | language: Python | path: tests/models/layoutlmv3/test_tokenization_layoutlmv3.py | commit_id: 31ee80d55673f32c0f5d50936f371e661b74b21a | repo: transformers | complexity: 11
id: 149,955 | vocab_size: 37 | ast_levels: 18 | nloc: 12 | n_ast_nodes: 214 | n_identifiers: 21 | n_ast_errors: 0 | n_words: 47 | n_whitespaces: 209
fun_name: create_follower_dict
commit_message:
rehaul of backend data management - increasing performance by holding history in memory, reducing load on the ratelimit by only pinging exchange once per candle. Improve code readability.
https://github.com/freqtrade/freqtrade.git
code:
def create_follower_dict(self):
    follower_name = self.config.get('bot_name', 'follower1')
    whitelist_pairs = self.config.get('exchange', {}).get('pair_whitelist')
    exists = Path(self.full_path / str('follower_dictionary-' + follower_name + '.json')).resolve().exists()
    if exists:
        logger.info('Found an existing follower dictionary')
    for pair in whitelist_pairs:
        self.follower_dict[pair] = {}
    with open(self.full_path / str('follower_dictionary-' + follower_name + '.json'), "w") as fp:
        json.dump(self.follower_dict, fp, default=self.np_encoder)
token_counts: 121 | file_name: data_drawer.py | language: Python | path: freqtrade/freqai/data_drawer.py | commit_id: 16b4a5b71ff140f5de31e5d5572f1f193457cf6b | repo: freqtrade | complexity: 3
id: 158,217 | vocab_size: 18 | ast_levels: 14 | nloc: 5 | n_ast_nodes: 91 | n_identifiers: 7 | n_ast_errors: 0 | n_words: 25 | n_whitespaces: 47
fun_name: voc_label_indices
commit_message:
[PaddlePaddle] Merge master into Paddle branch (#1186) * change 15.2 title in chinese version (#1109) change title ’15.2. 情感分析:使用递归神经网络‘ to ’15.2. 情感分析:使用循环神经网络‘ * 修改部分语义表述 (#1105) * Update r0.17.5 (#1120) * Bump versions in installation * 94行typo: (“bert.mall”)->(“bert.small”) (#1129) * line 313: "bert.mall" -> "bert.small" (#1130) * fix: update language as native reader (#1114) * Fix the translation of "stride" (#1115) * Update index.md (#1118) 修改部分语义表述 * Update self-attention-and-positional-encoding.md (#1133) 依照本书的翻译习惯,将pooling翻译成汇聚 * maybe a comment false (#1149) * maybe a little false * maybe a little false * A minor bug in the rcnn section (Chinese edition) (#1148) * Update bert.md (#1137) 一个笔误 # 假设batch_size=2,num_pred_positions=3 # 那么batch_idx应该是np.repeat( [0,1], 3 ) = [0,0,0,1,1,1] * Update calculus.md (#1135) * fix typo in git documentation (#1106) * fix: Update the Chinese translation in lr-scheduler.md (#1136) * Update lr-scheduler.md * Update chapter_optimization/lr-scheduler.md Co-authored-by: goldmermaid <[email protected]> Co-authored-by: goldmermaid <[email protected]> * fix translation for kaggle-house-price.md (#1107) * fix translation for kaggle-house-price.md * fix translation for kaggle-house-price.md Signed-off-by: sunhaizhou <[email protected]> * Update weight-decay.md (#1150) * Update weight-decay.md 关于“k多选d”这一部分,中文读者使用排列组合的方式可能更容易理解 关于“给定k个变量,阶数的个数为...”这句话是有歧义的,不是很像中国话,应该是说“阶数为d的项的个数为...”。 并增加了一句对“因此即使是阶数上的微小变化,比如从$2$到$3$,也会显著增加我们模型的复杂性。”的解释 解释为何会增加复杂性以及为何需要细粒度工具。 * Update chapter_multilayer-perceptrons/weight-decay.md yep Co-authored-by: goldmermaid <[email protected]> * Update chapter_multilayer-perceptrons/weight-decay.md yep Co-authored-by: goldmermaid <[email protected]> Co-authored-by: goldmermaid <[email protected]> * Fix a spelling error (#1161) * Update gru.md (#1152) The key distinction between vanilla RNNs and GRUs is that the latter support gating of the hidden state. 翻译错误 * Unify the function naming (#1113) Unify naming of the function 'init_xavier()'. * Update mlp-concise.md (#1166) * Update mlp-concise.md 语句不通顺 * Update environment.md 语序异常 * Update config.ini * fix the imprecise description (#1168) Co-authored-by: yuande <yuande> * fix typo in chapter_natural-language-processing-pretraining/glove.md (#1175) * Fix some typos. (#1163) * Update batch-norm.md (#1170) fixing typos u->x in article * Update linear-regression.md (#1090) We invoke Stuart Russell and Peter Norvig who, in their classic AI text book Artificial Intelligence: A Modern Approach :cite:Russell.Norvig.2016, pointed out that 原译文把who也直接翻译出来了。 * Update mlp.md (#1117) * Update mlp.md 修改部分语义表述 * Update chapter_multilayer-perceptrons/mlp.md Co-authored-by: goldmermaid <[email protected]> * Update chapter_multilayer-perceptrons/mlp.md Co-authored-by: Aston Zhang <[email protected]> Co-authored-by: goldmermaid <[email protected]> * Correct a translation error. (#1091) * Correct a translation error. 
* Update chapter_computer-vision/image-augmentation.md Co-authored-by: Aston Zhang <[email protected]> * Update aws.md (#1121) * Update aws.md * Update chapter_appendix-tools-for-deep-learning/aws.md Co-authored-by: Aston Zhang <[email protected]> * Update image-augmentation.md (#1093) * Update anchor.md (#1088) fix a minor issue in code * Update anchor.md * Update image-augmentation.md * fix typo and improve translation in chapter_linear-networks\softmax-regression.md (#1087) * Avoid `torch.meshgrid` user warning (#1174) Avoids the following user warning: ```python ~/anaconda3/envs/torch/lib/python3.10/site-packages/torch/functional.py:568: UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at ../aten/src/ATen/native/TensorShape.cpp:2228.) return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined] ``` * bump to 2.0.0-beta1 * Update sequence.md * bump beta1 on readme * Add latex code block background to config * BLD: Bump python support version 3.9 (#1183) * BLD: Bump python support version 3.9 * Remove clear and manually downgrade protobuf 4.21.4 to 3.19.4 * BLD: Bump torch and tensorflow * Update Jenkinsfile * Update chapter_installation/index.md * Update chapter_installation/index.md Co-authored-by: Aston Zhang <[email protected]> * Update config.ini * Update INFO.md * Update INFO.md * Drop mint to show code in pdf, use Inconsolata font, apply code cell color (#1187) * resolve the conflicts * revise from publisher (#1089) * revise from publisher * d2l api * post_latex * revise from publisher * revise ch11 * Delete d2l-Copy1.bib * clear cache * rm d2lbook clear * debug anchor * keep original d2l doc Co-authored-by: Ubuntu <[email protected]> Co-authored-by: Aston Zhang <[email protected]> Co-authored-by: Aston Zhang <[email protected]> * 重复语句 (#1188) Co-authored-by: Aston Zhang <[email protected]> * Improve expression for chapter_preliminaries/pandas.md (#1184) * Update pandas.md * Improve expression * Improve expression * Update chapter_preliminaries/pandas.md Co-authored-by: Aston Zhang <[email protected]> * Improce expression for chapter_preliminaries/linear-algebra.md (#1185) * Improce expression * Improve code comments * Update chapter_preliminaries/linear-algebra.md * Update chapter_preliminaries/linear-algebra.md * Update chapter_preliminaries/linear-algebra.md * Update chapter_preliminaries/linear-algebra.md Co-authored-by: Aston Zhang <[email protected]> * Fix multibox_detection bugs * Update d2l to 0.17.5 version * restore older version * Upgrade pandas * change to python3.8 * Test warning log * relocate warning log * test logs filtering * Update gru.md * Add DeprecationWarning filter * Test warning log * Update attention mechanisms & computational performance * Update multilayer perceptron& linear & convolution networks & computer vision * Update recurrent&optimition&nlp pretraining & nlp applications * ignore warnings * Update index.md * Update linear networks * Update multilayer perceptrons&deep learning computation * Update preliminaries * Check and Add warning filter * Update kaggle-cifar10.md * Update object-detection-dataset.md * Update ssd.md fcn.md * Update hybridize.md * Update hybridize.md Signed-off-by: sunhaizhou <[email protected]> Co-authored-by: zhou201505013 <[email protected]> Co-authored-by: Xinwei Liu <[email protected]> Co-authored-by: Anirudh Dagar <[email protected]> Co-authored-by: Aston Zhang <[email protected]> Co-authored-by: hugo_han <[email protected]> 
Co-authored-by: gyro永不抽风 <[email protected]> Co-authored-by: CanChengZheng <[email protected]> Co-authored-by: linlin <[email protected]> Co-authored-by: iuk <[email protected]> Co-authored-by: yoos <[email protected]> Co-authored-by: Mr. Justice Lawrence John Wargrave <[email protected]> Co-authored-by: Chiyuan Fu <[email protected]> Co-authored-by: Sunhuashan <[email protected]> Co-authored-by: Haiker Sun <[email protected]> Co-authored-by: Ming Liu <[email protected]> Co-authored-by: goldmermaid <[email protected]> Co-authored-by: silenceZheng66 <[email protected]> Co-authored-by: Wenchao Yan <[email protected]> Co-authored-by: Kiki2049 <[email protected]> Co-authored-by: Krahets <[email protected]> Co-authored-by: friedmainfunction <[email protected]> Co-authored-by: Jameson <[email protected]> Co-authored-by: P. Yao <[email protected]> Co-authored-by: Yulv-git <[email protected]> Co-authored-by: Liu,Xiao <[email protected]> Co-authored-by: YIN, Gang <[email protected]> Co-authored-by: Joe-HZ <[email protected]> Co-authored-by: lybloveyou <[email protected]> Co-authored-by: VigourJiang <[email protected]> Co-authored-by: zxhd863943427 <[email protected]> Co-authored-by: LYF <[email protected]> Co-authored-by: Aston Zhang <[email protected]> Co-authored-by: xiaotinghe <[email protected]> Co-authored-by: Ubuntu <[email protected]> Co-authored-by: Holly-Max <[email protected]> Co-authored-by: HinGwenWoong <[email protected]> Co-authored-by: Shuai Zhang <[email protected]>
https://github.com/d2l-ai/d2l-zh.git
code:
def voc_label_indices(colormap, colormap2label):
    colormap = colormap.astype(np.int32)
    idx = ((colormap[:, :, 0] * 256 + colormap[:, :, 1]) * 256 + colormap[:, :, 2])
    return colormap2label[idx]
token_counts: 59 | file_name: mxnet.py | language: Python | path: d2l/mxnet.py | commit_id: b64b41d8c1ac23c43f7a4e3f9f6339d6f0012ab2 | repo: d2l-zh | complexity: 1
id: 104,391 | vocab_size: 77 | ast_levels: 12 | nloc: 40 | n_ast_nodes: 164 | n_identifiers: 14 | n_ast_errors: 0 | n_words: 104 | n_whitespaces: 264
fun_name: subsplit
commit_message:
Update docs to new frontend/UI (#3690) * WIP: update docs to new UI * make style * Rm unused * inject_arrow_table_documentation __annotations__ * hasattr(arrow_table_method, "__annotations__") * Update task_template.rst * Codeblock PT-TF-SPLIT * Convert loading scripts * Convert docs to mdx * Fix mdx * Add <Tip> * Convert mdx tables * Fix codeblock * Rm unneded hashlinks * Update index.mdx * Redo dev change * Rm circle ci `build_doc` & `deploy_doc` * Rm unneeded files * Update docs reamde * Standardize to `Example::` * mdx logging levels doc * Table properties inject_arrow_table_documentation * ``` to ```py mdx * Add Tips mdx * important,None -> <Tip warning={true}> * More misc * Center imgs * Update instllation page * `setup.py` docs section * Rm imgs since they are in hf.co * Update docs/source/access.mdx Co-authored-by: Steven Liu <[email protected]> * Update index mdx * Update docs/source/access.mdx Co-authored-by: Steven Liu <[email protected]> * just `Dataset` obj * Addedversion just italics * Update ReadInstruction doc example syntax * Change docstring for `prepare_for_task` * Chore * Remove `code` syntax from headings * Rm `code` syntax from headings * Hashlink backward compatability * S3FileSystem doc * S3FileSystem doc updates * index.mdx updates * Add darkmode gifs * Index logo img css classes * Index mdx dataset logo img size * Docs for DownloadMode class * Doc DownloadMode table * format docstrings * style * Add doc builder scripts (#3790) * add doc builder scripts * fix docker image * Docs new UI actions no self hosted (#3793) * No self hosted * replace doc injection by actual docstrings * Docstring formatted Co-authored-by: Quentin Lhoest <[email protected]> Co-authored-by: Mishig Davaadorj <[email protected]> Co-authored-by: Lysandre Debut <[email protected]> Co-authored-by: Mishig Davaadorj <[email protected]> * Rm notebooks from docs actions since they dont exi * Update tsting branch * More docstring * Chore * bump up node version * bump up node * ``` -> ```py for audio_process.mdx * Update .github/workflows/build_documentation.yml Co-authored-by: Quentin Lhoest <[email protected]> * Uodate dev doc build * remove run on PR * fix action * Fix gh doc workflow * forgot this change when merging master * Update build doc Co-authored-by: Steven Liu <[email protected]> Co-authored-by: Quentin Lhoest <[email protected]> Co-authored-by: Quentin Lhoest <[email protected]> Co-authored-by: Lysandre Debut <[email protected]>
https://github.com/huggingface/datasets.git
code:
def subsplit(self, arg=None, k=None, percent=None, weighted=None):  # pylint: disable=redefined-outer-name
    # Note that the percent kwargs redefine the outer name datasets.percent. This
    # is done for consistency (.subsplit(percent=datasets.percent[:40]))
    if sum(bool(x) for x in (arg, k, percent, weighted)) != 1:
        raise ValueError("Only one argument of subsplit should be set.")

    # Auto deduce k
    if isinstance(arg, int):
        k = arg
    elif isinstance(arg, slice):
        percent = arg
    elif isinstance(arg, list):
        weighted = arg
    if not (k or percent or weighted):
        raise ValueError(
            f"Invalid split argument {arg}. Only list, slice and int supported. "
            "One of k, weighted or percent should be set to a non empty value."
        )
token_counts: 285 | file_name: splits.py | language: Python | path: src/datasets/splits.py | commit_id: e35be138148333078284b942ccc9ed7b1d826f97 | repo: datasets | complexity: 17
id: 38,308 | vocab_size: 53 | ast_levels: 12 | nloc: 31 | n_ast_nodes: 220 | n_identifiers: 33 | n_ast_errors: 0 | n_words: 61 | n_whitespaces: 343
fun_name: test_xsum_summarization_same_as_fairseq
commit_message:
Black preview (#17217) * Black preview * Fixup too! * Fix check copies * Use the same version as the CI * Bump black
https://github.com/huggingface/transformers.git
code:
def test_xsum_summarization_same_as_fairseq(self):
    model = BartForConditionalGeneration.from_pretrained("facebook/bart-large-xsum").to(torch_device)
    tok = self.default_tokenizer
    PGE_ARTICLE =
    EXPECTED_SUMMARY = (
        "California's largest power company has begun shutting off electricity to thousands of customers in the"
        " state."
    )
    dct = tok.batch_encode_plus(
        [PGE_ARTICLE],
        max_length=1024,
        padding="max_length",
        truncation=True,
        return_tensors="pt",
    ).to(torch_device)

    hypotheses_batch = model.generate(
        input_ids=dct["input_ids"],
        attention_mask=dct["attention_mask"],
        num_beams=2,
        max_length=62,
        min_length=11,
        length_penalty=1.0,
        no_repeat_ngram_size=3,
        early_stopping=True,
        decoder_start_token_id=model.config.eos_token_id,
    )

    decoded = tok.batch_decode(
        hypotheses_batch,
        skip_special_tokens=True,
    )
    self.assertEqual(EXPECTED_SUMMARY, decoded[0])
token_counts: 143 | file_name: test_modeling_bart.py | language: Python | path: tests/models/bart/test_modeling_bart.py | commit_id: afe5d42d8d1d80af911ed980c2936bfe887078f6 | repo: transformers | complexity: 1
id: 47,646 | vocab_size: 60 | ast_levels: 15 | nloc: 35 | n_ast_nodes: 383 | n_identifiers: 40 | n_ast_errors: 0 | n_words: 85 | n_whitespaces: 430
fun_name: test_branch_list_with_dag_run
commit_message:
Replace usage of `DummyOperator` with `EmptyOperator` (#22974) * Replace usage of `DummyOperator` with `EmptyOperator`
https://github.com/apache/airflow.git
code:
def test_branch_list_with_dag_run(self, mock_get_db_hook):
    branch_op = BranchSQLOperator(
        task_id="make_choice",
        conn_id="mysql_default",
        sql="SELECT 1",
        follow_task_ids_if_true=["branch_1", "branch_2"],
        follow_task_ids_if_false="branch_3",
        dag=self.dag,
    )
    self.branch_1.set_upstream(branch_op)
    self.branch_2.set_upstream(branch_op)
    self.branch_3 = EmptyOperator(task_id="branch_3", dag=self.dag)
    self.branch_3.set_upstream(branch_op)
    self.dag.clear()

    dr = self.dag.create_dagrun(
        run_id="manual__",
        start_date=timezone.utcnow(),
        execution_date=DEFAULT_DATE,
        state=State.RUNNING,
    )

    mock_get_records = mock_get_db_hook.return_value.get_first
    mock_get_records.return_value = [["1"]]

    branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)

    tis = dr.get_task_instances()
    for ti in tis:
        if ti.task_id == "make_choice":
            assert ti.state == State.SUCCESS
        elif ti.task_id == "branch_1":
            assert ti.state == State.NONE
        elif ti.task_id == "branch_2":
            assert ti.state == State.NONE
        elif ti.task_id == "branch_3":
            assert ti.state == State.SKIPPED
        else:
            raise ValueError(f"Invalid task id {ti.task_id} found!")
token_counts: 229 | file_name: test_sql.py | language: Python | path: tests/operators/test_sql.py | commit_id: 49e336ae0302b386a2f47269a6d13988382d975f | repo: airflow | complexity: 6
id: 320,768 | vocab_size: 4 | ast_levels: 9 | nloc: 2 | n_ast_nodes: 30 | n_identifiers: 4 | n_ast_errors: 0 | n_words: 4 | n_whitespaces: 18
fun_name: _model
commit_message:
mypy: Upgrade to PyQt5-stubs 5.15.6.0 For some unknown reason, those new stubs cause a *lot* of things now to be checked by mypy which formerly probably got skipped due to Any being implied somewhere. The stubs themselves mainly improved, with a couple of regressions too. In total, there were some 337 (!) new mypy errors. This commit fixes almost all of them, and the next commit improves a fix to get things down to 0 errors again. Overview of the changes: ==== qutebrowser/app.py - Drop type ignore due to improved stubs. ==== qutebrowser/browser/browsertab.py - Specify the type of _widget members more closely than just QWidget. This is debatable: I suppose the abstract stuff shouldn't need to know anything about the concrete backends at all. But it seems like we cut some corners when initially implementing things, and put some code in browsertab.py just because the APIs of both backends happened to be compatible. Perhaps something to reconsider once we drop QtWebKit and hopefully implement a dummy backend. - Add an additional assertion in AbstractAction.run_string. This is already covered by the isinstance(member, self.action_base) above it, but that's too dynamic for mypy to understand. - Fix the return type of AbstractScroller.pos_px, which is a QPoint (with x and y components), not a single int. - Fix the return type of AbstractScroller.pos_perc, which is a Tuple (with x and y components), not a single int. - Fix the argument types of AbstractScroller.to_perc, as it's possible to pass fractional percentages too. - Specify the type for AbstractHistoryPrivate._history. See above (_widget) re this being debatable. - Fix the return type of AbstractTabPrivate.event_target(), which can be None (see #3888). - Fix the return type of AbstractTabPrivate.run_js_sync, which is Any (the JS return value), not None. - Fix the argument type for AbstractTabPrivate.toggle_inspector: position can be None to use the last used position. - Declare the type of sub-objects of AbstractTab. - Fix the return value of AbstractTab.icon(), which is the QIcon, not None. ==== qutebrowser/browser/commands.py - Make sure the active window is a MainWindow (with a .win_id attribute). ==== qutebrowser/browser/downloadview.py - Add _model() which makes sure that self.model() is a DownloadModel, not None or any other model. This is needed because other methods access a variety of custom attributes on it, e.g. last_index(). ==== qutebrowser/browser/greasemonkey.py - Add an ignore for AbstractDownload.requested_url which we patch onto the downloads. Probably would be nicer to add it as a proper attribute which always gets set by the DownloadManager. ==== qutebrowser/browser/hints.py - Remove type ignores for QUrl.toString(). - Add a new type ignore for combining different URL flags (which works, but is not exactly type safe... still probably a regression in the stubs). - Make sure the things we get back from self._get_keyparser are what we actually expect. Probably should introduce a TypedDict (and/or overloads for _get_keyparser with typing.Literal) to teach mypy about the exact return value. See #7098. This is needed because we access Hint/NormalKeyParser-specific attributes such as .set_inhibited_timout() or .update_bindings(). ==== qutebrowser/browser/inspector.py - Similar changes than in browsertab.py to make some types where we share API (e.g. .setPage()) more concrete. Didn't work out unfortunately, see next commit. ==== qutebrowser/browser/network/pac.py - Remove now unneeded type ignore for signal. 
==== qutebrowser/browser/qtnetworkdownloads.py - Make sure that downloads is a qtnetworkdownloads.DownloadItem (rather than an AbstractDownload), so that we can call ._uses_nam() on it. ==== qutebrowser/browser/qutescheme.py - Remove now unneeded type ignore for QUrl flags. ==== qutebrowser/browser/urlmarks.py - Specify the type of UrlMarkManager._lineparser, as those only get initialized in _init_lineparser of subclasses, so mypy doesn't know it's supposed to exist. ==== qutebrowser/browser/webelem.py - New casts to turn single KeyboardModifier (enum) entries into KeyboardModifiers (flags). Might not be needed anymore with Qt 6. - With that, casting the final value is now unneeded. ==== qutebrowser/browser/webengine/notification.py - Remove now unneeded type ignore for signal. - Make sure the self.sender() we get in HerbeNotificationAdapter._on_finished() is a QProcess, not just any QObject. ==== qutebrowser/browser/webengine/webenginedownloads.py - Remove now unneeded type ignores for signals. ==== qutebrowser/browser/webengine/webengineelem.py - Specify the type of WebEngineElement._tab. - Remove now unneeded type ignore for mixed flags. ==== qutebrowser/browser/webengine/webengineinspector.py - See changes to inspector.py and next commit. - Remove now unneeded type ignore for signal. ==== qutebrowser/browser/webengine/webenginequtescheme.py - Remove now unneeded type ignore for mixed flags. ==== qutebrowser/browser/webengine/webenginesettings.py - Ignore access of .setter attribute which we patch onto QWebEngineProfile. Would be nice to have a subclass or wrapper-class instead. ==== qutebrowser/browser/webengine/webenginetab.py - Specified the type of _widget members more closely than just QWidget. See browsertab.py changes for details. - Remove some now-unneeded type ignores for creating FindFlags. - Specify more concrete types for WebEngineTab members where we actually need to access WebEngine-specific attributes. - Make sure the page we get is our custom WebEnginePage subclass, not just any QWebEnginePage. This is needed because we access custom attributes on it. ==== qutebrowser/browser/webengine/webview.py - Make sure the page we get is our custom WebEnginePage subclass, not just any QWebEnginePage. This is needed because we access custom attributes on it. ==== qutebrowser/browser/webkit/network/networkreply.py - Remove now unneeded type ignores for signals. ==== qutebrowser/browser/webkit/webkitinspector.py - See changes to inspector.py and next commit. ==== qutebrowser/browser/webkit/webkittab.py - Specify the type of _widget members more closely than just QWidget. See browsertab.py changes for details. - Add a type ignore for WebKitAction because our workaround needs to treat them as ints (which is allowed by PyQt, even if not type-safe). - Add new ignores for findText calls: The text is a QString and can be None; the flags are valid despite mypy thinking they aren't (stubs regression?). - Specify the type for WebKitHistoryPrivate._history, because we access WebKit-specific attributes. See above (_widget) re this being debatable. - Make mypy aware that .currentFrame() and .frameAt() can return None (stubs regression?). - Make sure the .page() and .page().networkAccessManager() are our subclasses rather than the more generic QtWebKit objects, as we use custom attributes. - Add new type ignores for signals (stubs regression!) 
==== qutebrowser/browser/webkit/webpage.py - Make sure the .networkAccessManager() is our subclass rather than the more generic QtWebKit object, as we use custom attributes. - Replace a cast by a type ignore. The cast didn't work anymore. ==== qutebrowser/browser/webkit/webview.py - Make sure the .page() is our subclass rather than the more generic QtWebKit object, as we use custom attributes. ==== qutebrowser/commands/userscripts.py - Remove now unneeded type ignore for signal. ==== qutebrowser/completion/completer.py - Add a new _completion() getter (which ensures it actually gets the completion view) rather than accessing the .parent() directly (which could be any QObject). ==== qutebrowser/completion/completiondelegate.py - Make sure self.parent() is a CompletionView (no helper method as there is only one instance). - Remove a now-unneeded type ignore for adding QSizes. ==== qutebrowser/completion/completionwidget.py - Add a ._model() getter which ensures that we get a CompletionModel (with custom attributes) rather than Qt's .model() which can be any QAbstractItemModel (or None). - Removed a now-unneeded type ignore for OR-ing flags. ==== qutebrowser/completion/models/completionmodel.py - Remove now unneeded type ignores for signals. - Ignore a complaint about .set_pattern() not being defined. Completion categories don't share any common parent class, so it would be good to introduce a typing.Protocol for this. See #7098. ==== qutebrowser/components/misccommands.py - Removed a now-unneeded type ignore for OR-ing flags. ==== qutebrowser/components/readlinecommands.py - Make sure QApplication.instance() is a QApplication (and not just a QCoreApplication). This includes the former "not None" check. ==== qutebrowser/components/scrollcommands.py - Add basic annotation for "funcs" dict. Could have a callable protocol to specify it needs a count kwarg, see #7098. ==== qutebrowser/config/stylesheet.py - Correctly specify that stylesheet apply to QWidgets, not any QObject. - Ignore an attr-defined for obj.STYLESHEET. Perhaps could somehow teach mypy about this with overloads and protocols (stylesheet for set_register being None => STYLESHEET needs to be defined, otherwise anything goes), but perhaps not worth the troble. See #7098. ==== qutebrowser/keyinput/keyutils.py - Remove some now-unneeded type ignores and add a cast for using a single enum value as flags. Might need to look at this again with Qt 6 support. ==== qutebrowser/keyinput/modeman.py - Add a FIXME for using a TypedDict, see comments for hints.py above. ==== qutebrowser/mainwindow/mainwindow.py - Remove now-unneeded type ignores for calling with OR-ed flags. - Improve where we cast from WindowType to WindowFlags, no int needed - Use new .tab_bar() getter, see below. ==== qutebrowser/mainwindow/prompt.py - Remove now-unneeded type ignores for calling with OR-ed flags. ==== qutebrowser/mainwindow/statusbar/bar.py - Adjust type ignores around @pyqtProperty. The fact one is still needed seems like a stub regression. ==== qutebrowser/mainwindow/statusbar/command.py - Fix type for setText() override (from QLineEdit): text can be None (QString in C++). ==== qutebrowser/mainwindow/statusbar/url.py - Adjust type ignores around @pyqtProperty. The fact one is still needed seems like a stub regression. ==== qutebrowser/mainwindow/tabbedbrowser.py - Specify that TabDeque manages browser tabs, not any QWidgets. It accesses AbstractTab-specific attributes. - Make sure that the .tabBar() we get is a tabwidget.TabBar, as we access .maybe_hide. 
- Fix the annotations for stored marks: Scroll positions are a QPoint, not int. - Add _current_tab() and _tab_by_idx() wrappers for .currentWidget() and .widget(), which ensures that the return values are valid AbstractTabs (or None for _tab_by_idx). This is needed because we access AbstractTab-specific attributes. - For some places, where the tab can be None, continue using .currentTab() but add asserts. - Remove some now-unneeded [unreachable] ignores, as mypy knows about the None possibility now. ==== qutebrowser/mainwindow/tabwidget.py - Add new tab_bar() and _tab_by_idx() helpers which check that the .tabBar() and .widget() are of type TabBar and AbstractTab, respectively. - Add additional assertions where we expect ._tab_by_idx() to never be None. - Remove dead code in get_tab_fields for handling a None y scroll position. I was unable to find any place in the code where this could be set to None. - Remove some now-unneeded type ignores and casts, as mypy now knows that _type_by_idx() could be None. - Work around a strange instance where mypy complains about not being able to find the type of TabBar.drag_in_progress from TabWidget._toggle_visibility, despite it clearly being shown as a bool *inside* that class without any annotation. - Add a ._tab_widget() getter in TabBar which ensures that the .parent() is in fact a TabWidget. ==== qutebrowser/misc/crashsignal.py - Remove now unneeded type ignores for signals. ==== qutebrowser/misc/editor.py - Remove now unneeded type ignores for signals. ==== qutebrowser/misc/ipc.py - Remove now unneeded type ignores for signals. - Add new type ignores for .error() which is both a signal and a getter (stub regression?). Won't be relevant for Qt 6 anymore, as the signal was renamed to errorOccurred in 5.15. ==== qutebrowser/misc/objects.py - Make sure mypy knows that objects.app is our custom Application (with custom attributes) rather than any QApplication. ==== qutebrowser/utils/objreg.py - Ignore attr-defined for .win_id attributes. Maybe could add a typing.Protocol, but ideally, the whole objreg stuff should die one day anyways. ==== tests/unit/completion/test_completer.py - Make CompletionWidgetStub inherit from CompletionView so that it passes the new isinstance() asserts in completer.py (see above).
https://github.com/qutebrowser/qutebrowser.git
code:
def _model(self):
    return self._completion().model()
token_counts: 16 | file_name: completer.py | language: Python | path: qutebrowser/completion/completer.py | commit_id: a20bb67a878b2e68abf8268c1b0a27f018d01352 | repo: qutebrowser | complexity: 1
id: 93,002 | vocab_size: 9 | ast_levels: 9 | nloc: 16 | n_ast_nodes: 39 | n_identifiers: 5 | n_ast_errors: 0 | n_words: 9 | n_whitespaces: 18
fun_name: parametrize_backend
commit_message:
fix(snuba): Add appropriate `UseCaseKey` for indexer [TET-146] (#36308) * fix(snuba): Add appropriate `UseCaseKey` for indexer Update indexer invocation call to have the appropriate `UseCaseKey` depending on use case. In `src/sentry/sentry_metrics/indexer/base.py::StringIndexer` when using `resolve` and `reverse_resolve` callers should not rely on the default use_case_id. Important changes: - Add required parameter `use_case_id: UseCaseKey` to `get_series` from `src/sentry/snuba/metrics/datasource.py#L612`; - Add required parameter to `get_metrics` in `src/sentry/snuba/metrics/datasource.py` - Add required parameter to `get_tags` in `src/sentry/snuba/metrics/datasource.py` - Add required parameter to `get_tag_values` in `src/sentry/snuba/metrics/datasource.py`
https://github.com/getsentry/sentry.git
code:
def parametrize_backend(cls):
    assert not hasattr(cls, "backend")
    cls.backend = SessionsReleaseHealthBackend()
token_counts: 104 | file_name: test_sessions.py | language: Python | path: tests/snuba/sessions/test_sessions.py | commit_id: cd803d173c72b64d06c0687170bf9a945d0b503c | repo: sentry | complexity: 1
id: 26,573 | vocab_size: 59 | ast_levels: 12 | nloc: 28 | n_ast_nodes: 210 | n_identifiers: 25 | n_ast_errors: 0 | n_words: 86 | n_whitespaces: 216
fun_name: fetch_jwks
commit_message:
Make OIDC plugin public (#9406) * Make OIDC plugin public * Add missing dependency package * Apply changes after review * Update changelog * Apply changes after review * Add const file
https://github.com/saleor/saleor.git
code:
def fetch_jwks(jwks_url) -> Optional[dict]:
    response = None
    try:
        response = requests.get(jwks_url, timeout=REQUEST_TIMEOUT)
        response.raise_for_status()
        jwks = response.json()
    except requests.exceptions.RequestException:
        logger.exception("Unable to fetch jwks from %s", jwks_url)
        raise AuthenticationError("Unable to finalize the authentication process.")
    except json.JSONDecodeError:
        content = response.content if response else "Unable to find the response"
        logger.exception(
            "Unable to decode the response from auth service with jwks. "
            "Response: %s",
            content,
        )
        raise AuthenticationError("Unable to finalize the authentication process.")
    keys = jwks.get("keys", [])
    if not keys:
        logger.warning("List of JWKS keys is empty")
    cache.set(JWKS_KEY, keys, JWKS_CACHE_TIME)
    return keys
token_counts: 122 | file_name: utils.py | language: Python | path: saleor/plugins/openid_connect/utils.py | commit_id: 7d2e77c5f235ca60a2bf3ee02f4f9a8b10b03214 | repo: saleor | complexity: 5
id: 120,553 | vocab_size: 47 | ast_levels: 12 | nloc: 15 | n_ast_nodes: 136 | n_identifiers: 14 | n_ast_errors: 0 | n_words: 50 | n_whitespaces: 90
fun_name: verify_mac_libraries_dont_reference_chkstack
commit_message:
feat: refactor code using pyupgrade This PR upgrades legacy Python code to 3.7+ code using pyupgrade: ```sh pyupgrade --py37-plus --keep-runtime-typing **.py ``` a
https://github.com/google/jax.git
code:
def verify_mac_libraries_dont_reference_chkstack():
    if not _is_mac():
        return
    nm = subprocess.run(
        ["nm", "-g",
         r.Rlocation("org_tensorflow/tensorflow/compiler/xla/python/xla_extension.so")],
        capture_output=True, text=True, check=False)
    if nm.returncode != 0:
        raise RuntimeError(f"nm process failed: {nm.stdout} {nm.stderr}")
    if "____chkstk_darwin" in nm.stdout:
        raise RuntimeError(
            "Mac wheel incorrectly depends on symbol ____chkstk_darwin, which "
            "means that it isn't compatible with older MacOS versions.")
token_counts: 69 | file_name: build_wheel.py | language: Python | path: build/build_wheel.py | commit_id: 17de89b16ac5ee05aee03115d858e67489eab973 | repo: jax | complexity: 4
id: 77,580 | vocab_size: 40 | ast_levels: 15 | nloc: 51 | n_ast_nodes: 223 | n_identifiers: 27 | n_ast_errors: 0 | n_words: 51 | n_whitespaces: 337
fun_name: test_title_column
commit_message:
Allow passing arbitrary link attributes to TitleColumn
https://github.com/wagtail/wagtail.git
code:
def test_title_column(self):
    root_page = Page.objects.filter(depth=2).first()
    blog = Site.objects.create(
        hostname="blog.example.com", site_name="My blog", root_page=root_page
    )
    gallery = Site.objects.create(
        hostname="gallery.example.com", site_name="My gallery", root_page=root_page
    )
    data = [blog, gallery]

    table = Table(
        [
            TitleColumn(
                "hostname",
                url_name="wagtailsites:edit",
                link_classname="choose-site",
                link_attrs={"data-chooser": "yes"},
            ),
            Column("site_name", label="Site name"),
        ],
        data,
    )

    html = self.render_component(table)
    self.assertHTMLEqual(
        html,
        % (blog.pk, gallery.pk),
    )
token_counts: 136 | file_name: test_tables.py | language: Python | path: wagtail/admin/tests/ui/test_tables.py | commit_id: 5994cc43dfc5cc1ed891ab78eff3a3bcf56f6830 | repo: wagtail | complexity: 1
id: 36,915 | vocab_size: 35 | ast_levels: 9 | nloc: 3 | n_ast_nodes: 65 | n_identifiers: 10 | n_ast_errors: 1 | n_words: 38 | n_whitespaces: 56
fun_name: _set_gradient_checkpointing
commit_message:
RegNet (#16188) * base model done * make style * done * added files * Apply suggestions from code review Co-authored-by: NielsRogge <[email protected]> * Apply suggestions from code review Co-authored-by: Sylvain Gugger <[email protected]> * Trigger doc build * resolved conversations * resolved conversations * seer models * minor changes * minor changes * make fixup * glob variables * minor changes * fix copies * config when possibile * resolved conflicts * resolved conflicts * resolved conflicts * CI * conversion script for 10b param * fixed for 10b model * minor updates in the doc + make style * removed unused code * Apply suggestions from code review Co-authored-by: NielsRogge <[email protected]> * removed unused code * removed unused code * updated modeling_utils from main Co-authored-by: NielsRogge <[email protected]> Co-authored-by: Sylvain Gugger <[email protected]> Co-authored-by: Sylvain Gugger <[email protected]>
https://github.com/huggingface/transformers.git
code:
def _set_gradient_checkpointing(self, module, value=False):
    if isinstance(module, RegNetModel):
        module.gradient_checkpointing = value


REGNET_START_DOCSTRING = r
REGNET_INPUTS_DOCSTRING = r


@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
ast_errors: @add_start_docstrings( "The bare RegNet model outputting raw features without any specific head on top.", REGNET_START_DOCSTRING, )
token_counts: 24 | file_name: modeling_regnet.py | language: Python | path: src/transformers/models/regnet/modeling_regnet.py | commit_id: af14c61973effd8b8077ac61b3f24bdd4a632f25 | repo: transformers | complexity: 2
id: 157,142 | vocab_size: 76 | ast_levels: 15 | nloc: 32 | n_ast_nodes: 376 | n_identifiers: 32 | n_ast_errors: 0 | n_words: 119 | n_whitespaces: 548
fun_name: _loc
commit_message:
Generalize array checking and remove `pd.Index` call in `_get_partitions` (#9634)
https://github.com/dask/dask.git
code:
def _loc(self, iindexer, cindexer):
    if isinstance(iindexer, Series):
        return self._loc_series(iindexer, cindexer)
    elif isinstance(iindexer, Array):
        return self._loc_array(iindexer, cindexer)
    elif callable(iindexer):
        return self._loc(iindexer(self.obj), cindexer)

    if self.obj.known_divisions:
        iindexer = self._maybe_partial_time_string(iindexer)
        if isinstance(iindexer, slice):
            return self._loc_slice(iindexer, cindexer)
        elif is_series_like(iindexer) and not is_bool_dtype(iindexer.dtype):
            return self._loc_list(iindexer.values, cindexer)
        elif isinstance(iindexer, list) or is_arraylike(iindexer):
            return self._loc_list(iindexer, cindexer)
        else:
            # element should raise KeyError
            return self._loc_element(iindexer, cindexer)
    else:
        if isinstance(iindexer, (list, np.ndarray)) or (
            is_series_like(iindexer) and not is_bool_dtype(iindexer.dtype)
        ):
            # applying map_partitions to each partition
            # results in duplicated NaN rows
            msg = (
                "Cannot index with list against unknown division. "
                "Try setting divisions using ``ddf.set_index``"
            )
            raise KeyError(msg)
        elif not isinstance(iindexer, slice):
            iindexer = slice(iindexer, iindexer)

        meta = self._make_meta(iindexer, cindexer)
        return self.obj.map_partitions(
            methods.try_loc, iindexer, cindexer, meta=meta
        )
token_counts: 242 | file_name: indexing.py | language: Python | path: dask/dataframe/indexing.py | commit_id: 8be183c570dd953aa16d790afb709786b3d7cbf8 | repo: dask | complexity: 14
id: 293,763 | vocab_size: 44 | ast_levels: 11 | nloc: 42 | n_ast_nodes: 448 | n_identifiers: 17 | n_ast_errors: 0 | n_words: 117 | n_whitespaces: 343
fun_name: test_lazy_state_handles_same_last_updated_and_last_changed
commit_message:
Separate attrs into another table (reduces database size) (#68224)
https://github.com/home-assistant/core.git
code:
async def test_lazy_state_handles_same_last_updated_and_last_changed(caplog):
    now = datetime(2021, 6, 12, 3, 4, 1, 323, tzinfo=dt_util.UTC)
    row = PropertyMock(
        entity_id="sensor.valid",
        state="off",
        shared_attrs='{"shared":true}',
        last_updated=now,
        last_changed=now,
    )
    lstate = LazyState(row)
    assert lstate.as_dict() == {
        "attributes": {"shared": True},
        "entity_id": "sensor.valid",
        "last_changed": "2021-06-12T03:04:01.000323+00:00",
        "last_updated": "2021-06-12T03:04:01.000323+00:00",
        "state": "off",
    }
    assert lstate.last_updated == row.last_updated
    assert lstate.last_changed == row.last_changed
    assert lstate.as_dict() == {
        "attributes": {"shared": True},
        "entity_id": "sensor.valid",
        "last_changed": "2021-06-12T03:04:01.000323+00:00",
        "last_updated": "2021-06-12T03:04:01.000323+00:00",
        "state": "off",
    }
    lstate.last_updated = datetime(2020, 6, 12, 3, 4, 1, 323, tzinfo=dt_util.UTC)
    assert lstate.as_dict() == {
        "attributes": {"shared": True},
        "entity_id": "sensor.valid",
        "last_changed": "2021-06-12T03:04:01.000323+00:00",
        "last_updated": "2020-06-12T03:04:01.000323+00:00",
        "state": "off",
    }
    lstate.last_changed = datetime(2020, 6, 12, 3, 4, 1, 323, tzinfo=dt_util.UTC)
    assert lstate.as_dict() == {
        "attributes": {"shared": True},
        "entity_id": "sensor.valid",
        "last_changed": "2020-06-12T03:04:01.000323+00:00",
        "last_updated": "2020-06-12T03:04:01.000323+00:00",
        "state": "off",
    }
token_counts: 261 | file_name: test_models.py | language: Python | path: tests/components/recorder/test_models.py | commit_id: 9215702388eef03c7c3ed9f756ea0db533d5beec | repo: core | complexity: 1
id: 101,335 | vocab_size: 20 | ast_levels: 10 | nloc: 12 | n_ast_nodes: 107 | n_identifiers: 12 | n_ast_errors: 0 | n_words: 24 | n_whitespaces: 88
fun_name: terminate_queues
commit_message:
Bugfix: convert - Gif Writer - Fix non-launch error on Gif Writer - convert plugins - linting - convert/fs_media/preview/queue_manager - typing - Change convert items from dict to Dataclass
https://github.com/deepfakes/faceswap.git
code:
def terminate_queues(self) -> None:
    logger.debug("QueueManager terminating all queues")
    self.shutdown.set()
    self._flush_queues()
    for q_name, queue in self.queues.items():
        logger.debug("QueueManager terminating: '%s'", q_name)
        queue.put("EOF")
    logger.debug("QueueManager terminated all queues")
token_counts: 59 | file_name: queue_manager.py | language: Python | path: lib/queue_manager.py | commit_id: 1022651eb8a7741014f5d2ec7cbfe882120dfa5f | repo: faceswap | complexity: 2
id: 275,526 | vocab_size: 5 | ast_levels: 6 | nloc: 2 | n_ast_nodes: 18 | n_identifiers: 3 | n_ast_errors: 0 | n_words: 5 | n_whitespaces: 19
fun_name: _transform_loss
commit_message:
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
code:
def _transform_loss(self, loss):
    return loss
token_counts: 10 | file_name: optimizer_v2.py | language: Python | path: keras/optimizers/optimizer_v2/optimizer_v2.py | commit_id: 84afc5193d38057e2e2badf9c889ea87d80d8fbf | repo: keras | complexity: 1
id: 98,544 | vocab_size: 6 | ast_levels: 6 | nloc: 2 | n_ast_nodes: 20 | n_identifiers: 5 | n_ast_errors: 0 | n_words: 6 | n_whitespaces: 20
fun_name: get_next_event_id
commit_message:
ref: clean up sentry flake8 plugin (#33847) * fix: Remove unused `# noqa` lint disable comments * ref: clean up sentry flake8 plugin - remove S005: pyupgrade handles this for us - remove `pycodestyle` handling: flake8 does this natively - clean up the ignore list and use extend-ignore
https://github.com/getsentry/sentry.git
def get_next_event_id(self, event, snuba_filter): raise NotImplementedError
12
base.py
Python
src/sentry/eventstore/base.py
94c896a4a3663abbd31775957f1aa5448fde5491
sentry
1
269,617
11
7
2
67
12
1
11
14
prod
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def prod(x, axis=None, keepdims=False): return tf.reduce_prod(x, axis, keepdims) @keras_export("keras.backend.cumsum") @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs
@keras_export("keras.backend.cumsum") @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs
25
backend.py
Python
keras/backend.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
1
111,194
31
12
9
175
22
0
42
69
get_characters_loss
Auto-format code with black (#10209) * Auto-format code with black * add black requirement to dev dependencies and pin to 22.x * ignore black dependency for comparison with setup.cfg Co-authored-by: explosion-bot <[email protected]> Co-authored-by: svlandeg <[email protected]>
https://github.com/explosion/spaCy.git
def get_characters_loss(ops, docs, prediction, nr_char): target_ids = numpy.vstack([doc.to_utf8_array(nr_char=nr_char) for doc in docs]) target_ids = target_ids.reshape((-1,)) target = ops.asarray(to_categorical(target_ids, n_classes=256), dtype="f") target = target.reshape((-1, 256 * nr_char)) diff = prediction - target loss = (diff**2).sum() d_target = diff / float(prediction.shape[0]) return loss, d_target
112
multi_task.py
Python
spacy/ml/models/multi_task.py
91ccacea12a46c62ccb5e7f6de891a37cb71e184
spaCy
2
259,914
9
11
21
45
7
0
9
24
get_conda_environment_content
CI: move Linux and MacOS Azure builds to conda lock files (#22448) Co-authored-by: Olivier Grisel <[email protected]> Co-authored-by: Thomas J. Fan <[email protected]>
https://github.com/scikit-learn/scikit-learn.git
def get_conda_environment_content(build_metadata): template = environment.from_string( .strip() ) return template.render(build_metadata=build_metadata)
26
update_environments_and_lock_files.py
Python
build_tools/azure/update_environments_and_lock_files.py
f862129f36786acbae3d9f2d161bbb72d77b87ec
scikit-learn
1
266,001
11
13
2
66
7
0
11
17
title
Closes #10698: Omit app label from content type in table columns
https://github.com/netbox-community/netbox.git
def title(value): return ' '.join([w[0].upper() + w[1:] for w in str(value).split()])
39
utils.py
Python
netbox/utilities/utils.py
0ad7ae28377f44ff7d1ed119a6ff7a8f43bf8e91
netbox
2
154,522
11
10
6
51
8
0
11
65
get_key
REFACTOR-#5009: use RayWrapper.materialize instead of ray.get (#5010) Signed-off-by: Myachev <[email protected]>
https://github.com/modin-project/modin.git
def get_key(self): return ( RayWrapper.materialize(self.key) if isinstance(self.key, ray.ObjectRef) else self.key )
32
partition.py
Python
modin/core/execution/ray/implementations/cudf_on_ray/partitioning/partition.py
1dc16415333bf2428ee2b1f4d31ff94e66b9a0a6
modin
2
181,640
9
8
4
40
6
0
10
22
test_CategoricalSelector_fit
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
https://github.com/EpistasisLab/tpot.git
def test_CategoricalSelector_fit(): op = CategoricalSelector() ret_op = op.fit(iris_data) assert ret_op==op
22
feature_transformers_tests.py
Python
tests/feature_transformers_tests.py
388616b6247ca4ea8de4e2f340d6206aee523541
tpot
1
155,537
115
15
51
391
43
0
153
807
shift
Add groupby shift method (#8522) Implements the shift `method` following the `transform` and `apply` methods.
https://github.com/dask/dask.git
def shift(self, periods=1, freq=None, axis=0, fill_value=None, meta=no_default): if meta is no_default: with raise_on_meta_error("groupby.shift()", udf=False): meta_kwargs = _extract_meta( { "periods": periods, "freq": freq, "axis": axis, "fill_value": fill_value, }, nonempty=True, ) meta = self._meta_nonempty.shift(**meta_kwargs) msg = ( "`meta` is not specified, inferred from partial data. " "Please provide `meta` if the result is unexpected.\n" " Before: .shift(1)\n" " After: .shift(1, meta={'x': 'f8', 'y': 'f8'}) for dataframe result\n" " or: .shift(1, meta=('x', 'f8')) for series result" ) warnings.warn(msg, stacklevel=2) meta = make_meta(meta, parent_meta=self._meta.obj) # Validate self.by if isinstance(self.by, list) and any( isinstance(item, Series) for item in self.by ): raise NotImplementedError( "groupby-shift with a multiple Series is currently not supported" ) df = self.obj should_shuffle = not (df.known_divisions and df._contains_index_name(self.by)) if should_shuffle: df2, by = self._shuffle(meta) else: df2 = df by = self.by # Perform embarrassingly parallel groupby-shift result = map_partitions( _groupby_slice_shift, df2, by, self._slice, periods=periods, freq=freq, axis=axis, fill_value=fill_value, token="groupby-shift", group_keys=self.group_keys, meta=meta, **self.observed, **self.dropna, ) return result
246
groupby.py
Python
dask/dataframe/groupby.py
336aac39ee8a616ac2645e532392123ae1bfddd1
dask
7
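The dask record above adds a groupby `shift` method. A minimal usage sketch (the data and column names are illustrative, not taken from the dask repository) showing how `meta` is supplied to avoid the inference warning emitted in the implementation:

import pandas as pd
import dask.dataframe as dd

# Small example frame: two groups, one numeric column.
pdf = pd.DataFrame({"g": ["a", "a", "b", "b"], "x": [1.0, 2.0, 3.0, 4.0]})
ddf = dd.from_pandas(pdf, npartitions=2)

# Shift within each group; passing `meta` skips the "inferred from partial
# data" warning raised in the method shown above.
shifted = ddf.groupby("g").shift(1, meta={"x": "f8"})
print(shifted.compute())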
273,581
65
17
28
212
18
0
105
441
_fix_unknown_dimension
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def _fix_unknown_dimension(self, input_shape, output_shape): output_shape = list(output_shape) msg = ( "total size of new array must be unchanged, " "input_shape = {}, output_shape = {}".format( input_shape, output_shape ) ) known, unknown = 1, None for index, dim in enumerate(output_shape): if dim < 0: if unknown is None: unknown = index else: raise ValueError( f"There must be at most one unknown dimension in output_shape. " f"Received: output_shape={output_shape}." ) else: known *= dim original = np.prod(input_shape, dtype=int) if unknown is not None: if known == 0 or original % known != 0: raise ValueError(msg) output_shape[unknown] = original // known elif original != known: raise ValueError(msg) return output_shape
128
reshape.py
Python
keras/layers/reshaping/reshape.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
8
37,344
28
10
12
114
12
0
31
131
test_small_model_long_context_cls_slow
Long QuestionAnsweringPipeline fix. (#16778) * Temporary commit with the long QA fix. * Adding slow tests covering this fix. * Removing fast test as it doesn't fail anyway.

https://github.com/huggingface/transformers.git
def test_small_model_long_context_cls_slow(self): question_answerer = pipeline( "question-answering", model="deepset/roberta-base-squad2", handle_impossible_answer=True, max_seq_length=512, ) outputs = question_answerer( question="What country is Paris the capital of?", context=, ) self.assertEqual(nested_simplify(outputs), {"score": 0.988, "start": 0, "end": 0, "answer": ""})
66
test_pipelines_question_answering.py
Python
tests/pipelines/test_pipelines_question_answering.py
6620f60c0abb56d441ed3ef627d9a87f27dba479
transformers
1
132,846
17
14
9
112
17
0
17
100
add_trial
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
https://github.com/ray-project/ray.git
def add_trial(self, trial): self._trials.append(trial) if trial.status != Trial.TERMINATED: self._live_trials.add(trial) with warn_if_slow("scheduler.on_trial_add"): self._scheduler_alg.on_trial_add( TrialRunnerWrapper(self, runner_whitelist_attr={"search_alg"}), trial ) self.trial_executor.mark_trial_to_checkpoint(trial)
66
trial_runner.py
Python
python/ray/tune/trial_runner.py
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
ray
2
268,015
4
6
2
23
5
0
4
11
get_controller_target_connections
ansible-test - Use more native type hints. (#78435) * ansible-test - Use more native type hints. Simple search and replace to switch from comments to native type hints for return types of functions with no arguments. * ansible-test - Use more native type hints. Conversion of simple single-line function annotation type comments to native type hints. * ansible-test - Use more native type hints. Conversion of single-line function annotation type comments with default values to native type hints. * ansible-test - Use more native type hints. Manual conversion of type annotation comments for functions which have pylint directives.
https://github.com/ansible/ansible.git
def get_controller_target_connections(self) -> t.List[SshConnection]:
13
host_profiles.py
Python
test/lib/ansible_test/_internal/host_profiles.py
3eb0485dd92c88cc92152d3656d94492db44b183
ansible
1
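The ansible-test commit above describes converting comment-style type annotations into native type hints. A hypothetical before/after sketch (the class and method names are illustrative, not taken from ansible-test):

import typing as t

class HostProfileExample:
    # Before: the return type is expressed as a type comment.
    def get_names(self):  # type: () -> t.List[str]
        return ["controller", "target"]

    # After: the same information as a native return annotation.
    def get_names_annotated(self) -> t.List[str]:
        return ["controller", "target"]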
162,026
6
8
33
30
6
0
6
20
unify
Update URLs in error messages to refer to RTD docs. As title. This replaces references to documentation at numba.pydata.org with numba.readthedocs.io. It also fixes URLs that were invalid.
https://github.com/numba/numba.git
def unify(self, raise_errors=True): typdict = utils.UniqueDict()
193
typeinfer.py
Python
numba/core/typeinfer.py
fc1e0cef7f574826114185a27d0686f527b7ffaf
numba
11
46,790
11
12
4
58
7
0
11
27
commit_sha
Prepare Breeze2 for prime time :) (#22713) This is a review and clean-up for all the parameters and commands for Breeze2 in order to prepare it for being used by the contributors. There are various small fixes here and there, removal of duplicated code, refactoring and moving code around as well as cleanup and review of all the parameters used for all implemented commands. The parameters, default values and their behaviours were updated to match the "new" life of Breeze rather than the old one. Some improvements are made to the autocomplete and click help messages printed. Full list of choices is always displayed, parameters are grouped according to their target audience, and they were sorted according to importance and frequency of use. Various messages have been colourised according to their meaning - warnings as yellow, errors as red and informational messages as bright_blue. The `dry-run` option has been added to just show what would have been run without actually running some potentially "write" commands (read commands are still executed) so that you can easily verify and manually copy and execute the commands with the option to modify them before. The `dry_run` and `verbose` options are now used for all commands. The "main" command now runs "shell" by default similarly to the original Breeze. All "shortcut" parameters have been standardized - i.e. common options (verbose/dry run/help) have one and all common flags that are likely to be used often have an assigned shortcut. The "stop" and "cleanup" commands have been added as they are necessary for the average user to complete the regular usage cycle. Documentation for all the important methods has been updated.
https://github.com/apache/airflow.git
def commit_sha(): return run_command( ['git', 'rev-parse', 'HEAD'], capture_output=True, text=True, check=False ).stdout.strip()
34
run_utils.py
Python
dev/breeze/src/airflow_breeze/utils/run_utils.py
4ffd4f09532fceb67675fce4c1f5cd383eff992e
airflow
1
172,905
219
23
124
1,748
115
1
396
2,178
render_adv_search_results
Refactored load read status for web access and opds access
Refactored and removed discover html page
Bugfix show author
Bugfix open dialog in author page
Fix for #2341 (advanced search with linked read column and read column having a higher number than number of available custom columns)
https://github.com/janeczku/calibre-web.git
def render_adv_search_results(term, offset=None, order=None, limit=None): sort_param = order[0] if order else [db.Books.sort] pagination = None cc = get_cc_columns(filter_config_custom_read=True) calibre_db.session.connection().connection.connection.create_function("lower", 1, db.lcase) query = calibre_db.generate_linked_query(config.config_read_column, db.Books) q = query.outerjoin(db.books_series_link, db.books_series_link.c.book == db.Books.id) \ .outerjoin(db.Series) \ .filter(calibre_db.common_filters(True)) # parse multiselects to a complete dict tags = dict() elements = ['tag', 'serie', 'shelf', 'language', 'extension'] for element in elements: tags['include_' + element] = term.get('include_' + element) tags['exclude_' + element] = term.get('exclude_' + element) author_name = term.get("author_name") book_title = term.get("book_title") publisher = term.get("publisher") pub_start = term.get("publishstart") pub_end = term.get("publishend") rating_low = term.get("ratinghigh") rating_high = term.get("ratinglow") description = term.get("comment") read_status = term.get("read_status") if author_name: author_name = author_name.strip().lower().replace(',', '|') if book_title: book_title = book_title.strip().lower() if publisher: publisher = publisher.strip().lower() search_term = [] cc_present = False for c in cc: if c.datatype == "datetime": column_start = term.get('custom_column_' + str(c.id) + '_start') column_end = term.get('custom_column_' + str(c.id) + '_end') if column_start: search_term.extend([u"{} >= {}".format(c.name, format_date(datetime.strptime(column_start, "%Y-%m-%d").date(), format='medium', locale=get_locale()) )]) cc_present = True if column_end: search_term.extend([u"{} <= {}".format(c.name, format_date(datetime.strptime(column_end, "%Y-%m-%d").date(), format='medium', locale=get_locale()) )]) cc_present = True elif term.get('custom_column_' + str(c.id)): search_term.extend([(u"{}: {}".format(c.name, term.get('custom_column_' + str(c.id))))]) cc_present = True if any(tags.values()) or author_name or book_title or \ publisher or pub_start or pub_end or rating_low or rating_high \ or description or cc_present or read_status: search_term, pub_start, pub_end = extend_search_term(search_term, author_name, book_title, publisher, pub_start, pub_end, tags, rating_high, rating_low, read_status) # q = q.filter() if author_name: q = q.filter(db.Books.authors.any(func.lower(db.Authors.name).ilike("%" + author_name + "%"))) if book_title: q = q.filter(func.lower(db.Books.title).ilike("%" + book_title + "%")) if pub_start: q = q.filter(func.datetime(db.Books.pubdate) > func.datetime(pub_start)) if pub_end: q = q.filter(func.datetime(db.Books.pubdate) < func.datetime(pub_end)) q = adv_search_read_status(q, read_status) if publisher: q = q.filter(db.Books.publishers.any(func.lower(db.Publishers.name).ilike("%" + publisher + "%"))) q = adv_search_text(q, tags['include_tag'], tags['exclude_tag'], db.Tags.id) q = adv_search_text(q, tags['include_serie'], tags['exclude_serie'], db.Series.id) q = adv_search_text(q, tags['include_extension'], tags['exclude_extension'], db.Data.format) q = adv_search_shelf(q, tags['include_shelf'], tags['exclude_shelf']) q = adv_search_language(q, tags['include_language'], tags['exclude_language']) q = adv_search_ratings(q, rating_high, rating_low, ) if description: q = q.filter(db.Books.comments.any(func.lower(db.Comments.text).ilike("%" + description + "%"))) # search custom culumns try: q = adv_search_custom_columns(cc, term, q) except AttributeError as ex: 
log.error_or_exception(ex) flash(_("Error on search for custom columns, please restart Calibre-Web"), category="error") q = q.order_by(*sort_param).all() flask_session['query'] = json.dumps(term) ub.store_combo_ids(q) result_count = len(q) if offset is not None and limit is not None: offset = int(offset) limit_all = offset + int(limit) pagination = Pagination((offset / (int(limit)) + 1), limit, result_count) else: offset = 0 limit_all = result_count entries = calibre_db.order_authors(q[offset:limit_all], list_return=True, combined=True) return render_title_template('search.html', adv_searchterm=search_term, pagination=pagination, entries=entries, result_count=result_count, title=_(u"Advanced Search"), page="advsearch", order=order[1]) @web.route("/advsearch", methods=['GET']) @login_required_if_no_ano
@web.route("/advsearch", methods=['GET']) @login_required_if_no_ano
1,056
web.py
Python
cps/web.py
32a3c45ee0f7e13bd61075f32a4dcebc415585a1
calibre-web
31
2,284
43
8
2
59
5
0
109
286
all
Added conversion of Syft objects to JSON; working client.users table
https://github.com/OpenMined/PySyft.git
def all(self) -> List[SyftObject]: return self.find({}) # def delete(self, **kwargs: Any) -> None: # # session_local = sessionmaker(autocommit=False, autoflush=False, bind=self.db)() # session_local.query(self._schema).filter_by(**kwargs).delete() # session_local.commit() # session_local.close() # def modify(self, query: Dict[Any, Any], values: Dict[Any, Any]) -> None: # # session_local = sessionmaker(autocommit=False, autoflush=False, bind=self.db)() # session_local.query(self._schema).filter_by(**query).update(values) # session_local.commit() # session_local.close() # def contain(self, **kwargs: Any) -> bool: # session_local = sessionmaker(autocommit=False, autoflush=False, bind=self.db)() # objects = session_local.query(self._schema).filter_by(**kwargs).all() # session_local.close() # return len(objects) != 0 # def __len__(self) -> int: # session_local = sessionmaker(autocommit=False, autoflush=False, bind=self.db)() # result = session_local.query(self._schema).count() # session_local.close() # return result # def clear(self) -> None: # local_session = sessionmaker(bind=self.db)() # local_session.query(self._schema).delete() # local_session.commit() # local_session.close()
18
database_manager.py
Python
packages/syft/src/syft/core/node/common/node_manager/database_manager.py
99efeb99fc7753a28cbf53906b6502aadf01fcf1
PySyft
1
267,791
6
7
3
29
5
0
6
20
root_namespace
ansible-test - Use more native type hints. (#78435) * ansible-test - Use more native type hints. Simple search and replace to switch from comments to native type hints for return types of functions with no arguments. * ansible-test - Use more native type hints. Conversion of simple single-line function annotation type comments to native type hints. * ansible-test - Use more native type hints. Conversion of single-line function annotation type comments with default values to native type hints. * ansible-test - Use more native type hints. Manual conversion of type annotation comments for functions which have pylint directives.
https://github.com/ansible/ansible.git
def root_namespace(self) -> t.Any: return self.namespaces[0]
17
parsers.py
Python
test/lib/ansible_test/_internal/cli/argparsing/parsers.py
3eb0485dd92c88cc92152d3656d94492db44b183
ansible
1
203,275
58
12
17
146
12
0
65
248
test_body_after_POST_multipart_related
Refs #33476 -- Refactored problematic code before reformatting by Black.

In these cases Black produces unexpected results, e.g.

def make_random_password(
    self, length=10, allowed_chars='abcdefghjkmnpqrstuvwxyz'
    'ABCDEFGHJKLMNPQRSTUVWXYZ'
    '23456789',
):

or

cursor.execute("""
SELECT ...
""",
    [table name],
)
https://github.com/django/django.git
def test_body_after_POST_multipart_related(self): # Ticket #9054 # There are cases in which the multipart data is related instead of # being a binary upload, in which case it should still be accessible # via body. payload_data = b"\r\n".join([ b'--boundary', b'Content-ID: id; name="name"', b'', b'value', b'--boundary--' ]) payload = FakePayload(payload_data) request = WSGIRequest({ 'REQUEST_METHOD': 'POST', 'CONTENT_TYPE': 'multipart/related; boundary=boundary', 'CONTENT_LENGTH': len(payload), 'wsgi.input': payload, }) self.assertEqual(request.POST, {}) self.assertEqual(request.body, payload_data)
83
tests.py
Python
tests/requests/tests.py
c5cd8783825b5f6384417dac5f3889b4210b7d08
django
1
101,209
13
12
6
72
11
0
16
84
has_thumbnails
lib.align updates: - alignments.py - Add typed dicts for imported alignments - Explicitly check for presence of thumb value in alignments dict - linting - detected_face.py - Typing - Linting - Legacy support for pre-aligned face - Update dependencies to new property names
https://github.com/deepfakes/faceswap.git
def has_thumbnails(self): retval = all(face.get("thumb") for frame in self._alignments_dict.values() for face in frame["faces"]) logger.trace(retval) return retval
42
alignments.py
Python
lib/align/alignments.py
5e73437be47f2410439a3c6716de96354e6a0c94
faceswap
3
208,278
47
13
8
126
11
0
59
141
_freeze_gid
Canvas.py doc enhancement (#7902) * Added reference in comment to issue #6973 regarding slow downs when using a group with a generator of many tasks * Added TODO comment to group.skew() to consider removing it * Enhanced doc for canvas.group.from_dict() * Enhanced doc for canvas.group._prepared() * Enhanced doc for canvas.group._apply_tasks() * Enhanced doc for canvas.group._freeze_gid() * Enhanced doc for canvas.group._freeze_unroll() * Enhanced doc for canvas.group._unroll_tasks() * Enhanced doc for canvas.group._freeze_tasks() * Enhanced doc for canvas.group._freeze_group_tasks() * Update celery/canvas.py Co-authored-by: Omer Katz <[email protected]> * Added example doc for group.from_dict() Co-authored-by: Omer Katz <[email protected]>
https://github.com/celery/celery.git
def _freeze_gid(self, options): # remove task_id and use that as the group_id, # if we don't remove it then every task will have the same id... options = {**self.options, **{ k: v for k, v in options.items() if k not in self._IMMUTABLE_OPTIONS or k not in self.options }} options['group_id'] = group_id = ( options.pop('task_id', uuid())) return options, group_id, options.get('root_id')
77
canvas.py
Python
celery/canvas.py
4e2280b12958edd0acdcd40e4bb845bbc3070791
celery
4
38,996
4
10
2
35
7
0
4
10
get_expert_parallel_world_size
MoE inference + PR-MoE model support (#1705) Co-authored-by: Reza Yazdani <[email protected]> Co-authored-by: Zhewei Yao <[email protected]> Co-authored-by: Ammar Ahmad Awan <[email protected]> Co-authored-by: Jeff Rasley <[email protected]> Co-authored-by: Samyam Rajbhandari <[email protected]>
https://github.com/microsoft/DeepSpeed.git
def get_expert_parallel_world_size(group_name): return torch.distributed.get_world_size(group=get_expert_parallel_group(group_name))
20
groups.py
Python
deepspeed/utils/groups.py
e46d808a1b6cb7e04cb2806e38547b1e3e50c25a
DeepSpeed
1
46,793
39
13
14
214
19
0
48
126
fix_group_permissions
Prepare Breeze2 for prime time :) (#22713) This is a review and clean-up for all the parameters and commands for Breeze2 in order to prepare it for being used by the contributors. There are various small fixes here and there, removal of duplicated code, refactoring and moving code around as well as cleanup and review of all the parameters used for all implemented commands. The parameters, default values and their behaviours were updated to match the "new" life of Breeze rather than the old one. Some improvements are made to the autocomplete and click help messages printed. Full list of choices is always displayed, parameters are grouped according to their target audience, and they were sorted according to importance and frequency of use. Various messages have been colourised according to their meaning - warnings as yellow, errors as red and informational messages as bright_blue. The `dry-run` option has been added to just show what would have been run without actually running some potentially "write" commands (read commands are still executed) so that you can easily verify and manually copy and execute the commands with the option to modify them before. The `dry_run` and `verbose` options are now used for all commands. The "main" command now runs "shell" by default similarly to the original Breeze. All "shortcut" parameters have been standardized - i.e. common options (verbose/dry run/help) have one and all common flags that are likely to be used often have an assigned shortcut. The "stop" and "cleanup" commands have been added as they are necessary for the average user to complete the regular usage cycle. Documentation for all the important methods has been updated.
https://github.com/apache/airflow.git
def fix_group_permissions(): console.print("[bright_blue]Fixing group permissions[/]") files_to_fix_result = run_command(['git', 'ls-files', './'], capture_output=True, text=True) if files_to_fix_result.returncode == 0: files_to_fix = files_to_fix_result.stdout.strip().split('\n') for file_to_fix in files_to_fix: change_file_permission(Path(file_to_fix)) directories_to_fix_result = run_command( ['git', 'ls-tree', '-r', '-d', '--name-only', 'HEAD'], capture_output=True, text=True ) if directories_to_fix_result.returncode == 0: directories_to_fix = directories_to_fix_result.stdout.strip().split('\n') for directory_to_fix in directories_to_fix: change_directory_permission(Path(directory_to_fix))
123
run_utils.py
Python
dev/breeze/src/airflow_breeze/utils/run_utils.py
4ffd4f09532fceb67675fce4c1f5cd383eff992e
airflow
5
105,716
23
13
7
91
14
0
29
57
_load_table_data
Fix bugs in msr_sqa dataset (#3715) * Fix problems in msr_sqa * Update metadata JSON * Update version * Update dummy data version * Update metadata JSON Co-authored-by: Tianbao Xie <[email protected]> Co-authored-by: Albert Villanova del Moral <[email protected]>
https://github.com/huggingface/datasets.git
def _load_table_data(table_file): rows = [] table_data = pd.read_csv(table_file) # the first line is header header = list(table_data.columns) for row_data in table_data.values: rows.append([str(_) for _ in list(row_data)]) return header, rows
55
msr_sqa.py
Python
datasets/msr_sqa/msr_sqa.py
55924c5e3b823a3b1206269bb0892cd3a9508570
datasets
3
28,887
12
13
6
70
8
0
12
38
call_event
[Change] Change the way transactions are handled in mutations (#10606) * refactor account, app, attribute mutations * add checkout refactor * Change transactions on all mutations to context, and use call_event method to trigger webhooks * remove comments * refactor call_event and move app load outside transaction in few places * remove redundant code from merge conflicts * switch calling call_event to more readable way * fix missed event call * refactor and add transaction in permission group * move call_event function to utils, fix few event calls after review * fix one event call after review * fix transaction scope
https://github.com/saleor/saleor.git
def call_event(func_obj, *func_args): connection = transaction.get_connection() if connection.in_atomic_block: transaction.on_commit(lambda: func_obj(*func_args)) else: func_obj(*func_args)
40
events.py
Python
saleor/core/utils/events.py
89786f24b5296a23c093fcfea90893292473b275
saleor
2
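A hedged usage sketch of the `call_event` helper shown in the saleor record above: inside an atomic block the callback is deferred to `transaction.on_commit`, otherwise it fires immediately. `manager.order_updated` and `order` are hypothetical stand-ins for a plugin-manager callback and a model instance:

from django.db import transaction

from saleor.core.utils.events import call_event

def confirm_order(manager, order):
    with transaction.atomic():
        order.status = "confirmed"
        order.save(update_fields=["status"])
        # Deferred until the surrounding transaction commits.
        call_event(manager.order_updated, order)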
107,135
20
12
8
96
13
0
27
91
set_constrained_layout
ENH: implement and use base layout_engine for more flexible layout.
https://github.com/matplotlib/matplotlib.git
def set_constrained_layout(self, constrained): if constrained is None: constrained = mpl.rcParams['figure.constrained_layout.use'] _constrained = bool(constrained) _parameters = constrained if isinstance(constrained, dict) else {} if _constrained: self.set_layout_engine(ConstrainedLayoutEngine(**_parameters)) self.stale = True
58
figure.py
Python
lib/matplotlib/figure.py
ec4dfbc3c83866f487ff0bc9c87b0d43a1c02b22
matplotlib
4
31,845
38
15
7
87
10
1
43
115
_reorder_cache
Add MVP model (#17787) * Add MVP model * Update README * Remove useless module * Update docs * Fix bugs in tokenizer * Remove useless test * Remove useless module * Update vocab * Remove specifying * Remove specifying * Add #Copied ... statement * Update paper link * Remove useless TFMvp * Add #Copied ... statement * Fix style in test mvp model * Fix some typos * Fix properties of unset special tokens in non verbose mode * Update paper link * Update MVP doc * Update MVP doc * Fix README * Fix typos in docs * Update docs
https://github.com/huggingface/transformers.git
def _reorder_cache(past, beam_idx): reordered_past = () for layer_past in past: # cached cross_attention states don't have to be reordered -> they are always the same reordered_past += ( tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:], ) return reordered_past @add_start_docstrings( , MVP_START_DOCSTRING, )
@add_start_docstrings( """ Mvp model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """, MVP_START_DOCSTRING, )
48
modeling_mvp.py
Python
src/transformers/models/mvp/modeling_mvp.py
3cff4cc58730409c68f8afa2f3b9c61efa0e85c6
transformers
3
95,753
49
17
48
407
28
0
106
634
test_fetching_release_sessions_time_bounds_for_different_release
fix(release_health): Adjust granularity to be bug-compatible with sessions (#31246)
https://github.com/getsentry/sentry.git
def test_fetching_release_sessions_time_bounds_for_different_release(self): # Same release session self.store_session( self.build_session( release=self.session_release, environment="prod", status="exited", started=self.session_started - 3600 * 2, received=self.received - 3600 * 2, ) ) # Different release session self.store_session( self.build_session( release=self.session_crashed_release, environment="prod", status="crashed", started=self.session_started - 3600 * 2, received=self.received - 3600 * 2, ) ) expected_formatted_lower_bound = ( datetime.utcfromtimestamp(self.session_started - 3600 * 2) .replace(minute=0) .isoformat()[:19] + "Z" ) expected_formatted_upper_bound = ( datetime.utcfromtimestamp(self.session_started).replace(minute=0).isoformat()[:19] + "Z" ) # Test for self.session_release data = self.backend.get_release_sessions_time_bounds( project_id=self.project.id, release=self.session_release, org_id=self.organization.id, environments=["prod"], ) assert data == { "sessions_lower_bound": expected_formatted_lower_bound, "sessions_upper_bound": expected_formatted_upper_bound, } # Test for self.session_crashed_release data = self.backend.get_release_sessions_time_bounds( project_id=self.project.id, release=self.session_crashed_release, org_id=self.organization.id, environments=["prod"], ) assert data == { "sessions_lower_bound": expected_formatted_lower_bound, "sessions_upper_bound": expected_formatted_upper_bound, }
256
test_sessions.py
Python
tests/snuba/sessions/test_sessions.py
e851f67ee6ca0185112a0d1c919ee63f6bed98e3
sentry
1
167,732
14
11
32
67
8
0
14
75
to_dense
TYP: more return annotations in core/ (#47618) * TYP: more return annotations in core/ * from __future__ import annotations * more __future__
https://github.com/pandas-dev/pandas.git
def to_dense(self) -> Series: from pandas import Series return Series( self._parent.array.to_dense(), index=self._parent.index, name=self._parent.name, )
42
accessor.py
Python
pandas/core/arrays/sparse/accessor.py
f65417656ba8c59438d832b6e2a431f78d40c21c
pandas
1
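A short usage sketch for the sparse accessor method in the pandas record above (the data values are illustrative):

import pandas as pd

sparse = pd.Series(pd.arrays.SparseArray([0, 0, 1, 0]), name="flags")
dense = sparse.sparse.to_dense()  # dense Series with the same index and name
print(dense)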
78,248
12
12
5
61
6
0
15
54
base_queryset
Add generic settings to compliment site-specific settings (#8327)
https://github.com/wagtail/wagtail.git
def base_queryset(cls): queryset = cls.objects.all() if cls.select_related is not None: queryset = queryset.select_related(*cls.select_related) return queryset
36
models.py
Python
wagtail/contrib/settings/models.py
d967eccef28ce47f60d26be1c28f2d83a25f40b0
wagtail
2
107,364
7
9
3
40
6
0
7
28
locator
MNT: make colorbars locators and formatters properties
https://github.com/matplotlib/matplotlib.git
def locator(self, loc): self._long_axis().set_major_locator(loc) self._locator = loc
23
colorbar.py
Python
lib/matplotlib/colorbar.py
6010bb43ed01c48c7c403569dd210490b236a853
matplotlib
1
133,252
9
8
21
30
4
0
9
23
test_failure_during_resize
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
https://github.com/ray-project/ray.git
def test_failure_during_resize(ray_start_2_cpus): # noqa: F811 if not dist.is_available(): return
98
test_torch_failure.py
Python
python/ray/util/sgd/tests/test_torch_failure.py
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
ray
2
22,083
26
13
11
109
9
0
39
116
dispatch_hook
Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.
https://github.com/pypa/pipenv.git
def dispatch_hook(key, hooks, hook_data, **kwargs): hooks = hooks or {} hooks = hooks.get(key) if hooks: if hasattr(hooks, "__call__"): hooks = [hooks] for hook in hooks: _hook_data = hook(hook_data, **kwargs) if _hook_data is not None: hook_data = _hook_data return hook_data
68
hooks.py
Python
pipenv/patched/pip/_vendor/requests/hooks.py
cd5a9683be69c86c8f3adcd13385a9bc5db198ec
pipenv
6
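A small usage sketch of the hook-dispatch mechanism in the record above (the record is pipenv's vendored copy; the public requests package exposes the same behaviour): a callable registered under the "response" key receives each Response after dispatch_hook("response", ...) runs.

import requests

def log_status(response, *args, **kwargs):
    # Invoked by dispatch_hook for every completed request.
    print(response.status_code, response.url)

resp = requests.get("https://example.org", hooks={"response": log_status})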
42,539
44
12
12
139
14
0
55
179
frame_by_id
Docstring tests (#3050) * fixed pytests * fixed more pytests * fixed more pytest and changed multiline pytest issues fixes for snowball.py and causal.py * fixed pytests (mainly multiline or rounding issues) * fixed treebank pytests, removed test for return_string=True (deprecated) * fixed destructive.py pytests, removed test for return_string=True (deprecated) * fixed pytest (rounding issues) * fixed pytest (initialised missing object) * fixed pytest (formatting issues) * fixed pytest (formatting issues) * fixed pytest (formatting issues) * added pytest +SKIP for deprecated module stanford * updated AUTHORS.md * changed docstring corrections by usage of ELLIPSIS and different roundings * fixed AUTHORS.md to be consistent * Fix framenet doctest formatting with pprint * Change docstring on MultiListBox.__init__ I believe the original typo was misinterpreted and changed to something that was not originally intended. Co-authored-by: Jan Lennartz <[email protected]> Co-authored-by: Tom Aarsen <[email protected]> Co-authored-by: Tom Aarsen <[email protected]>
https://github.com/nltk/nltk.git
def frame_by_id(self, fn_fid, ignorekeys=[]): # get the name of the frame with this id number try: fentry = self._frame_idx[fn_fid] if "_type" in fentry: return fentry # full frame object is cached name = fentry["name"] except TypeError: self._buildframeindex() name = self._frame_idx[fn_fid]["name"] except KeyError as e: raise FramenetError(f"Unknown frame id: {fn_fid}") from e return self.frame_by_name(name, ignorekeys, check_cache=False)
81
framenet.py
Python
nltk/corpus/reader/framenet.py
8a4cf5d94eb94b6427c5d1d7907ba07b119932c5
nltk
4
45,477
11
12
4
85
12
0
11
31
downgrade
Autogenerate migration reference doc (#21601) * document airflow version in each alembic migration module and use this to autogen the doc * update each migration module to have the same description used in migration ref (so it can be used in autogen)
https://github.com/apache/airflow.git
def downgrade(): with op.batch_alter_table('connection') as batch_op: batch_op.drop_constraint(constraint_name="unique_conn_id", type_="unique") batch_op.alter_column("conn_id", nullable=True, existing_type=sa.String(250))
46
8d48763f6d53_add_unique_constraint_to_conn_id.py
Python
airflow/migrations/versions/8d48763f6d53_add_unique_constraint_to_conn_id.py
69f6f9e01b6df76c3c8fa266d460324163957887
airflow
1
288,953
8
7
3
31
4
0
8
22
serial
Add button entities for Lutron Caseta/RA3/HWQSX (#79963) Co-authored-by: J. Nick Koston <[email protected]>
https://github.com/home-assistant/core.git
def serial(self) -> int | None: return self._device["serial"]
17
__init__.py
Python
homeassistant/components/lutron_caseta/__init__.py
82322e3804af9cac55c6bea106f4bb0faff4c298
core
1
304,022
10
10
3
45
6
0
10
24
async_discovered_devices
Rework bluetooth to support scans from multiple sources (#76900)
https://github.com/home-assistant/core.git
def async_discovered_devices(self) -> list[BLEDevice]: return [history[0] for history in self.history.values()]
28
manager.py
Python
homeassistant/components/bluetooth/manager.py
3bcc274dfa90d7d3c01ace83137c46a0898c107f
core
2
247,637
17
10
9
143
9
0
21
77
test_callback_error
Add type hints to some tests/handlers files. (#12224)
https://github.com/matrix-org/synapse.git
def test_callback_error(self) -> None: request = Mock(args={}) request.args[b"error"] = [b"invalid_client"] self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError("invalid_client", "") request.args[b"error_description"] = [b"some description"] self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError("invalid_client", "some description")
83
test_oidc.py
Python
tests/handlers/test_oidc.py
5dd949bee6158a8b651db9f2ae417a62c8184bfd
synapse
1
126,136
2
6
19
13
2
0
2
5
test_checkpoint_success_by_http
[workflow] http_event_provider and accompanied listener (#26010) ### Why are these changes needed? This PR enhances workflow functionality to receive external events from a Serve based HTTP endpoint. A workflow can then consume events asynchronously as they arrive. ### Design Logic A `workflow.wait_for_event` node subscribes to the endpoint instantiated by a Ray Serve deployment of class `http_event_provider.HTTPEventProvider`. The subscription is made through a helper class `http_event_provider.HTTPListener`. `HTTPListener` implements the methods of `EventListener` to poll from and confirm event checkpointing to `HTTPEventProvider`, before `HTTPEventProvider`acknowledges success or error to the event submitter. ### Architecture Improvement The logic of this enhancement conforms with existing workflow runtime design.
https://github.com/ray-project/ray.git
def test_checkpoint_success_by_http(workflow_start_regular_shared_serve):
101
test_http_events_3.py
Python
python/ray/workflow/tests/test_http_events_3.py
659d25a3a9c4794db9dbe8f428ec587470b261b0
ray
4
187,140
21
15
25
231
14
0
38
140
test_url
plugin.api.validate: implement ValidationError - Implement `ValidationError` - Inherit from `ValueError` to preserve backwards compatiblity - Allow collecting multiple errors (AnySchema) - Keep an error stack of parent `ValidationError`s or other exceptions - Format error stack when converting error to string - Raise `ValidationError` instead of `ValueError` - Add error contexts where it makes sense - Add schema names to error instances - Add and update tests
https://github.com/streamlink/streamlink.git
def test_url(self): url_ = "https://google.se/path" assert validate(url(), url_) assert validate(url(scheme="http"), url_) assert validate(url(path="/path"), url_) with self.assertRaises(ValueError) as cm: validate(url(), "foo") assert_validationerror(cm.exception, ) with self.assertRaises(ValueError) as cm: validate(url(foo="bar"), "https://foo") assert_validationerror(cm.exception, ) with self.assertRaises(ValueError) as cm: validate(url(path=endswith(".m3u8")), "https://foo/bar.mpd") assert_validationerror(cm.exception, )
128
test_api_validate.py
Python
tests/test_api_validate.py
3d44da082b3ba202b9d0557bfd8ce747a1d7960c
streamlink
1
267,457
13
6
34
25
5
0
13
33
find_matches
ansible-galaxy - support resolvelib versions >= 0.5.3, < 0.9.0 (#77649) * ansible-galaxy - support resolvelib versions >= 0.5.3, <= 0.8.1 Test incompatibilities are removed for resolvelib >= 0.6.0 Test against the latest 0.8.x version and fix requirements * Fix tests - use a venv for testing the range of resolvelib versions * Update temporary hardcoded fallback for ansible-test * Update hardcoded upperbound for sanity tests * Make error check more flexible
https://github.com/ansible/ansible.git
def find_matches(self, *args, **kwargs): # type: (t.Any, t.Any) -> list[Candidate] r raise NotImplementedError
15
providers.py
Python
lib/ansible/galaxy/dependency_resolution/providers.py
143e7fb45e7b916fa973613000e97ee889f5666c
ansible
1
246,021
136
15
98
755
58
0
243
1,189
test_backfill_floating_outlier_membership_auth
Refactor the way we set `outlier` (#11634) * `_auth_and_persist_outliers`: mark persisted events as outliers Mark any events that get persisted via `_auth_and_persist_outliers` as, well, outliers. Currently this will be a no-op as everything will already be flagged as an outlier, but I'm going to change that. * `process_remote_join`: stop flagging as outlier The events are now flagged as outliers later on, by `_auth_and_persist_outliers`. * `send_join`: remove `outlier=True` The events created here are returned in the result of `send_join` to `FederationHandler.do_invite_join`. From there they are passed into `FederationEventHandler.process_remote_join`, which passes them to `_auth_and_persist_outliers`... which sets the `outlier` flag. * `get_event_auth`: remove `outlier=True` stop flagging the events returned by `get_event_auth` as outliers. This method is only called by `_get_remote_auth_chain_for_event`, which passes the results into `_auth_and_persist_outliers`, which will flag them as outliers. * `_get_remote_auth_chain_for_event`: remove `outlier=True` we pass all the events into `_auth_and_persist_outliers`, which will now flag the events as outliers. * `_check_sigs_and_hash_and_fetch`: remove unused `outlier` parameter This param is now never set to True, so we can remove it. * `_check_sigs_and_hash_and_fetch_one`: remove unused `outlier` param This is no longer set anywhere, so we can remove it. * `get_pdu`: remove unused `outlier` parameter ... and chase it down into `get_pdu_from_destination_raw`. * `event_from_pdu_json`: remove redundant `outlier` param This is never set to `True`, so can be removed. * changelog * update docstring
https://github.com/matrix-org/synapse.git
def test_backfill_floating_outlier_membership_auth(self): OTHER_SERVER = "otherserver" OTHER_USER = "@otheruser:" + OTHER_SERVER # create the room user_id = self.register_user("kermit", "test") tok = self.login("kermit", "test") room_id = self.helper.create_room_as( room_creator=user_id, is_public=True, tok=tok, extra_content={ "preset": "public_chat", }, ) room_version = self.get_success(self.store.get_room_version(room_id)) prev_event_ids = self.get_success(self.store.get_prev_events_for_room(room_id)) ( most_recent_prev_event_id, most_recent_prev_event_depth, ) = self.get_success(self.store.get_max_depth_of(prev_event_ids)) # mapping from (type, state_key) -> state_event_id prev_state_map = self.get_success( self.state_store.get_state_ids_for_event(most_recent_prev_event_id) ) # List of state event ID's prev_state_ids = list(prev_state_map.values()) auth_event_ids = prev_state_ids auth_events = list( self.get_success(self.store.get_events(auth_event_ids)).values() ) # build a floating outlier member state event fake_prev_event_id = "$" + random_string(43) member_event_dict = { "type": EventTypes.Member, "content": { "membership": "join", }, "state_key": OTHER_USER, "room_id": room_id, "sender": OTHER_USER, "depth": most_recent_prev_event_depth, "prev_events": [fake_prev_event_id], "origin_server_ts": self.clock.time_msec(), "signatures": {OTHER_SERVER: {"ed25519:key_version": "SomeSignatureHere"}}, } builder = self.hs.get_event_builder_factory().for_room_version( room_version, member_event_dict ) member_event = self.get_success( builder.build( prev_event_ids=member_event_dict["prev_events"], auth_event_ids=self._event_auth_handler.compute_auth_events( builder, prev_state_map, for_verification=False, ), depth=member_event_dict["depth"], ) ) # Override the signature added from "test" homeserver that we created the event with member_event.signatures = member_event_dict["signatures"] # Add the new member_event to the StateMap prev_state_map[ (member_event.type, member_event.state_key) ] = member_event.event_id auth_events.append(member_event) # build and send an event authed based on the member event message_event_dict = { "type": EventTypes.Message, "content": {}, "room_id": room_id, "sender": OTHER_USER, "depth": most_recent_prev_event_depth, "prev_events": prev_event_ids.copy(), "origin_server_ts": self.clock.time_msec(), "signatures": {OTHER_SERVER: {"ed25519:key_version": "SomeSignatureHere"}}, } builder = self.hs.get_event_builder_factory().for_room_version( room_version, message_event_dict ) message_event = self.get_success( builder.build( prev_event_ids=message_event_dict["prev_events"], auth_event_ids=self._event_auth_handler.compute_auth_events( builder, prev_state_map, for_verification=False, ), depth=message_event_dict["depth"], ) ) # Override the signature added from "test" homeserver that we created the event with message_event.signatures = message_event_dict["signatures"] # Stub the /event_auth response from the OTHER_SERVER
523
test_federation.py
Python
tests/handlers/test_federation.py
0fb3dd0830e476c0e0b89c3bf6c7855a4129ff11
synapse
1
288,874
34
19
25
168
23
0
37
308
test_meross_mss565_setup
Migrate HomeKit Controller to use stable identifiers (#80064)
https://github.com/home-assistant/core.git
async def test_meross_mss565_setup(hass): accessories = await setup_accessories_from_file(hass, "mss565.json") await setup_test_accessories(hass, accessories) await assert_devices_and_entities_created( hass, DeviceTestInfo( unique_id=HUB_TEST_ACCESSORY_ID, name="MSS565-28da", model="MSS565", manufacturer="Meross", sw_version="4.1.9", hw_version="4.0.0", serial_number="BB1121", devices=[], entities=[ EntityTestInfo( entity_id="light.mss565_28da_dimmer_switch", friendly_name="MSS565-28da Dimmer Switch", unique_id="00:00:00:00:00:00_1_12", capabilities={"supported_color_modes": ["brightness"]}, state=STATE_ON, ), ], ), )
100
test_mss565.py
Python
tests/components/homekit_controller/specific_devices/test_mss565.py
f23b1750e85f07091eb896a0b12b8f95e5646338
core
1
276,934
47
10
7
137
14
0
58
121
test_similar_matrices
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def test_similar_matrices(self, exact_kernel_fn, expected_values): x = tf.constant([1.0, 3.4, -2.1, 0.9, 3.3, -2.0], shape=[2, 3]) y = tf.constant([1.1, 3.35, -2.05]) exact_kernel = exact_kernel_fn(x, y) shape = exact_kernel.shape.as_list() self.assertLen(shape, 2) # The 2 rows of x are close to y. The pairwise kernel values (similarity # scores) are somewhat close to the identity value of the kernel. self.assertAllClose(expected_values, exact_kernel, atol=1e-2)
112
kernelized_utils_test.py
Python
keras/utils/kernelized_utils_test.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
1
154,387
33
17
13
134
21
0
39
120
trigger_import
FEAT-#4913: Enabling pyhdk (#4900) Co-authored-by: ienkovich <[email protected]> Signed-off-by: izamyati <[email protected]>
https://github.com/modin-project/modin.git
def trigger_import(*dfs): if ASV_USE_STORAGE_FORMAT != "omnisci" or ASV_USE_IMPL == "pandas": return from modin.experimental.core.execution.native.implementations.omnisci_on_native.db_worker import ( DbWorker, ) for df in dfs: df.shape # to trigger real execution df._query_compiler._modin_frame._partitions[0][ 0 ].frame_id = DbWorker().import_arrow_table( df._query_compiler._modin_frame._partitions[0][0].get() ) # to trigger real execution
86
common.py
Python
asv_bench/benchmarks/utils/common.py
1c0935c1bc0856d43f69c1e32498636ee24ebc85
modin
4
10,901
16
10
11
95
11
0
22
119
wait_start_success
refactor: rename pod to deployment (#4230) * refactor: rename pod to deployment * style: fix overload and cli autocomplete * fix: undo daemon mistake * refactor: leftover cleanup * fix: more test fixes * fix: more fixes * fix: more fixes * fix: more fixes * fix: more tests * fix: fix more tests * refactor: fix more tests * refactor: more tests fixes * refactor: rename pea to pod * refactor: adjust docs * refactor: complete pea renaming * refactor: more fixes * fix: pea_type in k8s yamls * fix: adjust pod args name * refactor: rename peapods parser folder * fix: da init Co-authored-by: Jina Dev Bot <[email protected]>
https://github.com/jina-ai/jina.git
def wait_start_success(self): _timeout = self.args.timeout_ready if _timeout <= 0: _timeout = None else: _timeout /= 1e3 if self._wait_for_ready_or_shutdown(_timeout): self._check_failed_to_start() self.logger.debug(__ready_msg__) else: self._fail_start_timeout(_timeout)
56
__init__.py
Python
jina/orchestrate/pods/__init__.py
13edc16d806fb5d77a6849551178ccc75937f25f
jina
3
165,734
5
7
2
22
4
0
5
19
_maybe_convert_setitem_value
REF: move ArrowStringArray.__setitem__ and related methods to ArrowExtensionArray (#46439)
https://github.com/pandas-dev/pandas.git
def _maybe_convert_setitem_value(self, value): raise NotImplementedError()
12
_mixins.py
Python
pandas/core/arrays/_mixins.py
2d6a2c3e981208bf67bdd36cca726e8a399e487c
pandas
1
101,754
62
13
27
231
25
0
80
336
__call__
Alignments Tool updates - Copy info back to alignments file from faces
https://github.com/deepfakes/faceswap.git
def __call__(self) -> bool: for meta in tqdm(self._face_alignments, desc="Updating Alignments File from PNG Header", leave=False): src = meta["source"] alignment = meta["alignments"] if not any(alignment.get(key, {}) for key in self._updatable_keys): continue faces = self._alignments.get_faces_in_frame(src["source_filename"]) if len(faces) < src["face_index"] + 1: # list index out of range logger.debug("Skipped face '%s'. Index does not exist in alignments file", src["original_filename"]) continue face = faces[src["face_index"]] self._check_and_update(alignment, face) retval = False if self._counts: retval = True logger.info("Updated alignments file from PNG Data: %s", self._counts) return retval
138
jobs_faces.py
Python
tools/alignments/jobs_faces.py
c79175cbde5600bebd65785f3821fc74b3a80cbe
faceswap
6
144,345
13
8
3
53
10
0
13
34
_bind
[Ray DAG] Implement experimental Ray DAG API for task/class (#22058)
https://github.com/ray-project/ray.git
def _bind(self, *args, **kwargs): from ray.experimental.dag.function_node import FunctionNode return FunctionNode(self._function, args, kwargs, {})
36
remote_function.py
Python
python/ray/remote_function.py
c065e3f69ec248383d98b45a8d1c00832ccfdd57
ray
1
153,577
13
7
4
35
4
0
15
44
pop
DOCS-#3099: Fix `BasePandasDataSet` docstrings warnings (#4333) Co-authored-by: Yaroslav Igoshev <[email protected]> Signed-off-by: Alexander Myskov <[email protected]>
https://github.com/modin-project/modin.git
def pop(self, item): # noqa: PR01, RT01, D200 result = self[item] del self[item] return result
21
base.py
Python
modin/pandas/base.py
605efa618e7994681f57b11d04d417f353ef8d50
modin
1
294,496
30
12
15
95
11
0
40
153
connect_with_error_handling
Add config flow to fibaro (#65203) Co-authored-by: J. Nick Koston <[email protected]>
https://github.com/home-assistant/core.git
def connect_with_error_handling(self) -> None: try: connected = self.connect() if not connected: raise FibaroConnectFailed("Connect status is false") except HTTPException as http_ex: if http_ex.details == "Forbidden": raise FibaroAuthFailed from http_ex raise FibaroConnectFailed from http_ex except Exception as ex: raise FibaroConnectFailed from ex
55
__init__.py
Python
homeassistant/components/fibaro/__init__.py
e844c2380a2f970377bb4481bf3f6abe50ea006b
core
5
128,303
10
9
5
42
5
0
11
43
normalized_base_dir
[Datasets] Add `partitioning` parameter to `read_` functions (#28413)
https://github.com/ray-project/ray.git
def normalized_base_dir(self) -> str: if self._normalized_base_dir is None: self._normalize_base_dir() return self._normalized_base_dir
24
partitioning.py
Python
python/ray/data/datasource/partitioning.py
c3ff77f5a13395631a2af580ea4429ceb5dfea13
ray
2
208,102
15
10
4
78
12
0
18
47
test_group_lone
Canvas Header Stamping (#7384) * Strip down the header-stamping PR to the basics. * Serialize groups. * Add groups to result backend meta data. * Fix spelling mistake. * Revert changes to canvas.py * Revert changes to app/base.py * Add stamping implementation to canvas.py * Send task to AMQP with groups. * Successfully pass single group to result. * _freeze_gid dict merge fixed * First draft of the visitor API. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * OptionsVisitor created * Fixed canvas.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added test for simple test for chord and fixed chord implementation * Changed _IMMUTABLE_OPTIONS * Fixed chord interface * Fixed chord interface * Fixed chord interface * Fixed chord interface * Fixed list order * Fixed tests (stamp test and chord test), fixed order in groups * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed lint and elements * Changed implementation of stamp API and fix lint * Added documentation to Stamping API. Added chord with groups test * Implemented stamping inside replace and added test for an implementation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added test additonal tests for chord, improved coverage * Added test additonal tests for chord, improved coverage * Added test additonal tests for chord, improved coverage * Splitted into subtests * Group stamping rollback * group.id is None fixed * Added integration test * Added integration test * apply_async fixed * Integration test and test_chord fixed * Lint fixed * chord freeze fixed * Minor fixes. * Chain apply_async fixed and tests fixed * lint fixed * Added integration test for chord * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * type -> isinstance * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Redo header stamping (#7341) * _freeze_gid dict merge fixed * OptionsVisitor created * Fixed canvas.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added test for simple test for chord and fixed chord implementation * Changed _IMMUTABLE_OPTIONS * Fixed chord interface * Fixed chord interface * Fixed chord interface * Fixed chord interface * Fixed list order * Fixed tests (stamp test and chord test), fixed order in groups * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed lint and elements * Changed implementation of stamp API and fix lint * Added documentation to Stamping API. Added chord with groups test * Implemented stamping inside replace and added test for an implementation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added test additonal tests for chord, improved coverage * Added test additonal tests for chord, improved coverage * Added test additonal tests for chord, improved coverage * Splitted into subtests * Group stamping rollback * group.id is None fixed * Added integration test * Added integration test * apply_async fixed * Integration test and test_chord fixed * Lint fixed * chord freeze fixed * Minor fixes. 
* Chain apply_async fixed and tests fixed * lint fixed * Added integration test for chord * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * type -> isinstance * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Omer Katz <[email protected]> * Added stamping mechanism * Manual stamping improved * flake8 fixed * Added subtests * Add comma. * Moved groups to stamps * Fixed chord and added test for that * Strip down the header-stamping PR to the basics. * Serialize groups. * Add groups to result backend meta data. * Fix spelling mistake. * Revert changes to canvas.py * Revert changes to app/base.py * Add stamping implementation to canvas.py * Send task to AMQP with groups. * Successfully pass single group to result. * _freeze_gid dict merge fixed * First draft of the visitor API. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * OptionsVisitor created * Fixed canvas.py * Added test for simple test for chord and fixed chord implementation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Changed _IMMUTABLE_OPTIONS * Fixed chord interface * Fixed chord interface * Fixed chord interface * Fixed chord interface * Fixed list order * Fixed tests (stamp test and chord test), fixed order in groups * Fixed lint and elements * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Changed implementation of stamp API and fix lint * Added documentation to Stamping API. Added chord with groups test * Implemented stamping inside replace and added test for an implementation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added test additonal tests for chord, improved coverage * Added test additonal tests for chord, improved coverage * Added test additonal tests for chord, improved coverage * Splitted into subtests * Group stamping rollback * group.id is None fixed * Added integration test * Added integration test * apply_async fixed * Integration test and test_chord fixed * Lint fixed * chord freeze fixed * Minor fixes. * Chain apply_async fixed and tests fixed * lint fixed * Added integration test for chord * type -> isinstance * Added stamping mechanism * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Manual stamping improved * fail_ci_if_error uncommented * flake8 fixed * Added subtests * Changes * Add comma. * Fixed chord and added test for that * canvas.py fixed * Test chord.py fixed * Fixed stamped_headers * collections import fixed * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * collections import fixed * Update celery/backends/base.py Co-authored-by: Omer Katz <[email protected]> * ampq.py fixed * Refrain from using deprecated import path. * Fix test_complex_chain regression. Whenever we stamp a group we need to freeze it first if it wasn't already frozen. Somewhere along the line, the group id changed because we were freezing twice. This commit places the stamping operation after preparing the chain's steps which fixes the problem somehow. We don't know why yet. 
* Fixed integration tests * Fixed integration tests * Fixed integration tests * Fixed integration tests * Fixed issues with maybe_list. Add documentation * Fixed potential issue with integration tests * Fixed issues with _regen * Fixed issues with _regen * Fixed test_generator issues * Fixed _regen stamping * Fixed _regen stamping * Fixed TimeOut issue * Fixed TimeOut issue * Fixed TimeOut issue * Update docs/userguide/canvas.rst Co-authored-by: Omer Katz <[email protected]> * Fixed Couchbase * Better stamping intro * New GroupVisitor example * Adjust documentation. Co-authored-by: Naomi Elstein <[email protected]> Co-authored-by: Omer Katz <[email protected]> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Asif Saif Uddin <[email protected]> Co-authored-by: Omer Katz <[email protected]>
https://github.com/celery/celery.git
def test_group_lone(self, manager): sig = group(identity.s(42), identity.s(42)) # [42, 42] res = sig.delay() assert res.get(timeout=TIMEOUT) == [42, 42]
48
test_canvas.py
Python
t/integration/test_canvas.py
1c4ff33bd22cf94e297bd6449a06b5a30c2c1fbc
celery
1
100,422
28
13
12
129
14
0
45
175
process_arguments
Update code to support Tensorflow versions up to 2.8 (#1213) * Update maximum tf version in setup + requirements * - bump max version of tf version in launcher - standardise tf version check * update keras get_custom_objects for tf>2.6 * bugfix: force black text in GUI file dialogs (linux) * dssim loss - Move to stock tf.ssim function * Update optimizer imports for compatibility * fix logging for tf2.8 * Fix GUI graphing for TF2.8 * update tests * bump requirements.txt versions * Remove limit on nvidia-ml-py * Graphing bugfixes - Prevent live graph from displaying if data not yet available * bugfix: Live graph. Collect loss labels correctly * fix: live graph - swallow inconsistent loss errors * Bugfix: Prevent live graph from clearing during training * Fix graphing for AMD
https://github.com/deepfakes/faceswap.git
def process_arguments(self): args = [arg for arg in sys.argv] # pylint:disable=unnecessary-comprehension if self.updater: from lib.utils import get_backend # pylint:disable=import-outside-toplevel args.append(f"--{get_backend()}") for arg in args: if arg == "--installer": self.is_installer = True if arg == "--nvidia": self.enable_cuda = True if arg == "--amd": self.enable_amd = True
70
setup.py
Python
setup.py
c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf
faceswap
7
216,262
67
14
60
464
16
0
118
818
test_network_grains_secondary_ip
Filter secondary IP address by type (#61434) * Add filter for secondary ip addresses Should improve #61370 * Remove unnecessary space * Add test case for secondary IP address Test data for IPv6 secondary IP looks wrong but this is what _interfaces_ip() could return looking at the current code * Change order of tests because of caching issues Change order of test_network_grains_secondary_ip and test_network_grains_cache because of caching issues when running after test_network_grains_cache * Unify style in _interfaces_ip Unify coding style in _interfaces_ip for secondary ip addresses with the style for regular addresses. Also align the attributes for IPv6 secondary ip addresses with regular ipv6 addresses * Align IPv6 secondary IP attributes with changes to _interfaces_ip * Add changelog for fix of issue 61370 * Use salt.loader.grain_funcs for secondary ip test To work around caching issues when changing order of test_network_grains_cache and test_network_grains_secondary_ip use use salt.loader.grain_funcs in both functions. Also we hope this solves the issue, that this test worked in my local dev environment but not on the saltstack jenkins instances. * Use side_effect to simulate test data I don't understand what is different when these tests are run on the Jenkins infrastructure. Hope copying this from test_network_grains_cache make the tests work on them. * Changed checking for secondaryip address type * Add filter for secondary ip addresses Should improve #61370 * Remove unnecessary space * Add test case for secondary IP address Test data for IPv6 secondary IP looks wrong but this is what _interfaces_ip() could return looking at the current code * Change order of tests because of caching issues Change order of test_network_grains_secondary_ip and test_network_grains_cache because of caching issues when running after test_network_grains_cache * Unify style in _interfaces_ip Unify coding style in _interfaces_ip for secondary ip addresses with the style for regular addresses. Also align the attributes for IPv6 secondary ip addresses with regular ipv6 addresses * Align IPv6 secondary IP attributes with changes to _interfaces_ip * Add changelog for fix of issue 61370 * Use salt.loader.grain_funcs for secondary ip test To work around caching issues when changing order of test_network_grains_cache and test_network_grains_secondary_ip use use salt.loader.grain_funcs in both functions. Also we hope this solves the issue, that this test worked in my local dev environment but not on the saltstack jenkins instances. * Use side_effect to simulate test data I don't understand what is different when these tests are run on the Jenkins infrastructure. Hope copying this from test_network_grains_cache make the tests work on them. * Changed checking for secondaryip address type * Satisfy black code formatting Co-authored-by: Shane Lee <[email protected]> Co-authored-by: mayrstefan <[email protected]>
https://github.com/saltstack/salt.git
def test_network_grains_secondary_ip(tmp_path): data = { "wlo1": { "up": True, "hwaddr": "29:9f:9f:e9:67:f4", "inet": [ { "address": "172.16.13.85", "netmask": "255.255.248.0", "broadcast": "172.16.15.255", "label": "wlo1", } ], "inet6": [ { "address": "2001:4860:4860::8844", "prefixlen": "64", "scope": "fe80::6238:e0ff:fe06:3f6b%enp2s0", } ], "secondary": [ { "type": "inet", "address": "172.16.13.86", "netmask": "255.255.248.0", "broadcast": "172.16.15.255", "label": "wlo1", }, { "type": "inet6", "address": "2001:4860:4860::8888", "prefixlen": "64", "scope": "fe80::6238:e0ff:fe06:3f6b%enp2s0", }, ], } } cache_dir = tmp_path / "cache" extmods = tmp_path / "extmods" opts = { "cachedir": str(cache_dir), "extension_modules": str(extmods), "optimization_order": [0], } with patch("salt.utils.network.interfaces", side_effect=[data]): grains = salt.loader.grain_funcs(opts) ret_ip4 = grains["core.ip4_interfaces"]() assert ret_ip4["ip4_interfaces"]["wlo1"] == ["172.16.13.85", "172.16.13.86"] ret_ip6 = grains["core.ip6_interfaces"]() assert ret_ip6["ip6_interfaces"]["wlo1"] == [ "2001:4860:4860::8844", "2001:4860:4860::8888", ] ret_ip = grains["core.ip_interfaces"]() assert ret_ip["ip_interfaces"]["wlo1"] == [ "172.16.13.85", "2001:4860:4860::8844", "172.16.13.86", "2001:4860:4860::8888", ]
239
test_core.py
Python
tests/pytests/unit/grains/test_core.py
75c0cb7181d14f780b24ee5dd126f2836730053b
salt
1
294,005
60
13
20
180
19
0
91
274
async_press
Add update platform to WLED (#68454) * Add update platform to WLED * Copy pasta fixes * Apply suggestions from code review Co-authored-by: Martin Hjelmare <[email protected]> * Update tests/components/wled/test_update.py Co-authored-by: Martin Hjelmare <[email protected]> * Fix tests Co-authored-by: Martin Hjelmare <[email protected]>
https://github.com/home-assistant/core.git
async def async_press(self) -> None: LOGGER.warning( "The WLED update button '%s' is deprecated, please " "use the new update entity as a replacement", self.entity_id, ) current = self.coordinator.data.info.version beta = self.coordinator.data.info.version_latest_beta stable = self.coordinator.data.info.version_latest_stable # If we already run a pre-release, allow update to a newer # pre-release or newer stable, otherwise, offer a normal stable updates. version = stable if ( current is not None and beta is not None and (current.alpha or current.beta or current.release_candidate) and beta > current and beta > stable ): version = beta await self.coordinator.wled.upgrade(version=str(version))
110
button.py
Python
homeassistant/components/wled/button.py
40d4495ed098624a1f0816357b260bd8a298d969
core
8
138,162
53
15
28
259
32
0
66
446
profile_events
[core][state] Task events backend - port profile events and turn on task backend [4/n] (#31207)
https://github.com/ray-project/ray.git
def profile_events(self): self._check_connected() result = defaultdict(list) task_events = self.global_state_accessor.get_task_events() for i in range(len(task_events)): event = gcs_utils.TaskEvents.FromString(task_events[i]) profile = event.profile_events if not profile: continue component_type = profile.component_type component_id = binary_to_hex(profile.component_id) node_ip_address = profile.node_ip_address for event in profile.events: try: extra_data = json.loads(event.extra_data) except ValueError: extra_data = {} profile_event = { "event_type": event.event_name, "component_id": component_id, "node_ip_address": node_ip_address, "component_type": component_type, "start_time": event.start_time, "end_time": event.end_time, "extra_data": extra_data, } result[component_id].append(profile_event) return dict(result)
156
state.py
Python
python/ray/_private/state.py
53f68cd4d6b36965dddf9409015cba8ba313da2f
ray
5
128,881
2
6
8
13
2
0
2
5
test_torch_amp_with_custom_get_state
[AIR] Hard deprecate old Trainer, old callbacks (#29015) Hard deprecations for ray.train.Trainer, ray.train.callbacks and ray.train.checkpoint.CheckpointStrategy. Restart-on-failure logic from BackendExecutor has also been removed as it is superseded by Tune. Some tests have been refactored to use the new API. Tests that are no longer applicable have been removed. Signed-off-by: Antoni Baum <[email protected]> Signed-off-by: Amog Kamsetty <[email protected]> Co-authored-by: Amog Kamsetty <[email protected]>
https://github.com/ray-project/ray.git
def test_torch_amp_with_custom_get_state(ray_start_4_cpus):
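The code field above preserves only the test's signature; the body was stripped during extraction. Below is a minimal sketch of what such a test might look like, assuming the Ray AIR-era helpers (TorchTrainer, ScalingConfig, prepare_model, accelerate); the helper names and the training loop are assumptions for illustration, not the actual test body.

import torch
import ray.train.torch
from ray.air.config import ScalingConfig
from ray.train.torch import TorchTrainer


class LinearWithCustomState(torch.nn.Linear):
    # Custom (de)serialization hooks; AMP wrapping must not break them.
    def __getstate__(self):
        return self.__dict__.copy()

    def __setstate__(self, state):
        self.__dict__.update(state)


def train_func():
    ray.train.torch.accelerate(amp=True)  # enable automatic mixed precision (API of that era, assumed)
    model = LinearWithCustomState(1, 1)
    ray.train.torch.prepare_model(model)  # would fail here if __getstate__ broke the AMP wrapper


def test_torch_amp_with_custom_get_state(ray_start_4_cpus):
    # ray_start_4_cpus is the fixture named in the original signature.
    trainer = TorchTrainer(train_func, scaling_config=ScalingConfig(num_workers=2))
    trainer.fit()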
35
test_torch_trainer.py
Python
python/ray/train/tests/test_torch_trainer.py
d99eff919bf785f911e4eebc87ddc4960344a139
ray
1
250,103
53
11
30
239
17
0
80
268
test_chain_of_fail_cleanup
Require types in tests.storage. (#14646) Adds missing type hints to `tests.storage` package and does not allow untyped definitions.
https://github.com/matrix-org/synapse.git
def test_chain_of_fail_cleanup(self) -> None: # Create the room graph event_id_a = self.create_and_send_event(self.room_id, self.user) event_id_sf1 = self.create_and_send_event( self.room_id, self.user, True, [event_id_a] ) event_id_sf2 = self.create_and_send_event( self.room_id, self.user, True, [event_id_sf1] ) event_id_b = self.create_and_send_event( self.room_id, self.user, False, [event_id_sf2] ) # Add the new extremity and check the latest events are as expected self.add_extremity(self.room_id, event_id_a) latest_event_ids = self.get_success( self.store.get_latest_event_ids_in_room(self.room_id) ) self.assertEqual(set(latest_event_ids), {event_id_a, event_id_b}) # Run the background update and check it did the right thing self.run_background_update() latest_event_ids = self.get_success( self.store.get_latest_event_ids_in_room(self.room_id) ) self.assertEqual(latest_event_ids, [event_id_b])
156
test_cleanup_extrems.py
Python
tests/storage/test_cleanup_extrems.py
3ac412b4e2f8c5ba11dc962b8a9d871c1efdce9b
synapse
1
287,669
8
6
3
25
5
0
8
22
native_value
Link manually added MQTT entities the the MQTT config entry (#78547) Co-authored-by: Erik <[email protected]>
https://github.com/home-assistant/core.git
def native_value(self) -> StateType | datetime: return self._state
14
sensor.py
Python
homeassistant/components/mqtt/sensor.py
dea221b155da483fc1901d054e2aec38aabf618b
core
1
180,674
9
10
15
36
4
0
9
34
create
Fix async tests (#2000) * fix examples test * formatting * async examples * working on mix * comment out failing test * fixed interface problem * fixes
https://github.com/gradio-app/gradio.git
async def create(self) -> None: if self.cache_examples: await self.cache_interface_examples()
71
examples.py
Python
gradio/examples.py
a424832ec119c490d5d1d2d7d635b4a7232dc77e
gradio
4
30,249
8
8
8
27
3
0
8
14
get_config_file
fixed arguments for frozen env fixed pylint errors fixed arguments black fixed argument parser for all scenarios black docs black
https://github.com/spotDL/spotify-downloader.git
def get_config_file() -> Path: return get_spotdl_path() / "config.json"
13
config.py
Python
spotdl/utils/config.py
773398048b7990ab58e2998fe4d15355f7998774
spotify-downloader
1
29,303
17
9
18
78
10
0
19
41
_fetch_all_variants
Split test_product.py and test_variant.py into multiple files (#11173) * Split test_product.py into multiple files * Split test_variant.py into multiple files
https://github.com/saleor/saleor.git
def _fetch_all_variants(client, variables={}, permissions=None): query = """...""" response = client.post_graphql( query, variables, permissions=permissions, check_no_permissions=False ) content = get_graphql_content(response) return content["data"]["productVariants"]
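The GraphQL query string assigned to `query` was elided in this record (marked above with a `"""..."""` placeholder). A hedged usage sketch for the helper follows; the fixture names (staff_api_client, permission_manage_products) follow Saleor's usual test conventions but are assumptions here, as is the asserted key.

def test_fetch_all_variants_as_staff(staff_api_client, permission_manage_products):
    # The helper returns the "productVariants" payload of the GraphQL response,
    # which is a connection object.
    data = _fetch_all_variants(
        staff_api_client, permissions=[permission_manage_products]
    )
    assert "edges" in data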
49
test_product_variants_query.py
Python
saleor/graphql/product/tests/queries/test_product_variants_query.py
d90be220d6b687d08153934a51354011a3cb5ca1
saleor
1
322,151
111
16
46
596
64
0
166
546
evaluate
Update neural search readme and Add Paddle Serving Support (#1558) * add recall inference similarity * update examples * updatea readme * update dir name * update neural search readme * update milvus readme * update domain adaptive pretraining readme * fix the mistakes * update readme * add recall Paddle Serving Support * update readme * update readme and format the code * reformat the files * move the files * reformat the code * remove redundant code Co-authored-by: Zeyu Chen <[email protected]> Co-authored-by: tianxin <[email protected]>
https://github.com/PaddlePaddle/PaddleNLP.git
def evaluate(model, criterion, data_loader, file_path, mode): example_all = [] with open(file_path, "r", encoding="utf-8") as fp: for line in fp: example_all.append(json.loads(line)) id2spo_path = os.path.join(os.path.dirname(file_path), "id2spo.json") with open(id2spo_path, 'r', encoding='utf8') as fp: id2spo = json.load(fp) model.eval() loss_all = 0 eval_steps = 0 formatted_outputs = [] current_idx = 0 for batch in tqdm(data_loader, total=len(data_loader)): eval_steps += 1 input_ids, seq_len, tok_to_orig_start_index, tok_to_orig_end_index, labels = batch logits = model(input_ids=input_ids) mask = (input_ids != 0).logical_and((input_ids != 1)).logical_and( (input_ids != 2)) loss = criterion(logits, labels, mask) loss_all += loss.numpy().item() probs = F.sigmoid(logits) logits_batch = probs.numpy() seq_len_batch = seq_len.numpy() tok_to_orig_start_index_batch = tok_to_orig_start_index.numpy() tok_to_orig_end_index_batch = tok_to_orig_end_index.numpy() formatted_outputs.extend( decoding(example_all[current_idx:current_idx + len(logits)], id2spo, logits_batch, seq_len_batch, tok_to_orig_start_index_batch, tok_to_orig_end_index_batch)) current_idx = current_idx + len(logits) loss_avg = loss_all / eval_steps print("eval loss: %f" % (loss_avg)) if mode == "predict": predict_file_path = os.path.join(args.data_path, 'predictions.json') else: predict_file_path = os.path.join(args.data_path, 'predict_eval.json') predict_zipfile_path = write_prediction_results(formatted_outputs, predict_file_path) if mode == "eval": precision, recall, f1 = get_precision_recall_f1(file_path, predict_zipfile_path) os.system('rm {} {}'.format(predict_file_path, predict_zipfile_path)) return precision, recall, f1 elif mode != "predict": raise Exception("wrong mode for eval func")
365
run_duie.py
Python
examples/information_extraction/DuIE/run_duie.py
621357338437ee420eabbbf5ab19065bc85e73a5
PaddleNLP
6
313,222
51
13
28
259
27
0
69
213
test_event_zones
Publish Nest Motion/Person events with optional user defined zone information (#66187) Publish Nest events with zone information if present. User defined zones are configured in the Google Home app, and are published with Motion/Person event.
https://github.com/home-assistant/core.git
async def test_event_zones(hass): events = async_capture_events(hass, NEST_EVENT) subscriber = await async_setup_devices( hass, "sdm.devices.types.DOORBELL", create_device_traits(["sdm.devices.traits.CameraMotion"]), ) registry = er.async_get(hass) entry = registry.async_get("camera.front") assert entry is not None event_map = { "sdm.devices.events.CameraMotion.Motion": { "eventSessionId": EVENT_SESSION_ID, "eventId": EVENT_ID, "zones": ["Zone 1"], }, } timestamp = utcnow() await subscriber.async_receive_event(create_events(event_map, timestamp=timestamp)) await hass.async_block_till_done() event_time = timestamp.replace(microsecond=0) assert len(events) == 1 assert event_view(events[0].data) == { "device_id": entry.device_id, "type": "camera_motion", "timestamp": event_time, "zones": ["Zone 1"], }
150
test_events.py
Python
tests/components/nest/test_events.py
b2f5ab200811c19284dea81ba298a4566fd87eda
core
1
294,621
37
15
21
166
13
0
43
246
test_frame_interval_property
Generic IP Camera configflow 2 (#52360) Co-authored-by: J. Nick Koston <[email protected]>
https://github.com/home-assistant/core.git
async def test_frame_interval_property(hass, mock_av_open): with mock_av_open: await async_setup_component( hass, "camera", { "camera": { "name": "config_test", "platform": "generic", "stream_source": "rtsp://example.com:554/rtsp/", "framerate": 5, }, }, ) await hass.async_block_till_done() request = Mock() with patch( "homeassistant.components.camera.async_get_still_stream" ) as mock_get_stream: await async_get_mjpeg_stream(hass, request, "camera.config_test") assert mock_get_stream.call_args_list[0][0][3] == pytest.approx(0.2)
93
test_camera.py
Python
tests/components/generic/test_camera.py
c1a2be72fc8b76b55cfde1823c5688100e397369
core
1
275,497
15
9
4
33
6
0
15
47
_resource_apply_dense
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def _resource_apply_dense(self, grad, handle, apply_state): raise NotImplementedError( "`_resource_apply_dense` must be implemented in " "subclasses." )
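A minimal sketch of how a concrete optimizer subclass might override this hook, loosely following the optimizer_v2 convention of caching per-(device, dtype) coefficients in apply_state; the coefficient key and fallback call are assumptions about that convention, not part of this record.

def _resource_apply_dense(self, grad, handle, apply_state):
    # Look up cached hyperparameters for this variable's device/dtype,
    # falling back to recomputing them if apply_state was not passed.
    var_device, var_dtype = handle.device, handle.dtype.base_dtype
    coefficients = (apply_state or {}).get(
        (var_device, var_dtype)
    ) or self._fallback_apply_state(var_device, var_dtype)
    # Plain gradient-descent step: var -= lr * grad
    return handle.assign_sub(
        coefficients["lr_t"] * grad, use_locking=self._use_locking
    )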
18
optimizer_v2.py
Python
keras/optimizers/optimizer_v2/optimizer_v2.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
1
136,345
6
6
7
21
3
0
6
20
ping
[RLlib] Fault tolerant and elastic WorkerSets used across RLlib's algorithms (for sampling and evaluation). (#30118)
https://github.com/ray-project/ray.git
def ping(self) -> str: return "pong"
10
actor_manager.py
Python
rllib/utils/actor_manager.py
76cb42c578adf19a70a6b4401098a7a21e0d3b29
ray
1
128,824
29
11
8
144
16
0
35
67
list_changed_files
[ci] Fetch base branch before git diff in determine_tests_to_run.py (#29185) Some PRs (e.g. #29064) only change a single file but trigger the full test suite. The reason is likely that we have a stale master head ref. By fetching the latest head, we should be able to see more accurate results here and avoid running too many tests. Signed-off-by: Kai Fricke <[email protected]>
https://github.com/ray-project/ray.git
def list_changed_files(commit_range): base_branch = os.environ.get("BUILDKITE_PULL_REQUEST_BASE_BRANCH") if base_branch: pull_command = ["git", "fetch", "origin", base_branch] subprocess.check_call(pull_command) command = ["git", "diff", "--name-only", commit_range, "--"] out = subprocess.check_output(command) return [s.strip() for s in out.decode().splitlines() if s is not None]
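For illustration, a hedged example of how the helper might be invoked; the commit range literal is an assumption about a typical diff range, not taken from this record.

if __name__ == "__main__":
    # Print each changed file, one per line, for the given commit range.
    for changed_path in list_changed_files("origin/master...HEAD"):
        print(changed_path)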
82
determine_tests_to_run.py
Python
ci/pipeline/determine_tests_to_run.py
31347c026e985e9baa438bdaa3159fb5cd7dea6c
ray
4
213,050
40
14
13
203
21
0
58
181
_get_method_path_uri_list
fix: Py27hash fix (#2182) * Add third party py27hash code * Add Py27UniStr and unit tests * Add py27hash_fix utils and tests * Add to_py27_compatible_template and tests * Apply py27hash fix to wherever it is needed * Apply py27hash fix, all tests pass except api_with_any_method_in_swagger * apply py27hash fix in openapi + run black * remove py27 testing * remove other py27 references * black fixes * fixes/typos * remove py27 from tox.ini * refactoring * third party notice * black * Fix py27hash fix to deal with null events * Fix Py27UniStr repr for unicode literals * black reformat * Update _template_has_api_resource to check data type more defensively * Apply py27Dict in _get_authorizers * Apply Py27Dict to authorizers and gateway responses which will go into swagger * Update to_py27_compatible_template to handle parameter_values; Add Py27LongInt class * Rename _convert_to_py27_dict to _convert_to_py27_type * Apply Py27UniStr to path param name * Handle HttpApi resource under to_py27_compatible_template * Fix InvalidDocumentException to not sort different exceptions * black reformat * Remove unnecessary test files Co-authored-by: Wing Fung Lau <[email protected]>
https://github.com/aws/serverless-application-model.git
def _get_method_path_uri_list(self, path, stage): methods = list(self.get_path(path).keys()) uri_list = [] path = SwaggerEditor.get_path_without_trailing_slash(path) for m in methods: method = "*" if (m.lower() == self._X_ANY_METHOD or m.lower() == "any") else m.upper() resource = "execute-api:/${__Stage__}/" + method + path resource = ( Py27UniStr(resource) if isinstance(method, Py27UniStr) or isinstance(path, Py27UniStr) else resource ) resource = fnSub(resource, {"__Stage__": stage}) uri_list.extend([resource]) return uri_list
124
swagger.py
Python
samtranslator/swagger/swagger.py
a5db070f446b7cfebdaa6ad2e3dcf78f6105a272
serverless-application-model
6
183,840
27
10
7
96
12
0
33
54
test_stylesheet_apply_highest_specificity_wins_multiple_classes
Add various additional tests around CSS specificity
https://github.com/Textualize/textual.git
def test_stylesheet_apply_highest_specificity_wins_multiple_classes(): css = ".b.c {background: blue;} .a {background: red; color: lime;}" stylesheet = _make_stylesheet(css) node = DOMNode(classes="a b c") stylesheet.apply(node) assert node.styles.background == Color(0, 0, 255) assert node.styles.color == Color(0, 255, 0)
58
test_stylesheet.py
Python
tests/css/test_stylesheet.py
4dd0d9fae43583638f34257f97d5749ca4f2c00c
textual
1
48,737
10
11
6
115
10
0
15
65
test_conflicting_specified_basename
raise ImproperlyConfigured exception if `basename` is not unique (#8438) * raise ImproperlyConfigured if basename already exists * rename already_registered function; return True/False * additional basename tests * additional basename tests * Update rest_framework/routers.py Co-authored-by: David Graves <[email protected]> Co-authored-by: Asif Saif Uddin <[email protected]>
https://github.com/encode/django-rest-framework.git
def test_conflicting_specified_basename(self): self.router.register(r'notes', NoteViewSet, basename='notes') with pytest.raises(ImproperlyConfigured): self.router.register(r'notes_kwduplicate', KWargedNoteViewSet, basename='notes') with pytest.raises(ImproperlyConfigured): self.router.register(r'notes_duplicate', KWargedNoteViewSet, basename='notes')
67
test_routers.py
Python
tests/test_routers.py
48a21aa0eb3a95d32456c2a927eff9552a04231e
django-rest-framework
1