Column schema (for int64 columns the two numbers are the minimum and maximum values; for stringlengths columns they are the minimum and maximum string lengths; stringclasses gives the number of distinct values):

    column          dtype          min    max
    id              int64          20     338k
    vocab_size      int64          2      671
    ast_levels      int64          4      32
    nloc            int64          1      451
    n_ast_nodes     int64          12     5.6k
    n_identifiers   int64          1      186
    n_ast_errors    int64          0      10
    n_words         int64          2      2.17k
    n_whitespaces   int64          2      13.8k
    fun_name        stringlengths  2      73
    commit_message  stringlengths  51     15.3k
    url             stringlengths  31     59
    code            stringlengths  51     31k
    ast_errors      stringlengths  0      1.46k
    token_counts    int64          6      3.32k
    file_name       stringlengths  5      56
    language        stringclasses  1 value
    path            stringlengths  7      134
    commit_id       stringlengths  40     40
    repo            stringlengths  3      28
    complexity      int64          1      153

The records below carry these fields in this same order; empty ast_errors values are omitted.
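A minimal sketch of how a corpus with this schema is typically inspected with the Hugging Face `datasets` library. The dataset identifier below is a placeholder, not the real name of this corpus:

    from datasets import load_dataset

    # Hypothetical identifier; substitute the actual dataset name.
    ds = load_dataset("org/python-commit-functions", split="train")

    # Each row carries the function source plus per-function metrics.
    row = ds[0]
    print(row["fun_name"], row["complexity"], row["repo"])

    # Example: keep only single-branch functions with clean ASTs.
    simple = ds.filter(lambda r: r["complexity"] == 1 and r["n_ast_errors"] == 0)
    print(len(simple))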

id: 299,377
vocab_size: 18
ast_levels: 10
nloc: 8
n_ast_nodes: 72
n_identifiers: 7
n_ast_errors: 0
n_words: 24
n_whitespaces: 92
fun_name: async_set_repeat
commit_message:
Improve repeat and shuffle support for Squeezebox (#70941)
url: https://github.com/home-assistant/core.git
code:
    async def async_set_repeat(self, repeat):
        if repeat == REPEAT_MODE_ALL:
            repeat_mode = "playlist"
        elif repeat == REPEAT_MODE_ONE:
            repeat_mode = "song"
        else:
            repeat_mode = "none"
        await self._player.async_set_repeat(repeat_mode)
token_counts: 38
file_name: media_player.py
language: Python
path: homeassistant/components/squeezebox/media_player.py
commit_id: 0264f060e4fc988f3a0442ba8f951677816c11ea
repo: core
complexity: 3

id: 213,043
vocab_size: 106
ast_levels: 13
nloc: 56
n_ast_nodes: 542
n_identifiers: 30
n_ast_errors: 0
n_words: 195
n_whitespaces: 789
fun_name: add_resource_policy
commit_message:
fix: Py27hash fix (#2182) * Add third party py27hash code * Add Py27UniStr and unit tests * Add py27hash_fix utils and tests * Add to_py27_compatible_template and tests * Apply py27hash fix to wherever it is needed * Apply py27hash fix, all tests pass except api_with_any_method_in_swagger * apply py27hash fix in openapi + run black * remove py27 testing * remove other py27 references * black fixes * fixes/typos * remove py27 from tox.ini * refactoring * third party notice * black * Fix py27hash fix to deal with null events * Fix Py27UniStr repr for unicode literals * black reformat * Update _template_has_api_resource to check data type more defensively * Apply py27Dict in _get_authorizers * Apply Py27Dict to authorizers and gateway responses which will go into swagger * Update to_py27_compatible_template to handle parameter_values; Add Py27LongInt class * Rename _convert_to_py27_dict to _convert_to_py27_type * Apply Py27UniStr to path param name * Handle HttpApi resource under to_py27_compatible_template * Fix InvalidDocumentException to not sort different exceptions * black reformat * Remove unnecessary test files Co-authored-by: Wing Fung Lau <[email protected]>
url: https://github.com/aws/serverless-application-model.git
code:
    def add_resource_policy(self, resource_policy, path, stage):
        if resource_policy is None:
            return

        SwaggerEditor.validate_is_dict(resource_policy, "Resource Policy is not a valid dictionary.")

        aws_account_whitelist = resource_policy.get("AwsAccountWhitelist")
        aws_account_blacklist = resource_policy.get("AwsAccountBlacklist")
        ip_range_whitelist = resource_policy.get("IpRangeWhitelist")
        ip_range_blacklist = resource_policy.get("IpRangeBlacklist")
        source_vpc_whitelist = resource_policy.get("SourceVpcWhitelist")
        source_vpc_blacklist = resource_policy.get("SourceVpcBlacklist")
        # Intrinsic's supported in these properties
        source_vpc_intrinsic_whitelist = resource_policy.get("IntrinsicVpcWhitelist")
        source_vpce_intrinsic_whitelist = resource_policy.get("IntrinsicVpceWhitelist")
        source_vpc_intrinsic_blacklist = resource_policy.get("IntrinsicVpcBlacklist")
        source_vpce_intrinsic_blacklist = resource_policy.get("IntrinsicVpceBlacklist")

        if aws_account_whitelist is not None:
            resource_list = self._get_method_path_uri_list(path, stage)
            self._add_iam_resource_policy_for_method(aws_account_whitelist, "Allow", resource_list)

        if aws_account_blacklist is not None:
            resource_list = self._get_method_path_uri_list(path, stage)
            self._add_iam_resource_policy_for_method(aws_account_blacklist, "Deny", resource_list)

        if ip_range_whitelist is not None:
            resource_list = self._get_method_path_uri_list(path, stage)
            self._add_ip_resource_policy_for_method(ip_range_whitelist, "NotIpAddress", resource_list)

        if ip_range_blacklist is not None:
            resource_list = self._get_method_path_uri_list(path, stage)
            self._add_ip_resource_policy_for_method(ip_range_blacklist, "IpAddress", resource_list)

        if not SwaggerEditor._validate_list_property_is_resolved(source_vpc_blacklist):
            raise InvalidDocumentException(
                [
                    InvalidTemplateException(
                        "SourceVpcBlacklist must be a list of strings. Use IntrinsicVpcBlacklist instead for values that use Intrinsic Functions"
                    )
                ]
            )
        # FIXME: check if this requires py27 dict?
        blacklist_dict = {
            "StringEndpointList": source_vpc_blacklist,
            "IntrinsicVpcList": source_vpc_intrinsic_blacklist,
            "IntrinsicVpceList": source_vpce_intrinsic_blacklist,
        }
        resource_list = self._get_method_path_uri_list(path, stage)
        self._add_vpc_resource_policy_for_method(blacklist_dict, "StringEquals", resource_list)

        if not SwaggerEditor._validate_list_property_is_resolved(source_vpc_whitelist):
            raise InvalidDocumentException(
                [
                    InvalidTemplateException(
                        "SourceVpcWhitelist must be a list of strings. Use IntrinsicVpcWhitelist instead for values that use Intrinsic Functions"
                    )
                ]
            )
        whitelist_dict = {
            "StringEndpointList": source_vpc_whitelist,
            "IntrinsicVpcList": source_vpc_intrinsic_whitelist,
            "IntrinsicVpceList": source_vpce_intrinsic_whitelist,
        }
        self._add_vpc_resource_policy_for_method(whitelist_dict, "StringNotEquals", resource_list)

        self._doc[self._X_APIGW_POLICY] = self.resource_policy
token_counts: 322
file_name: swagger.py
language: Python
path: samtranslator/swagger/swagger.py
commit_id: a5db070f446b7cfebdaa6ad2e3dcf78f6105a272
repo: serverless-application-model
complexity: 8

id: 101,298
vocab_size: 27
ast_levels: 11
nloc: 8
n_ast_nodes: 106
n_identifiers: 9
n_ast_errors: 0
n_words: 34
n_whitespaces: 88
fun_name: toggle_mask_display
commit_message:
Data Augmentation update (#1263) - lib.detected_face - Subclass Masks for Landmark based masks - Add training mask propery + methods to DetectedFace - lib.training_training - subclass TrainingDataGenerator for training and preview data - Split cache into own module - Reduce thread count to 1 to prevent image corruption + data re-use - Process on largest model input/output size rather than stored image size - Size and crop masks during caching stage - Implement ring buffer for data flow - Fix preview reload bug - augmentation - typing - switch color aug order - better initialization - Fix warp + landmark warp to correctly apply at different image scales - Slightly improved warp caching - Don't store whether image is_preview. Handle all data as training images implicitly - plugins.trainer: Typing and fixes to work with trainingdata refactor
url: https://github.com/deepfakes/faceswap.git
code:
    def toggle_mask_display(self) -> None:
        if not (self._model.config["learn_mask"] or self._model.config["penalized_mask_loss"]):
            return
        display_mask = not self._display_mask
        print("")  # Break to not garble loss output
        logger.info("Toggling mask display %s...", "on" if display_mask else "off")
        self._display_mask = display_mask
token_counts: 58
file_name: _base.py
language: Python
path: plugins/train/trainer/_base.py
commit_id: 2beceffad9b15c1fd78f06b9b272563321c5a41e
repo: faceswap
complexity: 4

id: 168,323
vocab_size: 7
ast_levels: 9
nloc: 15
n_ast_nodes: 46
n_identifiers: 8
n_ast_errors: 0
n_words: 8
n_whitespaces: 22
fun_name: isin
commit_message:
TYP: pandas.core.series annotations from pandas-stubs (#47926) * TYP: pandas.core.series annotations from pandas-stubs * and DataFrame * more compatibility with pandas-stub tests * mypy address line-off-by-one (merge?) issue
url: https://github.com/pandas-dev/pandas.git
code:
    def isin(self, values) -> npt.NDArray[np.bool_]:
        return isin(np.asarray(self), values)
token_counts: 29
file_name: base.py
language: Python
path: pandas/core/arrays/base.py
commit_id: 50c2af1a6322375359a8bacfd79056ca4ab02df2
repo: pandas
complexity: 1

id: 158,111
vocab_size: 47
ast_levels: 15
nloc: 10
n_ast_nodes: 183
n_identifiers: 27
n_ast_errors: 0
n_words: 60
n_whitespaces: 157
fun_name: evaluate_accuracy_gpus
commit_message:
[PaddlePaddle] Merge master into Paddle branch (#1186) * change 15.2 title in chinese version (#1109) change title ’15.2. 情感分析:使用递归神经网络‘ to ’15.2. 情感分析:使用循环神经网络‘ * 修改部分语义表述 (#1105) * Update r0.17.5 (#1120) * Bump versions in installation * 94行typo: (“bert.mall”)->(“bert.small”) (#1129) * line 313: "bert.mall" -> "bert.small" (#1130) * fix: update language as native reader (#1114) * Fix the translation of "stride" (#1115) * Update index.md (#1118) 修改部分语义表述 * Update self-attention-and-positional-encoding.md (#1133) 依照本书的翻译习惯,将pooling翻译成汇聚 * maybe a comment false (#1149) * maybe a little false * maybe a little false * A minor bug in the rcnn section (Chinese edition) (#1148) * Update bert.md (#1137) 一个笔误 # 假设batch_size=2,num_pred_positions=3 # 那么batch_idx应该是np.repeat( [0,1], 3 ) = [0,0,0,1,1,1] * Update calculus.md (#1135) * fix typo in git documentation (#1106) * fix: Update the Chinese translation in lr-scheduler.md (#1136) * Update lr-scheduler.md * Update chapter_optimization/lr-scheduler.md Co-authored-by: goldmermaid <[email protected]> Co-authored-by: goldmermaid <[email protected]> * fix translation for kaggle-house-price.md (#1107) * fix translation for kaggle-house-price.md * fix translation for kaggle-house-price.md Signed-off-by: sunhaizhou <[email protected]> * Update weight-decay.md (#1150) * Update weight-decay.md 关于“k多选d”这一部分,中文读者使用排列组合的方式可能更容易理解 关于“给定k个变量,阶数的个数为...”这句话是有歧义的,不是很像中国话,应该是说“阶数为d的项的个数为...”。 并增加了一句对“因此即使是阶数上的微小变化,比如从$2$到$3$,也会显著增加我们模型的复杂性。”的解释 解释为何会增加复杂性以及为何需要细粒度工具。 * Update chapter_multilayer-perceptrons/weight-decay.md yep Co-authored-by: goldmermaid <[email protected]> * Update chapter_multilayer-perceptrons/weight-decay.md yep Co-authored-by: goldmermaid <[email protected]> Co-authored-by: goldmermaid <[email protected]> * Fix a spelling error (#1161) * Update gru.md (#1152) The key distinction between vanilla RNNs and GRUs is that the latter support gating of the hidden state. 翻译错误 * Unify the function naming (#1113) Unify naming of the function 'init_xavier()'. * Update mlp-concise.md (#1166) * Update mlp-concise.md 语句不通顺 * Update environment.md 语序异常 * Update config.ini * fix the imprecise description (#1168) Co-authored-by: yuande <yuande> * fix typo in chapter_natural-language-processing-pretraining/glove.md (#1175) * Fix some typos. (#1163) * Update batch-norm.md (#1170) fixing typos u->x in article * Update linear-regression.md (#1090) We invoke Stuart Russell and Peter Norvig who, in their classic AI text book Artificial Intelligence: A Modern Approach :cite:Russell.Norvig.2016, pointed out that 原译文把who也直接翻译出来了。 * Update mlp.md (#1117) * Update mlp.md 修改部分语义表述 * Update chapter_multilayer-perceptrons/mlp.md Co-authored-by: goldmermaid <[email protected]> * Update chapter_multilayer-perceptrons/mlp.md Co-authored-by: Aston Zhang <[email protected]> Co-authored-by: goldmermaid <[email protected]> * Correct a translation error. (#1091) * Correct a translation error. 
* Update chapter_computer-vision/image-augmentation.md Co-authored-by: Aston Zhang <[email protected]> * Update aws.md (#1121) * Update aws.md * Update chapter_appendix-tools-for-deep-learning/aws.md Co-authored-by: Aston Zhang <[email protected]> * Update image-augmentation.md (#1093) * Update anchor.md (#1088) fix a minor issue in code * Update anchor.md * Update image-augmentation.md * fix typo and improve translation in chapter_linear-networks\softmax-regression.md (#1087) * Avoid `torch.meshgrid` user warning (#1174) Avoids the following user warning: ```python ~/anaconda3/envs/torch/lib/python3.10/site-packages/torch/functional.py:568: UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at ../aten/src/ATen/native/TensorShape.cpp:2228.) return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined] ``` * bump to 2.0.0-beta1 * Update sequence.md * bump beta1 on readme * Add latex code block background to config * BLD: Bump python support version 3.9 (#1183) * BLD: Bump python support version 3.9 * Remove clear and manually downgrade protobuf 4.21.4 to 3.19.4 * BLD: Bump torch and tensorflow * Update Jenkinsfile * Update chapter_installation/index.md * Update chapter_installation/index.md Co-authored-by: Aston Zhang <[email protected]> * Update config.ini * Update INFO.md * Update INFO.md * Drop mint to show code in pdf, use Inconsolata font, apply code cell color (#1187) * resolve the conflicts * revise from publisher (#1089) * revise from publisher * d2l api * post_latex * revise from publisher * revise ch11 * Delete d2l-Copy1.bib * clear cache * rm d2lbook clear * debug anchor * keep original d2l doc Co-authored-by: Ubuntu <[email protected]> Co-authored-by: Aston Zhang <[email protected]> Co-authored-by: Aston Zhang <[email protected]> * 重复语句 (#1188) Co-authored-by: Aston Zhang <[email protected]> * Improve expression for chapter_preliminaries/pandas.md (#1184) * Update pandas.md * Improve expression * Improve expression * Update chapter_preliminaries/pandas.md Co-authored-by: Aston Zhang <[email protected]> * Improce expression for chapter_preliminaries/linear-algebra.md (#1185) * Improce expression * Improve code comments * Update chapter_preliminaries/linear-algebra.md * Update chapter_preliminaries/linear-algebra.md * Update chapter_preliminaries/linear-algebra.md * Update chapter_preliminaries/linear-algebra.md Co-authored-by: Aston Zhang <[email protected]> * Fix multibox_detection bugs * Update d2l to 0.17.5 version * restore older version * Upgrade pandas * change to python3.8 * Test warning log * relocate warning log * test logs filtering * Update gru.md * Add DeprecationWarning filter * Test warning log * Update attention mechanisms & computational performance * Update multilayer perceptron& linear & convolution networks & computer vision * Update recurrent&optimition&nlp pretraining & nlp applications * ignore warnings * Update index.md * Update linear networks * Update multilayer perceptrons&deep learning computation * Update preliminaries * Check and Add warning filter * Update kaggle-cifar10.md * Update object-detection-dataset.md * Update ssd.md fcn.md * Update hybridize.md * Update hybridize.md Signed-off-by: sunhaizhou <[email protected]> Co-authored-by: zhou201505013 <[email protected]> Co-authored-by: Xinwei Liu <[email protected]> Co-authored-by: Anirudh Dagar <[email protected]> Co-authored-by: Aston Zhang <[email protected]> Co-authored-by: hugo_han <[email protected]> 
Co-authored-by: gyro永不抽风 <[email protected]> Co-authored-by: CanChengZheng <[email protected]> Co-authored-by: linlin <[email protected]> Co-authored-by: iuk <[email protected]> Co-authored-by: yoos <[email protected]> Co-authored-by: Mr. Justice Lawrence John Wargrave <[email protected]> Co-authored-by: Chiyuan Fu <[email protected]> Co-authored-by: Sunhuashan <[email protected]> Co-authored-by: Haiker Sun <[email protected]> Co-authored-by: Ming Liu <[email protected]> Co-authored-by: goldmermaid <[email protected]> Co-authored-by: silenceZheng66 <[email protected]> Co-authored-by: Wenchao Yan <[email protected]> Co-authored-by: Kiki2049 <[email protected]> Co-authored-by: Krahets <[email protected]> Co-authored-by: friedmainfunction <[email protected]> Co-authored-by: Jameson <[email protected]> Co-authored-by: P. Yao <[email protected]> Co-authored-by: Yulv-git <[email protected]> Co-authored-by: Liu,Xiao <[email protected]> Co-authored-by: YIN, Gang <[email protected]> Co-authored-by: Joe-HZ <[email protected]> Co-authored-by: lybloveyou <[email protected]> Co-authored-by: VigourJiang <[email protected]> Co-authored-by: zxhd863943427 <[email protected]> Co-authored-by: LYF <[email protected]> Co-authored-by: Aston Zhang <[email protected]> Co-authored-by: xiaotinghe <[email protected]> Co-authored-by: Ubuntu <[email protected]> Co-authored-by: Holly-Max <[email protected]> Co-authored-by: HinGwenWoong <[email protected]> Co-authored-by: Shuai Zhang <[email protected]>
url: https://github.com/d2l-ai/d2l-zh.git
code:
    def evaluate_accuracy_gpus(net, data_iter, split_f=d2l.split_batch):
        # Query the list of devices
        devices = list(net.collect_params().values())[0].list_ctx()
        # No. of correct predictions, no. of predictions
        metric = d2l.Accumulator(2)
        for features, labels in data_iter:
            X_shards, y_shards = split_f(features, labels, devices)
            # Run in parallel
            pred_shards = [net(X_shard) for X_shard in X_shards]
            metric.add(
                sum(
                    float(d2l.accuracy(pred_shard, y_shard))
                    for pred_shard, y_shard in zip(pred_shards, y_shards)
                ),
                labels.size,
            )
        return metric[0] / metric[1]
token_counts: 118
file_name: mxnet.py
language: Python
path: d2l/mxnet.py
commit_id: b64b41d8c1ac23c43f7a4e3f9f6339d6f0012ab2
repo: d2l-zh
complexity: 4

id: 107,880
vocab_size: 55
ast_levels: 12
nloc: 17
n_ast_nodes: 352
n_identifiers: 33
n_ast_errors: 1
n_words: 74
n_whitespaces: 294
fun_name: test_colorbar_extension_inverted_axis
commit_message:
FIX: Handle inverted colorbar axes with extensions This fixes the colorbar extensions to use the proper color when the long axis is inverted.
url: https://github.com/matplotlib/matplotlib.git
code:
    def test_colorbar_extension_inverted_axis(orientation, extend, expected):
        data = np.arange(12).reshape(3, 4)
        fig, ax = plt.subplots()
        cmap = plt.get_cmap("viridis").with_extremes(under=(0, 0, 0, 1), over=(1, 1, 1, 1))
        im = ax.imshow(data, cmap=cmap)
        cbar = fig.colorbar(im, orientation=orientation, extend=extend)
        if orientation == "horizontal":
            cbar.ax.invert_xaxis()
        else:
            cbar.ax.invert_yaxis()
        assert cbar._extend_patches[0].get_facecolor() == expected
        if extend == "both":
            assert len(cbar._extend_patches) == 2
            assert cbar._extend_patches[1].get_facecolor() == (0, 0, 0, 1)
        else:
            assert len(cbar._extend_patches) == 1


    @pytest.mark.parametrize('use_gridspec', [True, False])
    @image_comparison(['cbar_with_orientation',
                       'cbar_locationing',
                       'double_cbar',
                       'cbar_sharing',
                       ],
                      extensions=['png'], remove_text=True,
                      savefig_kwarg={'dpi': 40})
ast_errors:
    @pytest.mark.parametrize('use_gridspec', [True, False]) @image_comparison(['cbar_with_orientation', 'cbar_locationing', 'double_cbar', 'cbar_sharing', ], extensions=['png'], remove_text=True, savefig_kwarg={'dpi': 40})
token_counts: 177
file_name: test_colorbar.py
language: Python
path: lib/matplotlib/tests/test_colorbar.py
commit_id: ec374f5148631e4d392ed7e6d4c454d163a62f21
repo: matplotlib
complexity: 3

id: 191,688
vocab_size: 12
ast_levels: 11
nloc: 5
n_ast_nodes: 71
n_identifiers: 9
n_ast_errors: 0
n_words: 12
n_whitespaces: 28
fun_name: test_run_args_and_kwargs_error
commit_message:
change run to use args and kwargs (#367) Before, `run` was not able to be called with multiple arguments. This expands the functionality.
url: https://github.com/hwchase17/langchain.git
code:
    def test_run_args_and_kwargs_error() -> None:
        chain = FakeChain(the_input_keys=["foo", "bar"])
        with pytest.raises(ValueError):
            chain.run("bar", foo="bar")
token_counts: 37
file_name: test_base.py
language: Python
path: tests/unit_tests/chains/test_base.py
commit_id: 8d0869c6d3ed63b2b15d4f75ea664e089dcc569d
repo: langchain
complexity: 1

id: 31,016
vocab_size: 13
ast_levels: 9
nloc: 3
n_ast_nodes: 53
n_identifiers: 9
n_ast_errors: 0
n_words: 16
n_whitespaces: 31
fun_name: _set_gradient_checkpointing
commit_message:
M-CTC-T Model (#16402) * added cbs to notebooks, made copy-paste error fix in generation_utils * initial push for mctc model * mctc feature extractor done * added processor, tokenizer and their tests for MCTC. Have added an MCTC modeling test, adjusting model code accordingly. * added processor, tokenizer and their tests for MCTC. Have added an MCTC modeling test, adjusting model code accordingly. * passing attention, now struggling to figure out how attention masks make sense here * works when excluding attention masks. ask later how one would integrate attention maskshere * bizarre configuration error (model prefix comes first in config dict json and messes up the order) * all passing but bizzarre config dict ordering issue when to_dict * passing all major tests * feature extraction, processor, tokenizer added & tests passing * style & consistency & other logistical fixes * copy paste fix * model after feature extraction working * commiting final feature extraction results; need to fix normalization * feature extraction passing tests; probably should add tests on the specific flashlight-copied functions? * delete print ; format code a bit * fixing tests * passing major tests * fixing styles * completed tokenization test with real example; not sure if these values are entirely correct. * last test fixes from local * reverting accidentally included custom setup configs * remove load tf weights; fix config error * testing couldnt import featureextractor * fix docs * fix docs * resolving comments * style fixes * style fixes * Update to MCTCConv1dSubSampler Co-authored-by: Patrick von Platen <[email protected]> * relposemb fixes * conv1d name issue; expecting config fail with paraentheses * fix config issue * fix config issue * fix config issue * change everything to MCTCT * fixing naming change errors * archive list * copyrights and docs * copyrights and docs * copyrights and docs * merge resolution * move tests, fix to changed optionaldependency structure * test directories changed * fixing tests * how to avoid tf tests? * how to avoid tf tests? * tests passing locally * allow mctctprocessor imported any env * allow mctctprocessor imported any env * fixed second round of feedback, need to fix docs * doc changes not being applied * all fixed * style fix * feedback fixes * fix copies and feature extraction style fix * Update tests/models/visual_bert/test_modeling_visual_bert.py Co-authored-by: Sylvain Gugger <[email protected]> * copy paste huggingface:main visual bert * added eof newline to visual bert; all tests are passing otherwise * fix slow tests by adding attention mask * change model id to speechbrain * make fix-copies * fix readme unwanted deletes * fixing readmes, make fix-copies * consistent M-CTC-T naming * Update src/transformers/models/mctct/__init__.py Co-authored-by: Patrick von Platen <[email protected]> * all fixed but variable naming * adjust double quotes * fixed variable names * copyright and mr quilter * Apply suggestions from code review Co-authored-by: Sylvain Gugger <[email protected]> * correct slow tests * make fix-copies * Update src/transformers/models/mctct/configuration_mctct.py Co-authored-by: Sylvain Gugger <[email protected]> * Update src/transformers/models/mctct/configuration_mctct.py Co-authored-by: Sylvain Gugger <[email protected]> * m-ctc-t not mctct Co-authored-by: Patrick von Platen <[email protected]> Co-authored-by: Sylvain Gugger <[email protected]>
url: https://github.com/huggingface/transformers.git
code:
    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (MCTCTEncoder)):
            module.gradient_checkpointing = value


    MCTCT_START_DOCSTRING = r
    MCTCT_INPUTS_DOCSTRING = r
token_counts: 26
file_name: modeling_mctct.py
language: Python
path: src/transformers/models/mctct/modeling_mctct.py
commit_id: 119e3c0fc83db5803d20d0749eef1220f27cfdc8
repo: transformers
complexity: 2

id: 264,638
vocab_size: 23
ast_levels: 11
nloc: 10
n_ast_nodes: 120
n_identifiers: 19
n_ast_errors: 0
n_words: 25
n_whitespaces: 119
fun_name: enqueue_job
commit_message:
Allow setting individual timeouts on scripts and reports
url: https://github.com/netbox-community/netbox.git
code:
    def enqueue_job(cls, func, name, obj_type, user, *args, **kwargs):
        job_result = cls.objects.create(
            name=name, obj_type=obj_type, user=user, job_id=uuid.uuid4()
        )
        queue = django_rq.get_queue("default")
        queue.enqueue(func, job_id=str(job_result.job_id), job_result=job_result, **kwargs)
        return job_result
token_counts: 80
file_name: models.py
language: Python
path: netbox/extras/models/models.py
commit_id: 36d6ae33d15e93cc552827cdea363a9c00c7f823
repo: netbox
complexity: 1

id: 273,842
vocab_size: 9
ast_levels: 9
nloc: 7
n_ast_nodes: 51
n_identifiers: 8
n_ast_errors: 0
n_words: 12
n_whitespaces: 69
fun_name: _create_non_trackable_mask_cache
commit_message:
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
url: https://github.com/keras-team/keras.git
code:
    def _create_non_trackable_mask_cache(self):
        self._dropout_mask_cache = backend.ContextValueCache(
            self._create_dropout_mask
        )
        self._recurrent_dropout_mask_cache = backend.ContextValueCache(
            self._create_recurrent_dropout_mask
        )
token_counts: 30
file_name: dropout_rnn_cell_mixin.py
language: Python
path: keras/layers/rnn/dropout_rnn_cell_mixin.py
commit_id: 84afc5193d38057e2e2badf9c889ea87d80d8fbf
repo: keras
complexity: 1

id: 296,571
vocab_size: 23
ast_levels: 12
nloc: 7
n_ast_nodes: 78
n_identifiers: 14
n_ast_errors: 0
n_words: 23
n_whitespaces: 73
fun_name: _get_bytes
commit_message:
Add missing typing in AsusWRT router class (#70189) * Add missing typing in AsusWRT router class * Fix typing in device tracker * Fix mypy incompatible type
url: https://github.com/home-assistant/core.git
code:
    async def _get_bytes(self) -> dict[str, Any]:
        try:
            datas = await self._api.async_get_bytes_total()
        except (OSError, ValueError) as exc:
            raise UpdateFailed(exc) from exc
        return _get_dict(SENSORS_BYTES, datas)
token_counts: 48
file_name: router.py
language: Python
path: homeassistant/components/asuswrt/router.py
commit_id: b7c1fbc842ef362672a32c762c5bfb5cb84326ab
repo: core
complexity: 2

id: 259,605
vocab_size: 85
ast_levels: 17
nloc: 33
n_ast_nodes: 328
n_identifiers: 24
n_ast_errors: 0
n_words: 125
n_whitespaces: 603
fun_name: predict_proba
commit_message:
DEP loss "log" in favor of "log loss" in SGDClassifier (#23046) Co-authored-by: Julien Jerphanion <[email protected]> Co-authored-by: Jérémie du Boisberranger <[email protected]>
url: https://github.com/scikit-learn/scikit-learn.git
code:
    def predict_proba(self, X):
        check_is_fitted(self)
        # TODO(1.3): Remove "log"
        if self.loss in ("log_loss", "log"):
            return self._predict_proba_lr(X)

        elif self.loss == "modified_huber":
            binary = len(self.classes_) == 2
            scores = self.decision_function(X)

            if binary:
                prob2 = np.ones((scores.shape[0], 2))
                prob = prob2[:, 1]
            else:
                prob = scores

            np.clip(scores, -1, 1, prob)
            prob += 1.0
            prob /= 2.0

            if binary:
                prob2[:, 0] -= prob
                prob = prob2
            else:
                # the above might assign zero to all classes, which doesn't
                # normalize neatly; work around this to produce uniform
                # probabilities
                prob_sum = prob.sum(axis=1)
                all_zero = prob_sum == 0
                if np.any(all_zero):
                    prob[all_zero, :] = 1
                    prob_sum[all_zero] = len(self.classes_)

                # normalize
                prob /= prob_sum.reshape((prob.shape[0], -1))

            return prob

        else:
            raise NotImplementedError(
                "predict_(log_)proba only supported when"
                " loss='log_loss' or loss='modified_huber' "
                "(%r given)" % self.loss
            )
token_counts: 204
file_name: _stochastic_gradient.py
language: Python
path: sklearn/linear_model/_stochastic_gradient.py
commit_id: 0c20ba744966d23ede67cffd7c5d2e0d01cd0658
repo: scikit-learn
complexity: 6
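The modified_huber branch above maps decision scores to probabilities by clipping to [-1, 1] and rescaling linearly onto [0, 1]. A standalone numeric sketch of that mapping (not the sklearn API itself):

    import numpy as np

    scores = np.array([-2.0, -0.5, 0.5, 3.0])
    # clip to [-1, 1], then (s + 1) / 2 maps linearly onto [0, 1]
    prob_pos = (np.clip(scores, -1.0, 1.0) + 1.0) / 2.0
    print(prob_pos)  # [0.   0.25 0.75 1.  ]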

id: 299,281
vocab_size: 6
ast_levels: 8
nloc: 3
n_ast_nodes: 33
n_identifiers: 6
n_ast_errors: 0
n_words: 6
n_whitespaces: 20
fun_name: is_online
commit_message:
Use shorthand attributes in sharkiq vacuum (#70844)
url: https://github.com/home-assistant/core.git
code:
    def is_online(self) -> bool:
        return self.coordinator.device_is_online(self._serial_number)
token_counts: 19
file_name: vacuum.py
language: Python
path: homeassistant/components/sharkiq/vacuum.py
commit_id: 0425f8bc4e03beb848bac99ff3ee8f60ad45c74c
repo: core
complexity: 1

id: 43,586
vocab_size: 93
ast_levels: 12
nloc: 3
n_ast_nodes: 333
n_identifiers: 49
n_ast_errors: 0
n_words: 137
n_whitespaces: 344
fun_name: upload_file
commit_message:
Migrate Google example DAG s3_to_gcs to new design AIP-47 (#24641) related: #22447, #22430
url: https://github.com/apache/airflow.git
code:
    def upload_file():
        s3_hook = S3Hook()
        s3_hook.load_file(filename=UPLOAD_FILE, key=PREFIX, bucket_name=BUCKET_NAME)


    with models.DAG(
        DAG_ID,
        schedule_interval='@once',
        start_date=datetime(2021, 1, 1),
        catchup=False,
        tags=['example', 's3'],
    ) as dag:
        create_s3_bucket = S3CreateBucketOperator(
            task_id="create_s3_bucket", bucket_name=BUCKET_NAME, region_name='us-east-1'
        )

        create_gcs_bucket = GCSCreateBucketOperator(
            task_id="create_bucket",
            bucket_name=BUCKET_NAME,
            project_id=GCP_PROJECT_ID,
        )
        # [START howto_transfer_s3togcs_operator]
        transfer_to_gcs = S3ToGCSOperator(
            task_id='s3_to_gcs_task', bucket=BUCKET_NAME, prefix=PREFIX, dest_gcs=GCS_BUCKET_URL
        )
        # [END howto_transfer_s3togcs_operator]

        delete_s3_bucket = S3DeleteBucketOperator(
            task_id='delete_s3_bucket',
            bucket_name=BUCKET_NAME,
            force_delete=True,
            trigger_rule=TriggerRule.ALL_DONE,
        )

        delete_gcs_bucket = GCSDeleteBucketOperator(
            task_id='delete_gcs_bucket', bucket_name=BUCKET_NAME, trigger_rule=TriggerRule.ALL_DONE
        )

        (
            # TEST SETUP
            create_gcs_bucket
            >> create_s3_bucket
            >> upload_file()
            # TEST BODY
            >> transfer_to_gcs
            # TEST TEARDOWN
            >> delete_s3_bucket
            >> delete_gcs_bucket
        )

        from tests.system.utils.watcher import watcher

        # This test needs watcher in order to properly mark success/failure
        # when "tearDown" task with trigger rule is part of the DAG
        list(dag.tasks) >> watcher()

    from tests.system.utils import get_test_run  # noqa: E402

    # Needed to run the example DAG with pytest (see: tests/system/README.md#run_via_pytest)
    test_run = get_test_run(dag)
token_counts: 26
file_name: example_s3_to_gcs.py
language: Python
path: tests/system/providers/google/cloud/gcs/example_s3_to_gcs.py
commit_id: 7a7ca5016019f93ebee052a2bf99772145b7fc03
repo: airflow
complexity: 1

id: 153,578
vocab_size: 33
ast_levels: 12
nloc: 7
n_ast_nodes: 114
n_identifiers: 13
n_ast_errors: 0
n_words: 35
n_whitespaces: 93
fun_name: idxmax
commit_message:
DOCS-#3099: Fix `BasePandasDataSet` docstrings warnings (#4333) Co-authored-by: Yaroslav Igoshev <[email protected]> Signed-off-by: Alexander Myskov <[email protected]>
url: https://github.com/modin-project/modin.git
code:
    def idxmax(self, axis=0, skipna=True):  # noqa: PR01, RT01, D200
        if not all(d != np.dtype("O") for d in self._get_dtypes()):
            raise TypeError("reduce operation 'argmax' not allowed for this dtype")
        axis = self._get_axis_number(axis)
        return self._reduce_dimension(
            self._query_compiler.idxmax(axis=axis, skipna=skipna)
        )
token_counts: 69
file_name: base.py
language: Python
path: modin/pandas/base.py
commit_id: 605efa618e7994681f57b11d04d417f353ef8d50
repo: modin
complexity: 3

id: 9,813
vocab_size: 36
ast_levels: 15
nloc: 17
n_ast_nodes: 121
n_identifiers: 11
n_ast_errors: 0
n_words: 40
n_whitespaces: 171
fun_name: ports
commit_message:
feat: star routing (#3900) * feat(proto): adjust proto for star routing (#3844) * feat(proto): adjust proto for star routing * feat(proto): generate proto files * feat(grpc): refactor grpclet interface (#3846) * feat: refactor connection pool for star routing (#3872) * feat(k8s): add more labels to k8s deployments * feat(network): refactor connection pool * feat(network): refactor k8s pool * feat: star routing graph gateway (#3877) * feat: star routing - refactor grpc data runtime (#3887) * feat(runtimes): refactor grpc dataruntime * fix(tests): adapt worker runtime tests * fix(import): fix import * feat(proto): enable sending multiple lists (#3891) * feat: star routing gateway (#3893) * feat: star routing gateway all protocols (#3897) * test: add streaming and prefetch tests (#3901) * feat(head): new head runtime for star routing (#3899) * feat(head): new head runtime * feat(head): new head runtime * style: fix overload and cli autocomplete * feat(network): improve proto comments Co-authored-by: Jina Dev Bot <[email protected]> * feat(worker): merge docs in worker runtime (#3905) * feat(worker): merge docs in worker runtime * feat(tests): assert after clean up * feat(tests): star routing runtime integration tests (#3908) * fix(tests): fix integration tests * test: test runtimes fast slow request (#3910) * feat(zmq): purge zmq, zed, routing_table (#3915) * feat(zmq): purge zmq, zed, routing_table * style: fix overload and cli autocomplete * feat(zmq): adapt comment in dependency list * style: fix overload and cli autocomplete * fix(tests): fix type tests Co-authored-by: Jina Dev Bot <[email protected]> * test: add test gateway to worker connection (#3921) * feat(pea): adapt peas for star routing (#3918) * feat(pea): adapt peas for star routing * style: fix overload and cli autocomplete * feat(pea): add tests * feat(tests): add failing head pea test Co-authored-by: Jina Dev Bot <[email protected]> * feat(tests): integration tests for peas (#3923) * feat(tests): integration tests for peas * feat(pea): remove _inner_pea function * feat: star routing container pea (#3922) * test: rescue tests (#3942) * fix: fix streaming tests (#3945) * refactor: move docker run to run (#3948) * feat: star routing pods (#3940) * feat(pod): adapt pods for star routing * feat(pods): adapt basepod to star routing * feat(pod): merge pod and compound pod * feat(tests): fix tests * style: fix overload and cli autocomplete * feat(test): add container pea int test * feat(ci): remove more unnecessary tests * fix(tests): remove jinad runtime * feat(ci): remove latency tracking * fix(ci): fix ci def * fix(runtime): enable runtime to be exited * fix(tests): wrap runtime test in process * fix(runtimes): remove unused runtimes * feat(runtimes): improve cancel wait * fix(ci): build test pip again in ci * fix(tests): fix a test * fix(test): run async in its own process * feat(pod): include shard in activate msg * fix(pea): dont join * feat(pod): more debug out * feat(grpc): manage channels properly * feat(pods): remove exitfifo * feat(network): add simple send retry mechanism * fix(network): await pool close * fix(test): always close grpc server in worker * fix(tests): remove container pea from tests * fix(tests): reorder tests * fix(ci): split tests * fix(ci): allow alias setting * fix(test): skip a test * feat(pods): address comments Co-authored-by: Jina Dev Bot <[email protected]> * test: unblock skipped test (#3957) * feat: jinad pea (#3949) * feat: jinad pea * feat: jinad pea * test: remote peas * test: toplogy tests 
with jinad * ci: parallel jobs * feat(tests): add pod integration tests (#3958) * feat(tests): add pod integration tests * fix(tests): make tests less flaky * fix(test): fix test * test(pea): remote pea topologies (#3961) * test(pea): remote pea simple topology * test: remote pea topologies * refactor: refactor streamer result handling (#3960) * feat(k8s): adapt K8s Pod for StarRouting (#3964) * test: optimize k8s test * test: increase timeout and use different namespace * test: optimize k8s test * test: build and load image when needed * test: refactor k8s test * test: fix image name error * test: fix k8s image load * test: fix typoe port expose * test: update tests in connection pool and handling * test: remove unused fixture * test: parameterize docker images * test: parameterize docker images * test: parameterize docker images * feat(k8s): adapt k8s pod for star routing * fix(k8s): dont overwrite add/remove function in pool * fix(k8s): some fixes * fix(k8s): some more fixes * fix(k8s): linting * fix(tests): fix tests * fix(tests): fix k8s unit tests * feat(k8s): complete k8s integration test * feat(k8s): finish k8s tests * feat(k8s): fix test * fix(tests): fix test with no name * feat(k8s): unify create/replace interface * feat(k8s): extract k8s port constants * fix(tests): fix tests * fix(tests): wait for runtime being ready in tests * feat(k8s): address comments Co-authored-by: bwanglzu <[email protected]> * feat(flow): adapt Flow for StarRouting (#3986) * feat(flow): add routes * feat(flow): adapt flow to star routing * style: fix overload and cli autocomplete * feat(flow): handle empty topologies * feat(k8s): allow k8s pool disabling * style: fix overload and cli autocomplete * fix(test): fix test with mock * fix(tests): fix more tests * feat(flow): clean up tests * style: fix overload and cli autocomplete * fix(tests): fix more tests * feat: add plot function (#3994) * fix(tests): avoid hanging tests * feat(flow): add type hinting * fix(test): fix duplicate exec name in test * fix(tests): fix more tests * fix(tests): enable jinad test again * fix(tests): random port fixture * fix(style): replace quotes Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Joan Fontanals <[email protected]> * feat(ci): bring back ci (#3997) * feat(ci): enable ci again * style: fix overload and cli autocomplete * feat(ci): add latency tracking * feat(ci): bring back some tests * fix(tests): remove invalid port test * feat(ci): disable daemon and distributed tests * fix(tests): fix entrypoint in hub test * fix(tests): wait for gateway to be ready * fix(test): fix more tests * feat(flow): do rolling update and scale sequentially * fix(tests): fix more tests * style: fix overload and cli autocomplete * feat: star routing hanging pods (#4011) * fix: try to handle hanging pods better * test: hanging pods test work * fix: fix topology graph problem * test: add unit test to graph * fix(tests): fix k8s tests * fix(test): fix k8s test * fix(test): fix k8s pool test * fix(test): fix k8s test * fix(test): fix k8s connection pool setting * fix(tests): make runtime test more reliable * fix(test): fix routes test * fix(tests): make rolling update test less flaky * feat(network): gurantee unique ports * feat(network): do round robin for shards * fix(ci): increase pytest timeout to 10 min Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Joan Fontanals <[email protected]> * fix(ci): fix ci file * feat(daemon): jinad pod for star routing * Revert "feat(daemon): jinad pod for star routing" 
This reverts commit ed9b37ac862af2e2e8d52df1ee51c0c331d76f92. * feat(daemon): remote jinad pod support (#4042) * feat(daemon): add pod tests for star routing * feat(daemon): add remote pod test * test(daemon): add remote pod arguments test * test(daemon): add async scale test * test(daemon): add rolling update test * test(daemon): fix host * feat(proto): remove message proto (#4051) * feat(proto): remove message proto * fix(tests): fix tests * fix(tests): fix some more tests * fix(tests): fix more tests * fix(tests): fix more tests * fix(tests): fix more tests * fix(tests): fix more tests * feat(proto): put docs back in data * fix(proto): clean up * feat(proto): clean up * fix(tests): skip latency tracking * fix(test): fix hub test * fix(tests): fix k8s test * fix(test): some test clean up * fix(style): clean up style issues * feat(proto): adjust for rebase * fix(tests): bring back latency tracking * fix(tests): fix merge accident * feat(proto): skip request serialization (#4074) * feat: add reduce to star routing (#4070) * feat: add reduce on shards to head runtime * test: add reduce integration tests with fixed order * feat: add reduce on needs * chore: get_docs_matrix_from_request becomes public * style: fix overload and cli autocomplete * docs: remove undeterministic results warning * fix: fix uses_after * test: assert correct num docs after reducing in test_external_pod * test: correct asserts after reduce in test_rolling_update * fix: no reduce if uses_after_address is set * fix: get_docs_from_request only if needed * fix: fix tests after merge * refactor: move reduce from data_request_handler to head * style: fix overload and cli autocomplete * chore: apply suggestions * fix: fix asserts * chore: minor test fix * chore: apply suggestions * test: remove flow tests with external executor (pea) * fix: fix test_expected_messages_routing * fix: fix test_func_joiner * test: adapt k8s test Co-authored-by: Jina Dev Bot <[email protected]> * fix(k8s): fix static pool config * fix: use custom protoc doc generator image (#4088) * fix: use custom protoc doc generator image * fix(docs): minor doc improvement * fix(docs): use custom image * fix(docs): copy docarray * fix: doc building local only * fix: timeout doc building * fix: use updated args when building ContainerPea * test: add container PeaFactory test * fix: force pea close on windows (#4098) * fix: dont reduce if uses exist (#4099) * fix: dont use reduce if uses exist * fix: adjust reduce tests * fix: adjust more reduce tests * fix: fix more tests * fix: adjust more tests * fix: ignore non jina resources (#4101) * feat(executor): enable async executors (#4102) * feat(daemon): daemon flow on star routing (#4096) * test(daemon): add remote flow test * feat(daemon): call scale in daemon * feat(daemon): remove tail args and identity * test(daemon): rename scalable executor * test(daemon): add a small delay in async test * feat(daemon): scale partial flow only * feat(daemon): call scale directly in partial flow store * test(daemon): use asyncio sleep * feat(daemon): enable flow level distributed tests * test(daemon): fix jinad env workspace config * test(daemon): fix pod test use new port rolling update * feat(daemon): enable distribuetd tests * test(daemon): remove duplicate tests and zed runtime test * test(daemon): fix stores unit test * feat(daemon): enable part of distributed tests * feat(daemon): enable part of distributed tests * test: correct test paths * test(daemon): add client test for remote flows * test(daemon): send a request 
with jina client * test(daemon): assert async generator * test(daemon): small interval between tests * test(daemon): add flow test for container runtime * test(daemon): add flow test for container runtime * test(daemon): fix executor name * test(daemon): fix executor name * test(daemon): use async client fetch result * test(daemon): finish container flow test * test(daemon): enable distributed in ci * test(daemon): enable distributed in ci * test(daemon): decare flows and pods * test(daemon): debug ci if else * test(daemon): debug ci if else * test(daemon): decare flows and pods * test(daemon): correct test paths * test(daemon): add small delay for async tests * fix: star routing fixes (#4100) * docs: update docs * fix: fix Request.__repr__ * docs: update flow remarks * docs: fix typo * test: add non_empty_fields test * chore: remove non_empty_fields test * feat: polling per endpoint (#4111) * feat(polling): polling per endpoint configurable * fix: adjust tests * feat(polling): extend documentation * style: fix overload and cli autocomplete * fix: clean up * fix: adjust more tests * fix: remove repeat from flaky test * fix: k8s test * feat(polling): address pr feedback * feat: improve docs Co-authored-by: Jina Dev Bot <[email protected]> * feat(grpc): support connect grpc server via ssl tunnel (#4092) * feat(grpc): support ssl grpc connect if port is 443 * fix(grpc): use https option instead of detect port automatically * chore: fix typo * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * test(networking): add test for peapods networking * fix: address comments Co-authored-by: Joan Fontanals <[email protected]> * feat(polling): unify polling args (#4113) * fix: several issues for jinad pods (#4119) * fix: activate for jinad pods * fix: dont expose worker pod in partial daemon * fix: workspace setting * fix: containerized flows * fix: hub test * feat(daemon): remote peas on star routing (#4112) * test(daemon): fix request in peas * test(daemon): fix request in peas * test(daemon): fix sync async client test * test(daemon): enable remote peas test * test(daemon): replace send message to send request * test(daemon): declare pea tests in ci * test(daemon): use pea args fixture * test(daemon): head pea use default host * test(daemon): fix peas topologies * test(daemon): fix pseudo naming * test(daemon): use default host as host * test(daemon): fix executor path * test(daemon): add remote worker back * test(daemon): skip local remote remote topology * fix: jinad pea test setup * fix: jinad pea tests * fix: remove invalid assertion Co-authored-by: jacobowitz <[email protected]> * feat: enable daemon tests again (#4132) * feat: enable daemon tests again * fix: remove bogy empty script file * fix: more jinad test fixes * style: fix overload and cli autocomplete * fix: scale and ru in jinad * fix: fix more jinad tests Co-authored-by: Jina Dev Bot <[email protected]> * fix: fix flow test * fix: improve pea tests reliability (#4136) Co-authored-by: Joan Fontanals <[email protected]> Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Deepankar Mahapatro <[email protected]> Co-authored-by: bwanglzu <[email protected]> Co-authored-by: AlaeddineAbdessalem <[email protected]> Co-authored-by: Zhaofeng Miao <[email protected]>
url: https://github.com/jina-ai/jina.git
code:
    def ports(self) -> Dict:
        # Container peas are started in separate docker containers, so we should not expose port_in here
        if (
            (
                self.params.pea_role
                and PeaRoleType.from_string(self.params.pea_role) != PeaRoleType.HEAD
            )
            and self.params.uses
            and self.params.uses.startswith('docker://')
        ):
            return {}
        else:
            return {f'{self.params.port_in}/tcp': self.params.port_in}
token_counts: 67
file_name: dependencies.py
language: Python
path: daemon/api/dependencies.py
commit_id: 933415bfa1f9eb89f935037014dfed816eb9815d
repo: jina
complexity: 5

id: 261,869
vocab_size: 47
ast_levels: 15
nloc: 16
n_ast_nodes: 233
n_identifiers: 23
n_ast_errors: 0
n_words: 60
n_whitespaces: 219
fun_name: common_voice
commit_message:
Update TTS.tts formatters (#1228) * Return Dict from tts formatters * Make style
url: https://github.com/coqui-ai/TTS.git
code:
    def common_voice(root_path, meta_file, ignored_speakers=None):
        txt_file = os.path.join(root_path, meta_file)
        items = []
        with open(txt_file, "r", encoding="utf-8") as ttf:
            for line in ttf:
                if line.startswith("client_id"):
                    continue
                cols = line.split("\t")
                text = cols[2]
                speaker_name = cols[0]
                # ignore speakers
                if isinstance(ignored_speakers, list):
                    if speaker_name in ignored_speakers:
                        continue
                wav_file = os.path.join(root_path, "clips", cols[1].replace(".mp3", ".wav"))
                items.append({"text": text, "audio_file": wav_file, "speaker_name": "MCV_" + speaker_name})
        return items
token_counts: 136
file_name: formatters.py
language: Python
path: TTS/tts/datasets/formatters.py
commit_id: 127118c6378168e3d36a1e5d19ede777fd20684f
repo: TTS
complexity: 5
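The loop above expects tab-separated rows whose first three columns are the client id, clip filename, and transcript. A sketch with one invented row (values are illustrative only, not from the real corpus):

    line = "spk123\tcommon_voice_en_001.mp3\thello world\t2\t0\n"
    cols = line.split("\t")
    # cols[0] speaker id, cols[1] clip file, cols[2] transcript
    item = {
        "text": cols[2],
        "audio_file": "clips/" + cols[1].replace(".mp3", ".wav"),
        "speaker_name": "MCV_" + cols[0],
    }
    # {'text': 'hello world', 'audio_file': 'clips/common_voice_en_001.wav', 'speaker_name': 'MCV_spk123'}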

id: 89,804
vocab_size: 7
ast_levels: 8
nloc: 6
n_ast_nodes: 39
n_identifiers: 6
n_ast_errors: 0
n_words: 7
n_whitespaces: 28
fun_name: get_response_from_control_silo
commit_message:
feat(hybrid-cloud): Create a base parser and middleware for webhooks (#42267) See [HC-468](https://getsentry.atlassian.net/browse/HC-468) Requires https://github.com/getsentry/sentry/pull/42260 This PR establishes the base parser that will be inherited from to forward webhooks to the appropriate integration. It is a slightly modified, portion of this [much larger PR](https://github.com/getsentry/sentry/pull/39169). It was split off in order to update that PR and make it more reviewable. Some background: The IntegrationControlMiddleware catches any incoming requests to the control silo with the `/extensions/` path prefix. It parses the provider out of the URL (e.g. `sentry.io/extensions/slack/something`), and passes the request along to that parser to determine how we handle the request (e.g. do we forward it to a region, multiple regions, handle it async, respond immediately from control, etc.) The BaseRequestParser provides a bunch of helpful methods to these parsers to make the actual integration-specific parsers as minimal as possible. They only need to implement a method for identifying the integration (e.g. from headers, from a signature, from a payload, etc), and how we respond to the webhook (allowing for different behaviour from different webhooks).
url: https://github.com/getsentry/sentry.git
code:
    def get_response_from_control_silo(self) -> HttpResponse:
        self._ensure_control_silo()
        return self.response_handler(self.request)
token_counts: 22
file_name: base.py
language: Python
path: src/sentry/middleware/integrations/parsers/base.py
commit_id: d8609112d6e2f373692b414acff6d4a2f7466750
repo: sentry
complexity: 1
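Per the commit message, a concrete parser only supplies integration lookup and response routing on top of BaseRequestParser. A hypothetical subclass shape inferred from that description (every method name here other than get_response_from_control_silo is invented for illustration, not Sentry's real API):

    class ExampleWebhookParser(BaseRequestParser):  # hypothetical parser
        provider = "example"  # slug parsed from /extensions/<provider>/...

        def get_integration(self):
            # Invented helper: identify the target integration, e.g. from a
            # signature header or the webhook payload.
            return self.resolve_integration_from_request(self.request)

        def get_response(self):
            # Respond synchronously from the control silo, using the method
            # shown in the record above.
            return self.get_response_from_control_silo()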

id: 286,575
vocab_size: 9
ast_levels: 8
nloc: 9
n_ast_nodes: 52
n_identifiers: 8
n_ast_errors: 0
n_words: 9
n_whitespaces: 23
fun_name: get_categories
commit_message:
[IMPROVE] Fix Docstring formatting/Fix missing, incomplete type hints (#3412) * Fixes * Update stocks_helper.py * update git-actions set-output to new format * Update stocks_helper.py * Update terminal_helper.py * removed LineAnnotateDrawer from qa_view * lint * few changes * updates * sdk auto gen modules done * Update stocks_helper.py * updates to changed imports, and remove first sdk_modules * Update generate_sdk.py * Update generate_sdk.py * pylint * revert stocks_helper * Update generate_sdk.py * Update sdk.py * Update generate_sdk.py * full auto generation, added sdk.py/controllers creation * missed enable forecasting * added running black in subprocess after sdk files generation completes * removed deleted sdk_arg_logger * comment out tests * property doc fix * clean up * Update generate_sdk.py * make trailmap classes useable for doc generation * Update generate_sdk.py * added lineon to trailmap class for linking to func in markdown * changed lineon to dict * added full_path to trailmap for linking in docs * updated portfolio * feat: initial files * feat: added meta head * feat: added funcdef * added func_def to trailmap attributes for markdown in docs, added missing type hints to covid functions * feat: added view and merged with jaun * Update generate_sdk.py * Update generate_sdk.py * Update generate_sdk.py * Update generate_sdk.py * init * fix returns * fix: random stuff * fix: random * fixed encoding issue on windows * fix: generate tabs * update * Update generate_sdk_markdown.py * Create .pydocstyle.ini * added type hint classes for views * fixes * alt, ba * alt-economy * Update finviz_compare_model.py * fixs * Update substack_model.py * Update generate_sdk.py * last of my section * porfolio * po * Update optimizer_model.py * fixing more things * few more * keys done * update * fixes * Update generate_sdk_markdown.py * Update generate_sdk_markdown.py * mypy forecast fix * Update generate_sdk_markdown.py * Update generate_sdk_markdown.py * Update generate_sdk_markdown.py * fixes * forecast fixes * one more fix * Update coinbase_model.py * Update generate_sdk_markdown.py Co-authored-by: Colin Delahunty <[email protected]> Co-authored-by: James Maslek <[email protected]> Co-authored-by: jose-donato <[email protected]> Co-authored-by: andrewkenreich <[email protected]>
url: https://github.com/OpenBB-finance/OpenBBTerminal.git
code:
    def get_categories(self) -> Union[Dict[Any, Any], List[Any]]:
        return self.coin.get("categories", {})
token_counts: 33
file_name: pycoingecko_model.py
language: Python
path: openbb_terminal/cryptocurrency/due_diligence/pycoingecko_model.py
commit_id: 59d8b36bb0467a1a99513b10e8b8471afaa56fd6
repo: OpenBBTerminal
complexity: 1

id: 156,683
vocab_size: 30
ast_levels: 11
nloc: 9
n_ast_nodes: 61
n_identifiers: 6
n_ast_errors: 0
n_words: 32
n_whitespaces: 132
fun_name: codes
commit_message:
Add ``AttributeNotImplementedError`` for properties so IPython glob search works (#9231)
url: https://github.com/dask/dask.git
code:
    def codes(self):
        if not self.known:
            msg = (
                "`df.column.cat.codes` with unknown categories is not "
                "supported. Please use `column.cat.as_known()` or "
                "`df.categorize()` beforehand to ensure known categories"
            )
            raise AttributeNotImplementedError(msg)
        return self._property_map("codes")
token_counts: 31
file_name: categorical.py
language: Python
path: dask/dataframe/categorical.py
commit_id: ecbab9d7cb52a2d96cb1b8dc397a87811e6f8059
repo: dask
complexity: 2

id: 276,382
vocab_size: 50
ast_levels: 19
nloc: 30
n_ast_nodes: 313
n_identifiers: 26
n_ast_errors: 0
n_words: 75
n_whitespaces: 333
fun_name: generate_combinations_with_testcase_name
commit_message:
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
url: https://github.com/keras-team/keras.git
code:
    def generate_combinations_with_testcase_name(**kwargs):
        sort_by_key = lambda k: k[0]
        combinations = []
        for key, values in sorted(kwargs.items(), key=sort_by_key):
            if not isinstance(values, list):
                values = [values]
            combinations.append([(key, value) for value in values])

        combinations = [
            collections.OrderedDict(result)
            for result in itertools.product(*combinations)
        ]
        named_combinations = []
        for combination in combinations:
            assert isinstance(combination, collections.OrderedDict)
            name = "".join(
                [
                    "_{}_{}".format(
                        "".join(filter(str.isalnum, key)),
                        "".join(filter(str.isalnum, str(value))),
                    )
                    for key, value in combination.items()
                ]
            )
            named_combinations.append(
                collections.OrderedDict(
                    list(combination.items())
                    + [("testcase_name", "_test{}".format(name))]
                )
            )

        return named_combinations
token_counts: 194
file_name: test_utils.py
language: Python
path: keras/testing_infra/test_utils.py
commit_id: 84afc5193d38057e2e2badf9c889ea87d80d8fbf
repo: keras
complexity: 7
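A worked call of the helper above (output written out by hand from the logic shown, not captured from a real run):

    combos = generate_combinations_with_testcase_name(mode=["eager", "graph"], fused=True)
    # -> [OrderedDict([('fused', True), ('mode', 'eager'),
    #                  ('testcase_name', '_test_fused_True_mode_eager')]),
    #     OrderedDict([('fused', True), ('mode', 'graph'),
    #                  ('testcase_name', '_test_fused_True_mode_graph')])]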

id: 290,665
vocab_size: 77
ast_levels: 14
nloc: 31
n_ast_nodes: 470
n_identifiers: 34
n_ast_errors: 0
n_words: 116
n_whitespaces: 437
fun_name: _parse_routing_response
commit_message:
Use HERE API v8 (#80892) * Use HERE API v8 Signed-off-by: Kevin Stillhammer <[email protected]> * Add migration Signed-off-by: Kevin Stillhammer <[email protected]> * Catch correct voluptuous error Signed-off-by: Kevin Stillhammer <[email protected]> * Use list comprehension for transit values * Add migration alternative Signed-off-by: Kevin Stillhammer <[email protected]>
url: https://github.com/home-assistant/core.git
code:
    def _parse_routing_response(self, response) -> HERETravelTimeData:
        section: dict = response["routes"][0]["sections"][0]
        summary: dict = section["summary"]
        mapped_origin_lat: float = section["departure"]["place"]["location"]["lat"]
        mapped_origin_lon: float = section["departure"]["place"]["location"]["lng"]
        mapped_destination_lat: float = section["arrival"]["place"]["location"]["lat"]
        mapped_destination_lon: float = section["arrival"]["place"]["location"]["lng"]
        distance: float = summary["length"]
        if self.config.units == IMPERIAL_UNITS:
            # Convert to miles.
            distance = DistanceConverter.convert(distance, LENGTH_METERS, LENGTH_MILES)
        else:
            # Convert to kilometers
            distance = distance / 1000
        origin_name: str | None = None
        if (names := section["spans"][0].get("names")) is not None:
            origin_name = names[0]["value"]
        destination_name: str | None = None
        if (names := section["spans"][-1].get("names")) is not None:
            destination_name = names[0]["value"]
        return HERETravelTimeData(
            {
                ATTR_ATTRIBUTION: None,
                ATTR_DURATION: round(summary["baseDuration"] / 60),  # type: ignore[misc]
                ATTR_DURATION_IN_TRAFFIC: round(summary["duration"] / 60),
                ATTR_DISTANCE: distance,
                ATTR_ORIGIN: f"{mapped_origin_lat},{mapped_origin_lon}",
                ATTR_DESTINATION: f"{mapped_destination_lat},{mapped_destination_lon}",
                ATTR_ORIGIN_NAME: origin_name,
                ATTR_DESTINATION_NAME: destination_name,
            }
        )
token_counts: 270
file_name: coordinator.py
language: Python
path: homeassistant/components/here_travel_time/coordinator.py
commit_id: aedbfdabee67e3e7b915b842d28f151a08cb3d7f
repo: core
complexity: 4

id: 20,353
vocab_size: 83
ast_levels: 14
nloc: 24
n_ast_nodes: 366
n_identifiers: 44
n_ast_errors: 0
n_words: 107
n_whitespaces: 371
fun_name: format
commit_message:
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
url: https://github.com/pypa/pipenv.git
code:
    def format(self, tokensource, outfile):
        self._create_drawables(tokensource)
        self._draw_line_numbers()
        im = Image.new(
            'RGB',
            self._get_image_size(self.maxlinelength, self.maxlineno),
            self.background_color
        )
        self._paint_line_number_bg(im)
        draw = ImageDraw.Draw(im)
        # Highlight
        if self.hl_lines:
            x = self.image_pad + self.line_number_width - self.line_number_pad + 1
            recth = self._get_line_height()
            rectw = im.size[0] - x
            for linenumber in self.hl_lines:
                y = self._get_line_y(linenumber - 1)
                draw.rectangle([(x, y), (x + rectw, y + recth)], fill=self.hl_color)
        for pos, value, font, text_fg, text_bg in self.drawables:
            if text_bg:
                text_size = draw.textsize(text=value, font=font)
                draw.rectangle([pos[0], pos[1], pos[0] + text_size[0], pos[1] + text_size[1]], fill=text_bg)
            draw.text(pos, value, font=font, fill=text_fg)
        im.save(outfile, self.image_format.upper())


    # Add one formatter per format, so that the "-f gif" option gives the correct result
    # when used in pygmentize.
token_counts: 244
file_name: img.py
language: Python
path: pipenv/patched/notpip/_vendor/pygments/formatters/img.py
commit_id: f3166e673fe8d40277b804d35d77dcdb760fc3b3
repo: pipenv
complexity: 5

id: 180,363
vocab_size: 22
ast_levels: 13
nloc: 9
n_ast_nodes: 119
n_identifiers: 14
n_ast_errors: 0
n_words: 31
n_whitespaces: 102
fun_name: call_function
commit_message:
Callable blocks (#1437) * format backend * blocks callable * call blocks * format * fixed upload * fix mix * formatting * formatting * formatting * added serialization/deserialization for video * formatting * blocks * formatting * fix tests * formatting
url: https://github.com/gradio-app/gradio.git
code:
    async def call_function(self, fn_index, processed_input):
        block_fn = self.fns[fn_index]

        start = time.time()
        if inspect.iscoroutinefunction(block_fn.fn):
            prediction = await block_fn.fn(*processed_input)
        else:
            prediction = await run_in_threadpool(block_fn.fn, *processed_input)
        duration = time.time() - start
        return prediction, duration
token_counts: 72
file_name: blocks.py
language: Python
path: gradio/blocks.py
commit_id: 71bcfdbe929c83c10d761018042078041e956b81
repo: gradio
complexity: 2
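The dispatch above awaits coroutine functions directly and pushes blocking callables onto a thread pool so they never stall the event loop. A self-contained sketch of the same pattern (the helper names here are invented for illustration):

    import asyncio
    import inspect
    import time

    from starlette.concurrency import run_in_threadpool

    def blocking_double(x):
        time.sleep(0.1)  # stands in for blocking work
        return x * 2

    async def async_double(x):
        await asyncio.sleep(0.1)
        return x * 2

    async def dispatch(fn, *args):
        # Same branch as call_function above: await coroutines, thread-pool the rest.
        if inspect.iscoroutinefunction(fn):
            return await fn(*args)
        return await run_in_threadpool(fn, *args)

    print(asyncio.run(dispatch(blocking_double, 3)))  # 6
    print(asyncio.run(dispatch(async_double, 3)))     # 6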

id: 113,058
vocab_size: 47
ast_levels: 12
nloc: 22
n_ast_nodes: 236
n_identifiers: 27
n_ast_errors: 1
n_words: 62
n_whitespaces: 194
fun_name: test_multi_trial
commit_message:
[retiarii] fix experiment does not exit after done (#4916)
url: https://github.com/microsoft/nni.git
code:
    def test_multi_trial():
        evaluator_kwargs = {
            'max_epochs': 1
        }
        to_test = [
            # (model, evaluator)
            _mnist_net('simple', evaluator_kwargs),
            _mnist_net('simple_value_choice', evaluator_kwargs),
            _mnist_net('value_choice', evaluator_kwargs),
            _mnist_net('repeat', evaluator_kwargs),
            _mnist_net('custom_op', evaluator_kwargs),
        ]
        for base_model, evaluator in to_test:
            search_strategy = strategy.Random()
            exp = RetiariiExperiment(base_model, evaluator, strategy=search_strategy)
            exp_config = RetiariiExeConfig('local')
            exp_config.experiment_name = 'mnist_unittest'
            exp_config.trial_concurrency = 1
            exp_config.max_trial_number = 1
            exp_config.training_service.use_active_gpu = False
            exp.run(exp_config, 8080)
            assert isinstance(exp.export_top_models()[0], dict)
            exp.stop()


    python_script = 

    @pytest.mark.timeout(600)
ast_errors:
    @pytest.mark.timeout(600)
token_counts: 131
file_name: test_multitrial.py
language: Python
path: test/ut/retiarii/test_multitrial.py
commit_id: 2bc984412c0fec3dae84e6f6a8253a615d2b6ebd
repo: nni
complexity: 2

id: 260,489
vocab_size: 90
ast_levels: 15
nloc: 31
n_ast_nodes: 331
n_identifiers: 41
n_ast_errors: 0
n_words: 123
n_whitespaces: 439
fun_name: fit
commit_message:
MAINT validate parameter in `EmpiricalCovariance`, `MinCovDet`, and `EllipticEnvelope` (#23842) Co-authored-by: Jérémie du Boisberranger <[email protected]>
url: https://github.com/scikit-learn/scikit-learn.git
code:
    def fit(self, X, y=None):
        self._validate_params()
        X = self._validate_data(X, ensure_min_samples=2, estimator="MinCovDet")
        random_state = check_random_state(self.random_state)
        n_samples, n_features = X.shape
        # check that the empirical covariance is full rank
        if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
            warnings.warn(
                "The covariance matrix associated to your dataset is not full rank"
            )
        # compute and store raw estimates
        raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
            X,
            support_fraction=self.support_fraction,
            cov_computation_method=self._nonrobust_covariance,
            random_state=random_state,
        )
        if self.assume_centered:
            raw_location = np.zeros(n_features)
            raw_covariance = self._nonrobust_covariance(
                X[raw_support], assume_centered=True
            )
            # get precision matrix in an optimized way
            precision = linalg.pinvh(raw_covariance)
            raw_dist = np.sum(np.dot(X, precision) * X, 1)
        self.raw_location_ = raw_location
        self.raw_covariance_ = raw_covariance
        self.raw_support_ = raw_support
        self.location_ = raw_location
        self.support_ = raw_support
        self.dist_ = raw_dist
        # obtain consistency at normal models
        self.correct_covariance(X)
        # re-weight estimator
        self.reweight_covariance(X)

        return self
token_counts: 211
file_name: _robust_covariance.py
language: Python
path: sklearn/covariance/_robust_covariance.py
commit_id: 47dd54f727f09a47e84fb4cb53d33f9b0f239edb
repo: scikit-learn
complexity: 3

id: 101,691
vocab_size: 6
ast_levels: 6
nloc: 4
n_ast_nodes: 24
n_identifiers: 3
n_ast_errors: 0
n_words: 6
n_whitespaces: 20
fun_name: thumbnails
commit_message:
Alignments update: - Store face embeddings in PNG header when sorting - typing + refactor - Update alignments keys for 'identity' and 'video_meta' + bump to v2.3 - General typing fixes
url: https://github.com/deepfakes/faceswap.git
code:
    def thumbnails(self) -> "Thumbnails":
        return self._thumbnails
token_counts: 12
file_name: alignments.py
language: Python
path: lib/align/alignments.py
commit_id: e5356a417e7c2124e75c4a2994ed604fc0a3cc74
repo: faceswap
complexity: 1

id: 129,565
vocab_size: 62
ast_levels: 13
nloc: 25
n_ast_nodes: 350
n_identifiers: 28
n_ast_errors: 0
n_words: 100
n_whitespaces: 388
fun_name: test_apex_dqn_compilation_and_per_worker_epsilon_values
commit_message:
[RLlib] Preparatory PR for multi-agent multi-GPU learner (alpha-star style) #03 (#21652)
url: https://github.com/ray-project/ray.git
code:
    def test_apex_dqn_compilation_and_per_worker_epsilon_values(self):
        config = apex.APEX_DEFAULT_CONFIG.copy()
        config["num_workers"] = 3
        config["num_gpus"] = 0
        config["learning_starts"] = 1000
        config["prioritized_replay"] = True
        config["timesteps_per_iteration"] = 100
        config["min_time_s_per_reporting"] = 1
        config["optimizer"]["num_replay_buffer_shards"] = 1

        for _ in framework_iterator(config, with_eager_tracing=True):
            plain_config = config.copy()
            trainer = apex.ApexTrainer(config=plain_config, env="CartPole-v0")

            # Test per-worker epsilon distribution.
            infos = trainer.workers.foreach_policy(
                lambda p, _: p.get_exploration_state())
            expected = [0.4, 0.016190862, 0.00065536]
            check([i["cur_epsilon"] for i in infos], [0.0] + expected)

            check_compute_single_action(trainer)

            for i in range(2):
                results = trainer.train()
                check_train_results(results)
                print(results)

            # Test again per-worker epsilon distribution
            # (should not have changed).
            infos = trainer.workers.foreach_policy(
                lambda p, _: p.get_exploration_state())
            check([i["cur_epsilon"] for i in infos], [0.0] + expected)

            trainer.stop()
220
test_apex_dqn.py
Python
rllib/agents/dqn/tests/test_apex_dqn.py
d5bfb7b7da6f8ec505dd8ed69f0be419decfdcc0
ray
5
303,357
51
13
24
246
21
0
63
307
async_update
Remove attribution from extra state attributes (#76172)
https://github.com/home-assistant/core.git
async def async_update(self):
    try:
        await self.device.update()
        if self._removed:
            _LOGGER.debug("%s reacquired", self.name)
        self._removed = False
    except AgentError:
        # server still available - camera error
        if self.device.client.is_available and not self._removed:
            _LOGGER.error("%s lost", self.name)
            self._removed = True

    self._attr_icon = "mdi:camcorder-off"
    if self.is_on:
        self._attr_icon = "mdi:camcorder"
    self._attr_available = self.device.client.is_available
    self._attr_extra_state_attributes = {
        "editable": False,
        "enabled": self.is_on,
        "connected": self.connected,
        "detected": self.is_detected,
        "alerted": self.is_alerted,
        "has_ptz": self.device.has_ptz,
        "alerts_enabled": self.device.alerts_active,
    }
148
camera.py
Python
homeassistant/components/agent_dvr/camera.py
22eba6ce1ba3611421c526b61432ac90592eff08
core
6
298,007
38
12
19
126
10
0
51
272
_update_effect_list
String formatting and max line length - Part 6 (#84525)
https://github.com/home-assistant/core.git
def _update_effect_list(self, effect_list):
    if effect_list in (None, "None", ""):
        self._effect_list = None
        return

    if not isinstance(effect_list, list):
        _LOGGER.error(
            (
                "Received invalid effect list: %s for entity %s. Expected list of"
                " strings"
            ),
            effect_list,
            self.entity_id,
        )
        self._effect_list = None
        return

    if len(effect_list) == 0:
        self._effect_list = None
        return

    self._effect_list = effect_list
75
light.py
Python
homeassistant/components/template/light.py
8819634b613f6bfd55885283bab86c3852ae40c4
core
4
26,067
14
8
4
50
7
0
17
38
test_is_user_address_limit_reached_false
Limit number of user addresses (#9205) * Add migration for clearing user addresses * Adjust checkout flow - do not save address for client with more than max addresses value * Remove the oldest user address when new address above limit is added
https://github.com/saleor/saleor.git
def test_is_user_address_limit_reached_false(customer_user, address):
    # given
    customer_user.addresses.set([address])

    # when
    limit_reached = is_user_address_limit_reached(customer_user)

    # then
    assert limit_reached is False
28
test_utils.py
Python
saleor/account/tests/test_utils.py
9ebd28ab8a3b7f6c341ef8474820ab225548fe0e
saleor
1
212,377
19
13
8
81
13
0
22
72
port
12089 support unix socket (#12091) * Support unix socket with bokeh server This commit adds a command line argument to bokeh serve to allow bokeh server to listen to a unix socket. Usage: bokeh serve --unix-socket /path/to/socket Reference: https://github.com/bokeh/bokeh/issues/12089 * Fix failing tests * Fix test * Address review comments * Add end-to-end testing * Update env * Remove temp files * Fix failing tests * Fix test * Fix mypy fail * Address review comments * Fix tests * Fix tests * Address review comments * Fix tests * Fix tests * Reduce sleep for unit tests * Fix test for windows * Use absolute path * Disable test in windows * Fix code quality tests * Raise error on win32 * Fix tests * Fix tests * Address review comments * Fix tests * Fix mypy fails * Fix tests
https://github.com/bokeh/bokeh.git
def port(self) -> int | None:
    sock = next(
        sock
        for sock in self._http._sockets.values()
        if sock.family in (socket.AF_INET, socket.AF_INET6)
    )
    return sock.getsockname()[1]
51
server.py
Python
bokeh/server/server.py
dd9668e0b61d961c9e4bb76d9d0951d4f82e1dad
bokeh
3
129,371
30
16
30
162
8
0
47
393
testAutoscalingNewNode
Tune test autoscaler / fix stale node detection bug (#21516) See #21458. Currently, Tune keeps its own list of alive node IPs, but this information is only updated every 10 seconds and is usually stale when a new node is added. Because of this, the first trial scheduled on this node is usually marked as failed. This PR adds a test confirming this behavior and gets rid of the unneeded code path. Co-authored-by: Xiaowei Jiang <[email protected]> Co-authored-by: xwjiang2010 <[email protected]>
https://github.com/ray-project/ray.git
def testAutoscalingNewNode(self):
    self.cluster.update_config({
        "provider": {
            "head_resources": {
                "CPU": 4,
                "GPU": 0
            }
        },
        "available_node_types": {
            "ray.worker.cpu": {
                "resources": {
                    "CPU": 4
                },
                "min_workers": 0,  # No minimum nodes
                "max_workers": 2,
            },
            "ray.worker.gpu": {
                "min_workers": 0,
                "max_workers": 0,  # No GPU nodes
            }
        },
    })
    self.cluster.start()
    self.cluster.connect(client=True, timeout=120)
114
test_multinode_sync.py
Python
python/ray/tune/tests/test_multinode_sync.py
8fd5b7a5a80e88f7f70c4ebb179129c8eed65b72
ray
1
93,204
110
11
60
694
41
0
176
581
test_issue_message_builder
test(msteams): Add tests for building group card (#36834) Add tests for build_group_card which builds issues cards. Does NOT test all visual aspects of the card. Only ensures that certain important elements are present and the basic structure of the card is correct.
https://github.com/getsentry/sentry.git
def test_issue_message_builder(self):
    self.event1.data["metadata"].update({"value": "some error"})
    self.group1.data["metadata"].update({"value": "some error"})
    self.event1.data["type"] = self.group1.data["type"] = "error"

    issue_card = build_group_card(
        group=self.group1, event=self.event1, rules=self.rules, integration=self.integration
    )

    body = issue_card["body"]
    assert 4 == len(body)

    title = body[0]
    assert "oh no" in title["text"]
    assert TextSize.LARGE == title["size"]
    assert TextWeight.BOLDER == title["weight"]

    description = body[1]
    assert "some error" == description["text"]
    assert TextWeight.BOLDER == description["weight"]

    footer = body[2]
    assert "ColumnSet" == footer["type"]
    assert 3 == len(footer["columns"])

    logo = footer["columns"][0]["items"][0]
    assert "20px" == logo["height"]

    issue_id_and_rule = footer["columns"][1]["items"][0]
    assert self.group1.qualified_short_id in issue_id_and_rule["text"]
    assert "rule1" in issue_id_and_rule["text"]
    assert "+1 other" in issue_id_and_rule["text"]

    date = footer["columns"][2]["items"][0]
    assert (
        re.match(
            r,
            date["text"],
            re.VERBOSE,
        )
        is not None
    )

    actions_container = body[3]
    assert "Container" == actions_container["type"]

    action_set = actions_container["items"][0]
    assert "ActionSet" == action_set["type"]

    actions = action_set["actions"]
    for action in actions:
        assert ActionType.SHOW_CARD == action["type"]
        card_body = action["card"]["body"]
        assert 1 <= len(card_body)
        assert "Input.ChoiceSet" == card_body[-1]["type"]

    resolve_action, ignore_action, assign_action = actions
    assert "Resolve" == resolve_action["title"]
    assert "Ignore" == ignore_action["title"]
    assert "Assign" == assign_action["title"]

    # Check if card is serializable to json
    card_json = json.dumps(issue_card)
    assert card_json[0] == "{" and card_json[-1] == "}"
402
test_message_builder.py
Python
tests/sentry/integrations/msteams/test_message_builder.py
db35e231ceababe8c9f5ca7b5d2ca685f07c7d5b
sentry
3
267,974
6
7
3
28
4
0
6
20
id
ansible-test - Use more native type hints. (#78435) * ansible-test - Use more native type hints. Simple search and replace to switch from comments to native type hints for return types of functions with no arguments. * ansible-test - Use more native type hints. Conversion of simple single-line function annotation type comments to native type hints. * ansible-test - Use more native type hints. Conversion of single-line function annotation type comments with default values to native type hints. * ansible-test - Use more native type hints. Manual conversion of type annotation comments for functions which have pylint directives.
https://github.com/ansible/ansible.git
def id(self) -> str: return self.inspection['Id']
15
docker_util.py
Python
test/lib/ansible_test/_internal/docker_util.py
3eb0485dd92c88cc92152d3656d94492db44b183
ansible
1
19,966
39
14
16
194
20
0
49
238
rollback
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyproject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
https://github.com/pypa/pipenv.git
def rollback(self) -> None:
    for p in self._moves:
        logger.info("Moving to %s\n from %s", *p)

    for new_path, path in self._moves:
        try:
            logger.debug("Replacing %s from %s", new_path, path)
            if os.path.isfile(new_path) or os.path.islink(new_path):
                os.unlink(new_path)
            elif os.path.isdir(new_path):
                rmtree(new_path)
            renames(path, new_path)
        except OSError as ex:
            logger.error("Failed to restore %s", new_path)
            logger.debug("Exception: %s", ex)

    self.commit()
116
req_uninstall.py
Python
pipenv/patched/notpip/_internal/req/req_uninstall.py
f3166e673fe8d40277b804d35d77dcdb760fc3b3
pipenv
7
100,929
43
13
13
157
12
0
52
189
_get_tooltip
Core updates - Change loss loading mechanism - Autosize tooltips based on content size - Random linting + code modernisation
https://github.com/deepfakes/faceswap.git
def _get_tooltip(widget, text=None, text_variable=None):
    _RECREATE_OBJECTS["tooltips"][str(widget)] = {"text": text,
                                                  "text_variable": text_variable}
    logger.debug("Adding to tooltips dict: (widget: %s. text: '%s')", widget, text)

    wrap_length = 400
    if text is not None:
        while True:
            if len(text) < wrap_length * 5:
                break
            if wrap_length > 720:
                break
            wrap_length = int(wrap_length * 1.10)

    return Tooltip(widget, text=text, text_variable=text_variable, wrap_length=wrap_length)
100
control_helper.py
Python
lib/gui/control_helper.py
bad5025aea1adb9126580e14e064e6c99089243d
faceswap
5
271,893
6
8
2
27
3
0
6
12
is_feature_layer
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def is_feature_layer(layer): return getattr(layer, "_is_feature_layer", False)
15
training_utils_v1.py
Python
keras/engine/training_utils_v1.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
1
152,834
23
9
9
100
13
0
24
56
get_deepbooru_tags
Refactored the deepbooru module to improve speed on running multiple interrogations in a row. Added the option to generate deepbooru tags for textual inversion preprocessing.
https://github.com/AUTOMATIC1111/stable-diffusion-webui.git
def get_deepbooru_tags(pil_image, threshold=0.5):
    from modules import shared  # prevents circular reference
    create_deepbooru_process(threshold)
    shared.deepbooru_process_return["value"] = -1
    shared.deepbooru_process_queue.put(pil_image)
    while shared.deepbooru_process_return["value"] == -1:
        time.sleep(0.2)
    release_process()
    return ret
61
deepbooru.py
Python
modules/deepbooru.py
1f92336be768d235c18a82acb2195b7135101ae7
stable-diffusion-webui
2
274,533
50
17
15
160
24
0
59
246
__call__
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def __call__(self, y_true, y_pred, sample_weight=None):
    # If we are wrapping a lambda function strip '<>' from the name as it is not
    # accepted in scope name.
    graph_ctx = tf_utils.graph_context_for_symbolic_tensors(
        y_true, y_pred, sample_weight
    )
    with backend.name_scope(self._name_scope), graph_ctx:
        if tf.executing_eagerly():
            call_fn = self.call
        else:
            call_fn = tf.__internal__.autograph.tf_convert(
                self.call, tf.__internal__.autograph.control_status_ctx()
            )
        losses = call_fn(y_true, y_pred)
        return losses_utils.compute_weighted_loss(
            losses, sample_weight, reduction=self._get_reduction()
        )
101
losses.py
Python
keras/losses.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
2
107,434
32
9
24
84
8
0
38
71
use
Rename outdated seaborn styles. They are kept available under a versioned name for backcompat.
https://github.com/matplotlib/matplotlib.git
def use(style):
    if isinstance(style, (str, Path)) or hasattr(style, 'keys'):
        # If name is a single str, Path or dict, make it a single element list.
        styles = [style]
    else:
        styles = style

    style_alias = {'mpl20': 'default', 'mpl15': 'classic'}
144
core.py
Python
lib/matplotlib/style/core.py
06c92b8a8b81bbdacc3baf7ff0f1566c53fbe1b0
matplotlib
8
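A short sketch of the argument forms the branch above normalizes (a single str, Path, or dict is wrapped into a one-element list); the style names assume stock matplotlib styles:

import matplotlib.pyplot as plt

plt.style.use('ggplot')                                   # single style name
plt.style.use({'axes.grid': True})                        # bare rc-params dict
plt.style.use(['dark_background', {'axes.grid': True}])   # list mixing both forms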
256,254
126
16
35
328
27
0
226
629
_words_to_tokens
Apply black formatting (#2115) * Testing black on ui/ * Applying black on docstores * Add latest docstring and tutorial changes * Create a single GH action for Black and docs to reduce commit noise to the minimum, slightly refactor the OpenAPI action too * Remove comments * Relax constraints on pydoc-markdown * Split temporary black from the docs. Pydoc-markdown was obsolete and needs a separate PR to upgrade * Fix a couple of bugs * Add a type: ignore that was missing somehow * Give path to black * Apply Black * Apply Black * Relocate a couple of type: ignore * Update documentation * Make Linux CI run after applying Black * Triggering Black * Apply Black * Remove dependency, does not work well * Remove manually double trailing commas * Update documentation Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
https://github.com/deepset-ai/haystack.git
def _words_to_tokens(words, word_offsets, tokenizer):
    tokens = []
    token_offsets = []
    start_of_word = []
    idx = 0
    for w, w_off in zip(words, word_offsets):
        idx += 1
        if idx % 500000 == 0:
            logger.info(idx)
        # Get (subword) tokens of single word.

        # empty / pure whitespace
        if len(w) == 0:
            continue
        # For the first word of a text: we just call the regular tokenize function.
        # For later words: we need to call it with add_prefix_space=True to get the same results with roberta / gpt2 tokenizer
        # see discussion here. https://github.com/huggingface/transformers/issues/1196
        elif len(tokens) == 0:
            tokens_word = tokenizer.tokenize(w)
        else:
            if type(tokenizer) == RobertaTokenizer:
                tokens_word = tokenizer.tokenize(w, add_prefix_space=True)
            else:
                tokens_word = tokenizer.tokenize(w)
        # Sometimes the tokenizer returns no tokens
        if len(tokens_word) == 0:
            continue
        tokens += tokens_word

        # get global offset for each token in word + save marker for first tokens of a word
        first_tok = True
        for tok in tokens_word:
            token_offsets.append(w_off)
            # Depending on the tokenizer type special chars are added to distinguish tokens with preceeding
            # whitespace (=> "start of a word"). We need to get rid of these to calculate the original length of the token
            orig_tok = re.sub(SPECIAL_TOKENIZER_CHARS, "", tok)
            # Don't use length of unk token for offset calculation
            if orig_tok == tokenizer.special_tokens_map["unk_token"]:
                w_off += 1
            else:
                w_off += len(orig_tok)
            if first_tok:
                start_of_word.append(True)
                first_tok = False
            else:
                start_of_word.append(False)

    return tokens, token_offsets, start_of_word
195
tokenization.py
Python
haystack/modeling/model/tokenization.py
a59bca366174d9c692fa19750c24d65f47660ef7
haystack
10
291,784
72
13
45
567
22
0
162
705
handle_event
Create a UUID from given LG soundbar device name (#81918) Co-authored-by: Martin Hjelmare <[email protected]> Fixes https://github.com/home-assistant/core/issues/77524
https://github.com/home-assistant/core.git
def handle_event(self, response):
    data = response["data"] if "data" in response else {}
    if response["msg"] == "EQ_VIEW_INFO":
        if "i_bass" in data:
            self._bass = data["i_bass"]
        if "i_treble" in data:
            self._treble = data["i_treble"]
        if "ai_eq_list" in data:
            self._equalisers = data["ai_eq_list"]
        if "i_curr_eq" in data:
            self._equaliser = data["i_curr_eq"]
    elif response["msg"] == "SPK_LIST_VIEW_INFO":
        if "i_vol" in data:
            self._volume = data["i_vol"]
        if "i_vol_min" in data:
            self._volume_min = data["i_vol_min"]
        if "i_vol_max" in data:
            self._volume_max = data["i_vol_max"]
        if "b_mute" in data:
            self._mute = data["b_mute"]
        if "i_curr_func" in data:
            self._function = data["i_curr_func"]
    elif response["msg"] == "FUNC_VIEW_INFO":
        if "i_curr_func" in data:
            self._function = data["i_curr_func"]
        if "ai_func_list" in data:
            self._functions = data["ai_func_list"]
    elif response["msg"] == "SETTING_VIEW_INFO":
        if "i_rear_min" in data:
            self._rear_volume_min = data["i_rear_min"]
        if "i_rear_max" in data:
            self._rear_volume_max = data["i_rear_max"]
        if "i_rear_level" in data:
            self._rear_volume = data["i_rear_level"]
        if "i_woofer_min" in data:
            self._woofer_volume_min = data["i_woofer_min"]
        if "i_woofer_max" in data:
            self._woofer_volume_max = data["i_woofer_max"]
        if "i_woofer_level" in data:
            self._woofer_volume = data["i_woofer_level"]
        if "i_curr_eq" in data:
            self._equaliser = data["i_curr_eq"]
        if "s_user_name" in data:
            self._attr_name = data["s_user_name"]
    self.schedule_update_ha_state()
305
media_player.py
Python
homeassistant/components/lg_soundbar/media_player.py
4167edc52d92577d9bceb800d6699167a1a8b4c8
core
25
266,053
6
6
2
20
4
0
6
20
prep_related_object_data
4347 Add JSON/YAML import support for all objects (#10367) * 4347 initial code for json import * 4347 initial code for json import * Clean up form processing logic * Consolidate import forms * Consolidate object import/update logic * Clean up bulk import view Co-authored-by: jeremystretch <[email protected]>
https://github.com/netbox-community/netbox.git
def prep_related_object_data(self, parent, data): return data
12
bulk_views.py
Python
netbox/netbox/views/generic/bulk_views.py
93e7457e0d84ad24cba22cc5c0811777ddebf94e
netbox
1
102,252
40
11
120
198
25
0
54
108
infer_concrete_type_builder
Per-overload torch.ops API (#67254) Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/67254 Fixes https://github.com/pytorch/pytorch/issues/65997 TODO: disallow `default` as an overload name for aten operators. BC breaking: `output = torch.ops._test.leaky_relu(self=torch.tensor(-1.0))` now fails with the error `TypeError: __call__() got multiple values for argument 'self'` since we call into `OpOverloadBundle`'s `__call__` method that has `self` bound to it as its first argument. cc ezyang gchanan Test Plan: Imported from OSS Reviewed By: albanD Differential Revision: D33262228 Pulled By: anjali411 fbshipit-source-id: 600dbf511514ea9b41aea3e6b1bc1102dab08909
https://github.com/pytorch/pytorch.git
def infer_concrete_type_builder(nn_module, share_types=True):
    concrete_type_builder = torch._C.ConcreteModuleTypeBuilder(type(nn_module))
    if isinstance(nn_module, (torch.nn.ModuleDict)):
        concrete_type_builder.set_module_dict()
    if isinstance(nn_module, (torch.nn.ModuleList, torch.nn.Sequential)):
        concrete_type_builder.set_module_list()

    class_annotations = getattr(nn_module, '__annotations__', {})
    if isinstance(nn_module, (torch.ao.quantization.QuantWrapper)):
        class_annotations = {}

    # Get user-annotated ignored attributes.
    user_annotated_ignored_attributes = getattr(nn_module, "__jit_ignored_attributes__", list())
    concrete_type_builder.add_ignored_attributes(user_annotated_ignored_attributes)
    ignored_properties = jit_ignored_properties(nn_module)

    # try to infer the type from type annotation or from the object itself
846
_recursive.py
Python
torch/jit/_recursive.py
8e6d1738a41cb28c36db3dc5e10fd812417673de
pytorch
37
293,923
27
13
15
126
14
0
35
113
test_from_event_to_delete_state
Convert unindexed domain queries to entity_id queries (#68404)
https://github.com/home-assistant/core.git
def test_from_event_to_delete_state():
    event = ha.Event(
        EVENT_STATE_CHANGED,
        {
            "entity_id": "sensor.temperature",
            "old_state": ha.State("sensor.temperature", "18"),
            "new_state": None,
        },
    )
    db_state = States.from_event(event)

    assert db_state.entity_id == "sensor.temperature"
    assert db_state.state == ""
    assert db_state.last_changed == event.time_fired
    assert db_state.last_updated == event.time_fired
78
test_models.py
Python
tests/components/recorder/test_models.py
3150915cb72df2ca94a67fa29ebc9164d8deabf1
core
1
120,239
145
14
34
580
55
1
218
280
_lstsq
remove numpy.linalg._promote_arg_dtypes in favor of numpy.util._promote_dtypes_inexact
https://github.com/google/jax.git
def _lstsq(a, b, rcond, *, numpy_resid=False):
    # TODO: add lstsq to lax_linalg and implement this function via those wrappers.
    # TODO: add custom jvp rule for more robust lstsq differentiation
    a, b = _promote_dtypes_inexact(a, b)
    if a.shape[0] != b.shape[0]:
        raise ValueError("Leading dimensions of input arrays must match")
    b_orig_ndim = b.ndim
    if b_orig_ndim == 1:
        b = b[:, None]
    if a.ndim != 2:
        raise TypeError(
            f"{a.ndim}-dimensional array given. Array must be two-dimensional")
    if b.ndim != 2:
        raise TypeError(
            f"{b.ndim}-dimensional array given. Array must be one or two-dimensional")
    m, n = a.shape
    dtype = a.dtype
    if rcond is None:
        rcond = jnp.finfo(dtype).eps * max(n, m)
    else:
        rcond = jnp.where(rcond < 0, jnp.finfo(dtype).eps, rcond)
    u, s, vt = svd(a, full_matrices=False)
    mask = s >= rcond * s[0]
    rank = mask.sum()
    safe_s = jnp.where(mask, s, 1)
    s_inv = jnp.where(mask, 1 / safe_s, 0)[:, jnp.newaxis]
    uTb = jnp.matmul(u.conj().T, b, precision=lax.Precision.HIGHEST)
    x = jnp.matmul(vt.conj().T, s_inv * uTb, precision=lax.Precision.HIGHEST)
    # Numpy returns empty residuals in some cases. To allow compilation, we
    # default to returning full residuals in all cases.
    if numpy_resid and (rank < n or m <= n):
        resid = jnp.asarray([])
    else:
        b_estimate = jnp.matmul(a, x, precision=lax.Precision.HIGHEST)
        resid = norm(b - b_estimate, axis=0) ** 2
    if b_orig_ndim == 1:
        x = x.ravel()
    return x, resid, rank, s


_jit_lstsq = jit(partial(_lstsq, numpy_resid=False))


@_wraps(np.linalg.lstsq, lax_description=textwrap.dedent())
@_wraps(np.linalg.lstsq, lax_description=textwrap.dedent("""\
    It has two important differences:

    1. In `numpy.linalg.lstsq`, the default `rcond` is `-1`, and warns that in the future
       the default will be `None`. Here, the default rcond is `None`.
    2. In `np.linalg.lstsq` the returned residuals are empty for low-rank or over-determined
       solutions. Here, the residuals are returned in all cases, to make the function
       compatible with jit. The non-jit compatible numpy behavior can be recovered by
       passing numpy_resid=True.

    The lstsq function does not currently have a custom JVP rule, so the gradient is
    poorly behaved for some inputs, particularly for low-rank `a`.
    """))
336
linalg.py
Python
jax/_src/numpy/linalg.py
bb2682db6df5b9388ce0b161e3f449624238718b
jax
10
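A usage sketch of the wrapper described by the decorator above, assuming the public jax.numpy.linalg.lstsq entry point:

import jax.numpy as jnp

a = jnp.array([[1., 2.], [3., 4.], [5., 6.]])
b = jnp.array([1., 2., 3.])
x, resid, rank, s = jnp.linalg.lstsq(a, b)  # rcond defaults to None here
# resid is always populated (unlike numpy), which keeps the call jit-compatible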
19,913
17
10
18
74
9
0
22
97
installed_location
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyproject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
https://github.com/pypa/pipenv.git
def installed_location(self) -> Optional[str]:
    egg_link = egg_link_path_from_location(self.raw_name)
    if egg_link:
        location = egg_link
    elif self.location:
        location = self.location
    else:
        return None
    return normalize_path(location)
44
base.py
Python
pipenv/patched/notpip/_internal/metadata/base.py
f3166e673fe8d40277b804d35d77dcdb760fc3b3
pipenv
3
188,416
61
16
15
214
22
0
74
154
validate_and_return_id_token
Fix rbac (#7699) * perf: 优化 suggesstion * perf: 修改 migrations * feat: 添加OIDC认证逻辑 * perf: 修改 backend * perf: 优化认证backends * perf: 优化认证backends * perf: 优化CAS认证, 用户多域名进行访问时回调到各自域名 Co-authored-by: ibuler <[email protected]>
https://github.com/jumpserver/jumpserver.git
def validate_and_return_id_token(jws, nonce=None, validate_nonce=True):
    log_prompt = "Validate ID Token: {}"
    logger.debug(log_prompt.format('Get shared key'))
    shared_key = settings.AUTH_OPENID_CLIENT_ID \
        if settings.AUTH_OPENID_PROVIDER_SIGNATURE_ALG == 'HS256' \
        else settings.AUTH_OPENID_PROVIDER_SIGNATURE_KEY  # RS256

    try:
        # Decodes the JSON Web Token and raise an error if the signature is invalid.
        logger.debug(log_prompt.format('Verify compact jwk'))
        id_token = JWS().verify_compact(force_bytes(jws), _get_jwks_keys(shared_key))
    except JWKESTException as e:
        logger.debug(log_prompt.format('Verify compact jwkest exception: {}'.format(str(e))))
        return

    # Validates the claims embedded in the id_token.
    logger.debug(log_prompt.format('Validate claims'))
    _validate_claims(id_token, nonce=nonce, validate_nonce=validate_nonce)

    return id_token
126
utils.py
Python
apps/authentication/backends/oidc/utils.py
edfca5eb2486c2f006257723ffeda6f56b170170
jumpserver
3
153,580
37
11
9
140
13
0
51
137
aggregate
DOCS-#3099: Fix `BasePandasDataSet` docstrings warnings (#4333) Co-authored-by: Yaroslav Igoshev <[email protected]> Signed-off-by: Alexander Myskov <[email protected]>
https://github.com/modin-project/modin.git
def aggregate(self, func=None, axis=0, *args, **kwargs):  # noqa: PR01, RT01, D200
    axis = self._get_axis_number(axis)
    result = None

    if axis == 0:
        result = self._aggregate(func, _axis=axis, *args, **kwargs)
    # TODO: handle case when axis == 1
    if result is None:
        kwargs.pop("is_transform", None)
        return self.apply(func, axis=axis, args=args, **kwargs)
    return result

agg = aggregate
87
base.py
Python
modin/pandas/base.py
605efa618e7994681f57b11d04d417f353ef8d50
modin
3
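Since modin mirrors the pandas API, a minimal call that exercises the axis-0 branch above might look like this (a sketch, assuming a working modin installation):

import modin.pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
df.aggregate("sum", axis=0)  # dispatches to self._aggregate; falls back to apply() otherwise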
288,763
10
9
5
38
6
0
10
42
open_position
Powerview refactor prep for all shade types (#79862)
https://github.com/home-assistant/core.git
def open_position(self) -> PowerviewShadeMove: return PowerviewShadeMove( self._shade.open_position, {POS_KIND_VANE: MIN_POSITION} )
23
cover.py
Python
homeassistant/components/hunterdouglas_powerview/cover.py
3ab294e8efc00c9f3cda2993318bb582ba675f8c
core
1
282,602
19
12
5
104
8
0
27
43
lambda_int_or_round_float
adjusted format of logs (#1292)
https://github.com/OpenBB-finance/OpenBBTerminal.git
def lambda_int_or_round_float(x) -> str:
    if (x - int(x) < -sys.float_info.epsilon) or (x - int(x) > sys.float_info.epsilon):
        return " " + str(round(x, 2))
    return " " + str(int(x))
62
helper_funcs.py
Python
gamestonk_terminal/helper_funcs.py
fd5821928265429d1ffb6e6d53f019915b3afbbc
OpenBBTerminal
3
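Illustrative calls showing the two branches of the helper above (values within machine epsilon of an integer print without decimals, everything else is rounded to two places); the expected outputs assume the code as written:

lambda_int_or_round_float(4.0)      # ' 4'
lambda_int_or_round_float(3.14159)  # ' 3.14'
lambda_int_or_round_float(7)        # ' 7'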
293,605
6
8
3
27
4
0
6
20
extra_restore_state_data
Restore state of trigger based template binary sensor (#67538)
https://github.com/home-assistant/core.git
def extra_restore_state_data(self) -> AutoOffExtraStoredData: return AutoOffExtraStoredData(self._auto_off_time)
15
binary_sensor.py
Python
homeassistant/components/template/binary_sensor.py
7fc0ffd5c591429cef805aca707acdda0ca304e6
core
1
19,288
55
17
23
332
30
0
76
418
planning
Add optional robot radius to RRT/RRTStar path planners (#655) * Add optional robot radius to RRT/RRTStar path planners. * update __init__ and check_collision to include radius * during animation, if a robot radius is given then it is drawn * Add test for robot radius * Correct import error * Correct missing robot_radius errors * Address "expected 2 blank lines, found 1" error * Address "line too long" errors * Add missing argument description. * Remove collision_check_with_xy and replace with check_collision * Fix "missing whitespace after ','" error * Update PathPlanning/ClosedLoopRRTStar/closed_loop_rrt_star_car.py Co-authored-by: Atsushi Sakai <[email protected]> Co-authored-by: Atsushi Sakai <[email protected]>
https://github.com/AtsushiSakai/PythonRobotics.git
def planning(self, animation=True):
    self.node_list = [self.start]
    for i in range(self.max_iter):
        rnd_node = self.get_random_node()
        nearest_ind = self.get_nearest_node_index(self.node_list, rnd_node)
        nearest_node = self.node_list[nearest_ind]

        new_node = self.steer(nearest_node, rnd_node, self.expand_dis)

        if self.check_if_outside_play_area(new_node, self.play_area) and \
           self.check_collision(
               new_node, self.obstacle_list, self.robot_radius):
            self.node_list.append(new_node)

        if animation and i % 5 == 0:
            self.draw_graph(rnd_node)

        if self.calc_dist_to_goal(self.node_list[-1].x,
                                  self.node_list[-1].y) <= self.expand_dis:
            final_node = self.steer(self.node_list[-1], self.end,
                                    self.expand_dis)
            if self.check_collision(
                    final_node, self.obstacle_list, self.robot_radius):
                return self.generate_final_course(len(self.node_list) - 1)

        if animation and i % 5:
            self.draw_graph(rnd_node)

    return None  # cannot find path
218
rrt.py
Python
PathPlanning/RRT/rrt.py
b53fdf75f66ccb63b5cfaadaa81253d43f01805a
PythonRobotics
10
125,798
3
8
2
26
4
0
3
17
testTerminateUnhealthyWorkers
[autoscaler][kuberay] Disable autoscaler health check and drain functionality (#26764) Signed-off-by: Dmitri Gekhtman <[email protected]> For KubeRay, Disables autoscaler's RPC drain of worker nodes prior to termination. Disables autoscaler's termination of nodes disconnected from the GCS.
https://github.com/ray-project/ray.git
def testTerminateUnhealthyWorkers(self): self.unhealthyWorkerHelper(disable_liveness_check=False)
14
test_autoscaler.py
Python
python/ray/tests/test_autoscaler.py
b2b11316cd0be45857b157d39ab4a89ffaf059b3
ray
1
152,690
3
6
6
15
3
0
3
6
get_learned_conditioning_prompt_schedules
prompt_parser: allow spaces in schedules, add test, log/ignore errors Only build the parser once (at import time) instead of for each step. doctest is run by simply executing modules/prompt_parser.py
https://github.com/AUTOMATIC1111/stable-diffusion-webui.git
def get_learned_conditioning_prompt_schedules(prompts, steps):
42
prompt_parser.py
Python
modules/prompt_parser.py
90e911fd546e76f879b38a764473569911a0f845
stable-diffusion-webui
3
256,283
17
15
17
94
11
0
19
80
get_nodes_by_class
Apply black formatting (#2115) * Testing black on ui/ * Applying black on docstores * Add latest docstring and tutorial changes * Create a single GH action for Black and docs to reduce commit noise to the minimum, slightly refactor the OpenAPI action too * Remove comments * Relax constraints on pydoc-markdown * Split temporary black from the docs. Pydoc-markdown was obsolete and needs a separate PR to upgrade * Fix a couple of bugs * Add a type: ignore that was missing somehow * Give path to black * Apply Black * Apply Black * Relocate a couple of type: ignore * Update documentation * Make Linux CI run after applying Black * Triggering Black * Apply Black * Remove dependency, does not work well * Remove manually double trailing commas * Update documentation Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
https://github.com/deepset-ai/haystack.git
def get_nodes_by_class(self, class_type) -> List[Any]:
    matches = [
        self.graph.nodes.get(node)["component"]
        for node in self.graph.nodes
        if isinstance(self.graph.nodes.get(node)["component"], class_type)
    ]
    return matches
59
base.py
Python
haystack/pipelines/base.py
a59bca366174d9c692fa19750c24d65f47660ef7
haystack
3
244,036
19
8
10
88
9
0
19
117
info
[Feature] Add Maskformer to mmdet (#7212) * first commit * add README * move model description from config to readme add description for binary_input add description for dice loss add a independent panoptic gt processing function add a independent panoptic gt processing function remove compatibility of pretrain in maskformer * update comments in maskformer_head * update docs format
https://github.com/open-mmlab/mmdetection.git
def info(self):
    return {
        'pos_inds': self.pos_inds,
        'neg_inds': self.neg_inds,
        'pos_masks': self.pos_masks,
        'neg_masks': self.neg_masks,
        'pos_is_gt': self.pos_is_gt,
        'num_gts': self.num_gts,
        'pos_assigned_gt_inds': self.pos_assigned_gt_inds,
    }
51
mask_sampling_result.py
Python
mmdet/core/bbox/samplers/mask_sampling_result.py
cac356380d505bf15587f07c0529218cc36b9652
mmdetection
1
260,080
8
8
9
34
7
0
9
18
test_calibration_without_sample_weight_base_estimator
API Rename base_estimator in CalibratedClassifierCV (#22054) Co-authored-by: Kevin Roice <[email protected]> Co-authored-by: Guillaume Lemaitre <[email protected]> Co-authored-by: Thomas J. Fan <[email protected]>
https://github.com/scikit-learn/scikit-learn.git
def test_calibration_without_sample_weight_base_estimator(data):
    X, y = data
    sample_weight = np.ones_like(y)
58
test_calibration.py
Python
sklearn/tests/test_calibration.py
effdd6e215c67f2ae8ed1e378ea1661e936059a4
scikit-learn
1
4,306
107
19
42
514
24
0
178
524
test_search_based_stream_should_not_attempt_to_get_more_than_10k_records
🐛 Source Hubspot: Handled 10K+ search-endpoint queries (#10700) * Handled search queries that would output more than 10K records * Getting CRM search objects in ascending chronological order * Fixed stream * Fixed rebase * Fixed condition * Added unit test * Removed unused import * Started a new query when reached 10K records * Moved comment
https://github.com/airbytehq/airbyte.git
def test_search_based_stream_should_not_attempt_to_get_more_than_10k_records(requests_mock, common_params, fake_properties_list):
    responses = [
        {
            "json": {
                "results": [{"id": f"{y}", "updatedAt": "2022-02-25T16:43:11Z"} for y in range(100)],
                "paging": {"next": {"after": f"{x*100}",}}
            },
            "status_code": 200,
        }
        for x in range(1, 101)
    ]
    # After reaching 10K records, it performs a new search query.
    responses.extend([
        {
            "json": {
                "results": [{"id": f"{y}", "updatedAt": "2022-03-01T00:00:00Z"} for y in range(100)],
                "paging": {"next": {"after": f"{x*100}",}}
            },
            "status_code": 200,
        }
        for x in range(1, 10)
    ])
    # Last page... it does not have paging->next->after
    responses.append({
        "json": {
            "results": [{"id": f"{y}", "updatedAt": "2022-03-01T00:00:00Z"} for y in range(100)],
            "paging": {}
        },
        "status_code": 200,
    })

    properties_response = [{
        "json": [
            {"name": property_name, "type": "string", "updatedAt": 1571085954360, "createdAt": 1565059306048}
            for property_name in fake_properties_list
        ],
        "status_code": 200
    }]

    # Create test_stream instance with some state
    test_stream = Companies(**common_params)
    test_stream.state = {"updatedAt": "2022-02-24T16:43:11Z"}

    # Mocking Request
    requests_mock.register_uri("POST", test_stream.url, responses)
    requests_mock.register_uri("GET", "/properties/v2/company/properties", properties_response)

    records = list(test_stream.read_records(sync_mode=SyncMode.incremental))

    # The stream should not attempt to get more than 10K records.
    # Instead, it should use the new state to start a new search query.
    assert len(records) == 11000
    assert test_stream.state['updatedAt'] == '2022-03-01T00:00:00+00:00'
277
test_source.py
Python
airbyte-integrations/connectors/source-hubspot/unit_tests/test_source.py
710543a9abacc7578238cb5edaa47f43ed7c0431
airbyte
7
265,988
69
15
29
370
36
0
91
283
applied_filters
Closes #9623: Implement saved filters (#10801) * Initial work on saved filters * Return only enabled/shared filters * Add tests * Clean up filtering of usable SavedFilters
https://github.com/netbox-community/netbox.git
def applied_filters(context, model, form, query_params):
    user = context['request'].user
    form.is_valid()  # Ensure cleaned_data has been set

    applied_filters = []
    for filter_name in form.changed_data:
        if filter_name not in form.cleaned_data:
            continue

        querydict = query_params.copy()
        if filter_name not in querydict:
            continue

        bound_field = form.fields[filter_name].get_bound_field(form, filter_name)
        querydict.pop(filter_name)
        display_value = ', '.join([str(v) for v in get_selected_values(form, filter_name)])

        applied_filters.append({
            'name': filter_name,
            'value': form.cleaned_data[filter_name],
            'link_url': f'?{querydict.urlencode()}',
            'link_text': f'{bound_field.label}: {display_value}',
        })

    save_link = None
    if user.has_perm('extras.add_savedfilter') and 'filter' not in context['request'].GET:
        content_type = ContentType.objects.get_for_model(model).pk
        parameters = context['request'].GET.urlencode()
        url = reverse('extras:savedfilter_add')
        save_link = f"{url}?content_types={content_type}&parameters={quote(parameters)}"

    return {
        'applied_filters': applied_filters,
        'save_link': save_link,
    }
198
helpers.py
Python
netbox/utilities/templatetags/helpers.py
484efdaf75f267a43f9321b938fda1bc967b9e53
netbox
7
39,434
134
16
118
712
27
0
264
887
fit
Reformat SAR+ SQL queries (#1772) 1. Quote SQL names to allow spaces in names 2. Uppercase SQL keywords and functions 3. Lowercase SQL alias and table names 4. Wrap long lines
https://github.com/microsoft/recommenders.git
def fit(self, df):
    df.createOrReplaceTempView(self._format("{prefix}df_train_input"))

    if self.timedecay_formula:
        # With time decay, we compute a sum over ratings given by
        # a user in the case when T=np.inf, so user gets a
        # cumulative sum of ratings for a particular item and not
        # the last rating. Time Decay does a group by on user
        # item pairs and apply the formula for time decay there
        # Time T parameter is in days and input time is in
        # seconds, so we do dt/60/(T*24*60)=dt/(T*24*3600) the
        # following is the query which we want to run
        if self.header["time_now"] is None:
            query = self._format()
            self.header["time_now"] = self.spark.sql(query).first()[0]

        query = self._format()
        # replace with time-decayed version
        df = self.spark.sql(query)
    else:
        if self.header["col_timestamp"] in df.columns:
            # we need to de-duplicate items by using the latest item
            query = self._format()
            df = self.spark.sql(query)

    df.createOrReplaceTempView(self._format("{prefix}df_train"))

    log.info("sarplus.fit 1/2: compute item cooccurrences...")

    # compute cooccurrence above minimum threshold
    query = self._format()
    item_cooccurrence = self.spark.sql(query)
    item_cooccurrence.write.mode("overwrite").saveAsTable(
        self._format("{prefix}item_cooccurrence")
    )

    # compute the diagonal used later for Jaccard and Lift
    if self.similarity_type == SIM_LIFT or self.similarity_type == SIM_JACCARD:
        query = self._format()
        item_marginal = self.spark.sql(query)
        item_marginal.createOrReplaceTempView(self._format("{prefix}item_marginal"))

    if self.similarity_type == SIM_COOCCUR:
        self.item_similarity = item_cooccurrence
    elif self.similarity_type == SIM_JACCARD:
        query = self._format()
        self.item_similarity = self.spark.sql(query)
    elif self.similarity_type == SIM_LIFT:
        query = self._format()
        self.item_similarity = self.spark.sql(query)
    else:
        raise ValueError(
            "Unknown similarity type: {0}".format(self.similarity_type)
        )

    # store upper triangular
    log.info(
        "sarplus.fit 2/2: compute similarity metric %s..." % self.similarity_type
    )
    self.item_similarity.write.mode("overwrite").saveAsTable(
        self._format("{prefix}item_similarity_upper")
    )

    # expand upper triangular to full matrix
    query = self._format()
    self.item_similarity = self.spark.sql(query)
    self.item_similarity.write.mode("overwrite").saveAsTable(
        self._format("{prefix}item_similarity")
    )

    # free space
    self.spark.sql(self._format("DROP TABLE `{prefix}item_cooccurrence`"))
    self.spark.sql(self._format("DROP TABLE `{prefix}item_similarity_upper`"))

    self.item_similarity = self.spark.table(
        self._format("{prefix}item_similarity")
    )
402
SARPlus.py
Python
contrib/sarplus/python/pysarplus/SARPlus.py
c8522b45ca90d521ab9054563cbaccd92668eb52
recommenders
9
19,577
32
13
9
113
15
1
38
98
find_requirements
Code reorg utils into utils module reduces complexity (#4990) * Split apart the massive utils.py into a utils module
https://github.com/pypa/pipenv.git
def find_requirements(max_depth=3):
    i = 0
    for c, d, f in walk_up(os.getcwd()):
        i += 1
        if i < max_depth:
            r = os.path.join(c, "requirements.txt")
            if os.path.isfile(r):
                return r
    raise RuntimeError("No requirements.txt found!")


# Borrowed from Pew.
# See https://github.com/berdario/pew/blob/master/pew/_utils.py#L82
@contextmanager
@contextmanager
64
shell.py
Python
pipenv/utils/shell.py
3387881a6d4fc2d8bdc0f05c484cb2f7222acfb8
pipenv
4
176,538
27
13
8
147
14
0
38
74
is_forest
Added examples in is_forest() and is_tree() (#5524) * examples * examples * examples * Example changed * improved styling * revised * edge labels * improved styling * spacing * error testing * examples * styling * add_nodes removed * spacing * spacing * spacing
https://github.com/networkx/networkx.git
def is_forest(G):
    if len(G) == 0:
        raise nx.exception.NetworkXPointlessConcept("G has no nodes.")

    if G.is_directed():
        components = (G.subgraph(c) for c in nx.weakly_connected_components(G))
    else:
        components = (G.subgraph(c) for c in nx.connected_components(G))

    return all(len(c) - 1 == c.number_of_edges() for c in components)
90
recognition.py
Python
networkx/algorithms/tree/recognition.py
1e0d829afa67885e3eeaeb191c7dd824e6ae7e21
networkx
6
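A small usage sketch of the recognizer above, using only stock networkx constructors:

import networkx as nx

G = nx.Graph([(0, 1), (2, 3)])      # two disjoint trees -> a forest
print(nx.is_forest(G))              # True

G.add_edges_from([(1, 2), (3, 0)])  # close the cycle 0-1-2-3-0
print(nx.is_forest(G))              # False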
168,264
46
14
133
138
24
0
47
156
contains
PERF cache find_stack_level (#48023) cache stacklevel
https://github.com/pandas-dev/pandas.git
def contains(self, pat, case=True, flags=0, na=None, regex=True):
    r
    if regex and re.compile(pat).groups:
        warnings.warn(
            "This pattern is interpreted as a regular expression, and has "
            "match groups. To actually get the groups, use str.extract.",
            UserWarning,
            stacklevel=find_stack_level(inspect.currentframe()),
        )

    result = self._data.array._str_contains(pat, case, flags, na, regex)
    return self._wrap_result(result, fill_value=na, returns_string=False)
93
accessor.py
Python
pandas/core/strings/accessor.py
2f8d0a36703e81e4dca52ca9fe4f58c910c1b304
pandas
3
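A sketch of the situation the warning above guards against, assuming standard pandas string methods:

import pandas as pd

s = pd.Series(["abc123", "no digits"])
s.str.contains(r"(\d+)")  # works, but emits the UserWarning about match groups
s.str.extract(r"(\d+)")   # the intended call when the captured group is wanted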
261,716
41
10
17
164
22
0
57
124
test_class_wrapper_param_validation
MAINT Allow partial param validation for functions (#25087)
https://github.com/scikit-learn/scikit-learn.git
def test_class_wrapper_param_validation(func_module, class_module):
    func, func_name, func_params, required_params = _get_func_info(func_module)

    module_name, class_name = class_module.rsplit(".", 1)
    module = import_module(module_name)
    klass = getattr(module, class_name)

    parameter_constraints_func = getattr(func, "_skl_parameter_constraints")
    parameter_constraints_class = getattr(klass, "_parameter_constraints")
    parameter_constraints = {
        **parameter_constraints_class,
        **parameter_constraints_func,
    }
    parameter_constraints = {
        k: v for k, v in parameter_constraints.items() if k in func_params
    }

    _check_function_param_validation(
        func, func_name, func_params, required_params, parameter_constraints
    )
105
test_public_functions.py
Python
sklearn/tests/test_public_functions.py
14130f44eb6cba8a2fb2eff8383be8909783cad0
scikit-learn
3
3,984
47
18
17
224
23
0
56
259
test_execute_in_batch_with_fails
🐛 Source FB Marketing: fix `execute_in_batch` when batch is bigger than 50 (#10588) * fix execute_in_batch * add tests * fix pre-commit config Co-authored-by: Sherif A. Nada <[email protected]> Co-authored-by: Eugene Kulak <[email protected]> Co-authored-by: Sherif A. Nada <[email protected]>
https://github.com/airbytehq/airbyte.git
def test_execute_in_batch_with_fails(self, api, batch, mock_batch_responses):
    mock_batch_responses(
        [
            {
                "json": [
                    {"body": "{}", "code": 500, "headers": {}},
                    {"body": json.dumps({"name": "creative 1"}), "code": 200, "headers": {}},
                ],
            }
        ]
    )

    stream = SomeTestStream(api=api)
    requests = [FacebookRequest("node", "GET", "endpoint") for _ in range(5)]

    with pytest.raises(RuntimeError, match="Batch request failed with response:"):
        list(stream.execute_in_batch(requests))

    assert batch.add_request.call_count == len(requests)
    assert batch.execute.call_count == 1
130
test_base_streams.py
Python
airbyte-integrations/connectors/source-facebook-marketing/unit_tests/test_base_streams.py
6cd20e6879a42053692c18a16906eb410aad85db
airbyte
2
164,647
19
12
16
73
10
0
19
92
test_repeated_column_labels
TST: Don't use autouse fixture in test_stata (#45831)
https://github.com/pandas-dev/pandas.git
def test_repeated_column_labels(self, datapath):
    # GH 13923, 25772
    msg = 
    with pytest.raises(ValueError, match=msg):
        read_stata(
            datapath("io", "data", "stata", "stata15.dta"),
            convert_categoricals=True,
        )
40
test_stata.py
Python
pandas/tests/io/test_stata.py
c055dc4e6be9fc1b68d873a1ace286322dadd5e1
pandas
1
267,461
115
21
51
587
41
0
194
1,014
_run
Forked display via queue (#77056) * Forked Display via queue * Docs and simple code cleanup * Only proxy Display.display * Remove unused import * comment * Update deadlock comment, remove py3 check * Don't flush display, and don't lock from forks * clog frag * ci_complete ci_coverage * Add units for queue proxying * Cleanup flush * ci_complete * Only lock the write, switch to RLock * Remove unused import
https://github.com/ansible/ansible.git
def _run(self):
    # import cProfile, pstats, StringIO
    # pr = cProfile.Profile()
    # pr.enable()

    # Set the queue on Display so calls to Display.display are proxied over the queue
    display.set_queue(self._final_q)
    try:
        # execute the task and build a TaskResult from the result
        display.debug("running TaskExecutor() for %s/%s" % (self._host, self._task))
        executor_result = TaskExecutor(
            self._host,
            self._task,
            self._task_vars,
            self._play_context,
            self._new_stdin,
            self._loader,
            self._shared_loader_obj,
            self._final_q
        ).run()

        display.debug("done running TaskExecutor() for %s/%s [%s]" % (self._host, self._task, self._task._uuid))
        self._host.vars = dict()
        self._host.groups = []

        # put the result on the result queue
        display.debug("sending task result for task %s" % self._task._uuid)
        self._final_q.send_task_result(
            self._host.name,
            self._task._uuid,
            executor_result,
            task_fields=self._task.dump_attrs(),
        )
        display.debug("done sending task result for task %s" % self._task._uuid)

    except AnsibleConnectionFailure:
        self._host.vars = dict()
        self._host.groups = []
        self._final_q.send_task_result(
            self._host.name,
            self._task._uuid,
            dict(unreachable=True),
            task_fields=self._task.dump_attrs(),
        )

    except Exception as e:
        if not isinstance(e, (IOError, EOFError, KeyboardInterrupt, SystemExit)) or isinstance(e, TemplateNotFound):
            try:
                self._host.vars = dict()
                self._host.groups = []
                self._final_q.send_task_result(
                    self._host.name,
                    self._task._uuid,
                    dict(failed=True, exception=to_text(traceback.format_exc()), stdout=''),
                    task_fields=self._task.dump_attrs(),
                )
            except Exception:
                display.debug(u"WORKER EXCEPTION: %s" % to_text(e))
                display.debug(u"WORKER TRACEBACK: %s" % to_text(traceback.format_exc()))
    finally:
        self._clean_up()

    display.debug("WORKER PROCESS EXITING")

    # pr.disable()
    # s = StringIO.StringIO()
    # sortby = 'time'
    # ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
    # ps.print_stats()
    # with open('worker_%06d.stats' % os.getpid(), 'w') as f:
    #     f.write(s.getvalue())
365
worker.py
Python
lib/ansible/executor/process/worker.py
5e369604e1930b1a2e071fecd7ec5276ebd12cb1
ansible
7
261,253
86
13
28
252
20
0
124
415
_check_interaction_cst
ENH FEA add interaction constraints to HGBT (#21020) Co-authored-by: Loïc Estève <[email protected]>
https://github.com/scikit-learn/scikit-learn.git
def _check_interaction_cst(self, n_features):
    if self.interaction_cst is None:
        return None

    if not (
        isinstance(self.interaction_cst, Iterable)
        and all(isinstance(x, Iterable) for x in self.interaction_cst)
    ):
        raise ValueError(
            "Interaction constraints must be None or an iterable of iterables, "
            f"got: {self.interaction_cst!r}."
        )

    invalid_indices = [
        x
        for cst_set in self.interaction_cst
        for x in cst_set
        if not (isinstance(x, Integral) and 0 <= x < n_features)
    ]
    if invalid_indices:
        raise ValueError(
            "Interaction constraints must consist of integer indices in [0,"
            f" n_features - 1] = [0, {n_features - 1}], specifying the position of"
            f" features, got invalid indices: {invalid_indices!r}"
        )

    constraints = [set(group) for group in self.interaction_cst]

    # Add all not listed features as own group by default.
    rest = set(range(n_features)) - set().union(*constraints)
    if len(rest) > 0:
        constraints.append(rest)

    return constraints
145
gradient_boosting.py
Python
sklearn/ensemble/_hist_gradient_boosting/gradient_boosting.py
5ceb8a6a031ddff26a7ede413db1b53edb64166a
scikit-learn
12
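A usage sketch of the constraint format the validator above accepts, assuming the interaction_cst parameter on scikit-learn's histogram-based gradient boosting estimators; the feature grouping is illustrative:

from sklearn.ensemble import HistGradientBoostingRegressor

# Features 0 and 1 may interact with each other; feature 2 stays on its own.
# Any feature not listed ends up in its own group, per the `rest` handling above.
model = HistGradientBoostingRegressor(interaction_cst=[{0, 1}, {2}])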
101,331
11
9
4
45
7
0
11
43
_flush_queues
Bugfix: convert - Gif Writer - Fix non-launch error on Gif Writer - convert plugins - linting - convert/fs_media/preview/queue_manager - typing - Change convert items from dict to Dataclass
https://github.com/deepfakes/faceswap.git
def _flush_queues(self):
    for q_name in self.queues:
        self.flush_queue(q_name)
    logger.debug("QueueManager flushed all queues")
25
queue_manager.py
Python
lib/queue_manager.py
1022651eb8a7741014f5d2ec7cbfe882120dfa5f
faceswap
2
133,519
36
12
25
174
20
0
53
159
fetch_latest_alerts
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
https://github.com/ray-project/ray.git
def fetch_latest_alerts(rds_data_client):
    schema = GLOBAL_CONFIG["RELEASE_AWS_DB_STATE_TABLE"]

    sql = f
    result = rds_data_client.execute_statement(
        database=GLOBAL_CONFIG["RELEASE_AWS_DB_NAME"],
        secretArn=GLOBAL_CONFIG["RELEASE_AWS_DB_SECRET_ARN"],
        resourceArn=GLOBAL_CONFIG["RELEASE_AWS_DB_RESOURCE_ARN"],
        schema=schema,
        sql=sql,
    )
    for row in result["records"]:
        category, test_suite, test_name, last_result_hash, last_notification_dt = (
            r["stringValue"] if "stringValue" in r else None for r in row
        )
        last_notification_dt = datetime.datetime.strptime(
            last_notification_dt, "%Y-%m-%d %H:%M:%S"
        )
        yield category, test_suite, test_name, last_result_hash, last_notification_dt
107
alert.py
Python
release/alert.py
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
ray
4
296,838
23
11
8
82
12
1
24
62
fake_schedule
Add initial implementation of a calendar trigger (#68674) * Add initial implementation of calendar trigger This is an initial implementation of a calendar trigger, that supports triggering on calendar start time. See architecture proposal in: https://github.com/home-assistant/architecture/discussions/700 * Address reviewer feedback * Use f-strings for all tests * Apply suggestions from code review Co-authored-by: Martin Hjelmare <[email protected]> * Remove logging f-strings, and move to main code * Remove mypy ignore * Apply suggestions from code review Co-authored-by: Martin Hjelmare <[email protected]> * Update calendar triggers to use new calendar data model * Update tests/components/calendar/test_trigger.py Co-authored-by: Franck Nijhof <[email protected]> * Rewrite tests using freezegun Rewrite tests using freezegun and improve edge case handling, and use utc consistently for all alarms. * Update homeassistant/components/calendar/trigger.py Co-authored-by: Martin Hjelmare <[email protected]> * Update homeassistant/components/calendar/trigger.py Co-authored-by: Martin Hjelmare <[email protected]> * Increase test coverage based on pr feedback Co-authored-by: Martin Hjelmare <[email protected]> Co-authored-by: Franck Nijhof <[email protected]>
https://github.com/home-assistant/core.git
def fake_schedule(hass, freezer):
    # Setup start time for all tests
    freezer.move_to("2022-04-19 10:31:02+00:00")
    schedule = FakeSchedule(hass, freezer)
    with patch(
        "homeassistant.components.demo.calendar.DemoCalendar.async_get_events",
        new=schedule.async_get_events,
    ):
        yield schedule


@pytest.fixture(autouse=True)
@pytest.fixture(autouse=True)
37
test_trigger.py
Python
tests/components/calendar/test_trigger.py
a2c74b978664b627bafc4a43b26aa2be7b15b229
core
1
262,772
28
12
5
110
14
0
29
48
convert_binary_to_thin_arch
macOS: Remove the timeouts for codesigning/signature stripping/lipo. (#6644)
https://github.com/pyinstaller/pyinstaller.git
def convert_binary_to_thin_arch(filename, thin_arch):
    cmd_args = ['lipo', '-thin', thin_arch, filename, '-output', filename]
    p = subprocess.run(cmd_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
    if p.returncode:
        raise SystemError(f"lipo command ({cmd_args}) failed with error code {p.returncode}!\noutput: {p.stdout}")
58
osx.py
Python
PyInstaller/utils/osx.py
1cd3b73e2939052271a0bc26cf204eebee4dcd15
pyinstaller
2
195,558
7
7
5
28
5
0
7
21
join_hook
[FSDP] Zero 3 Optimization Support (#4903) * zero3 init commit * minor cleanup: * handle mpeval * remove fairscale dependence * fsdp avail * update reqs * better reqs * autoformat * autofromat
https://github.com/facebookresearch/ParlAI.git
def join_hook(self, **kwargs) -> JoinHook: return TrainLoopJoinHook(self)
16
fsdp.py
Python
parlai/utils/fsdp.py
96aa1bb7cc28bae0a7367344f91d2c517485f686
ParlAI
1
133,959
2
6
4
13
2
0
2
5
ray_start_client_server_for_address
[Test][Client] Only start ray once in client tests (#28835) It looks like we're frequently starting and shutting down Ray in this test because `ray_start_client_server` isn't connecting to the Ray created by `ray_start_regular_shared`, and is instead starting a new Ray head process every time it launches. Ray client tests are failing frequently with: ``` [2022-10-06 07:31:46,253 E 13235 13751] core_worker_process.cc:277: The core worker has already been shutdown. This happens when the language frontend accesses the Ray's worker after it is shutdown. The process will exit ``` Which is probably caused by having multiple ray clusters running simultaneous, with some shutting down asynchronously. This refactor forces all of the tests in the module to use the same Ray cluster. Also fixes two other sources of potential flakiness: * Joins the thread in test_client_thread_safe (seems like this has a bad interaction when the client server is cleaned up) * Calls ray.get in `test_stdout_log_stream`, to make sure that the remote function is done running before we try searching for its output Should also have the happy side effect of speeding up test_client. Ran the `Small & Client` tests (regular and external redis) twice each, no flakes, and windows version of test_client.
https://github.com/ray-project/ray.git
def ray_start_client_server_for_address(address):
20
ray_client_helpers.py
Python
python/ray/util/client/ray_client_helpers.py
297341e107daee1ea3aff991ae8ea8c90993c683
ray
1
284,770
83
13
70
418
39
0
104
768
call_cpsearch
Preferred data source for a given command and help docs for any command (#1928) * Add a feature flag loaded from . env for preferred data source * Update the load command to use the preferred source * Add a test * Black syntax checker updates * Update test syntax * Update load command documentation * Add a variable to read a sources file from * Add a default sources.json file * Add a helper function to read the source file and get the most relevant value * Add some comments * Remove older preferred data source file setting * Add a default source file for data and use it if no user preference specifies a specific file * Remove duplicate source file and old settings values * Update the test file * Update documentation string * Update tests/openbb_terminal/test_settings_controller.py * Black syntax formatting issues * Fix a test case * Fix a typo * Fix Flake8 errors * Fix Flake8 errors * Pyupgrade syntax fixes * Additional linter corrections * More pylint issues * Improve predefined default sources * refactor parse_known_args_and_warn for documentation * Update openbb_terminal/helper_funcs.py * Add a feature flag loaded from . env for preferred data source * Update the load command to use the preferred source * Add a test * Black syntax checker updates * Update test syntax * Update load command documentation * Add a variable to read a sources file from * Add a default sources.json file * Add a helper function to read the source file and get the most relevant value * Add some comments * Remove older preferred data source file setting * Add a default source file for data and use it if no user preference specifies a specific file * Remove duplicate source file and old settings values * Update the test file * Update documentation string * Update tests/openbb_terminal/test_settings_controller.py * Black syntax formatting issues * Fix a test case * Fix a typo * Fix Flake8 errors * Fix Flake8 errors * Pyupgrade syntax fixes * Additional linter corrections * More pylint issues * Improve predefined default sources * refactor parse_known_args_and_warn for documentation * Update openbb_terminal/helper_funcs.py * Forex controller * Tests : Fixing `parse_known_args_and_warn` issue * Comparison Analysis controller * Tests : Comparison Analysis * Tests : Stocks/DarkPoolShorts + Stocks/Options * Tests : fix call_load tests * Linting : fixing line size Co-authored-by: DidierRLopes <[email protected]> Co-authored-by: Chavithra PARANA <[email protected]>
https://github.com/OpenBB-finance/OpenBBTerminal.git
def call_cpsearch(self, other_args):
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="cpsearch",
        description=,
    )
    parser.add_argument(
        "-q",
        "--query",
        help="phrase for search",
        dest="query",
        nargs="+",
        type=str,
        required="-h" not in other_args,
    )
    parser.add_argument(
        "-c",
        "--cat",
        help="Categories to search: currencies|exchanges|icos|people|tags|all. Default: all",
        dest="category",
        default="all",
        type=str,
        choices=coinpaprika_model.CATEGORIES,
    )
    parser.add_argument(
        "-l",
        "--limit",
        default=10,
        dest="limit",
        help="Limit of records",
        type=check_positive,
    )
    parser.add_argument(
        "-s",
        "--sort",
        dest="sortby",
        type=str,
        help="Sort by given column. Default: id",
        default="id",
        choices=coinpaprika_model.FILTERS,
    )
    parser.add_argument(
        "--descend",
        action="store_false",
        help="Flag to sort in descending order (lowest first)",
        dest="descend",
        default=True,
    )
    if other_args:
        if not other_args[0][0] == "-":
            other_args.insert(0, "-q")

    ns_parser = self.parse_known_args_and_warn(
        parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
    )
    if ns_parser:
        coinpaprika_view.display_search_results(
            top=ns_parser.limit,
            sortby=ns_parser.sortby,
            descend=ns_parser.descend,
            export=ns_parser.export,
            query=" ".join(ns_parser.query),
            category=ns_parser.category,
        )
259
discovery_controller.py
Python
openbb_terminal/cryptocurrency/discovery/discovery_controller.py
497324d75f523f104bc98bd1e3865bf9d19eef3f
OpenBBTerminal
4
290,683
7
8
2
24
3
0
7
13
test_normalize_url_does_not_touch_valid_url
Normalize url entered in fibaro integration setup dialog (#81996) * Normalize url entered in fibaro integration setup dialog * Improvements as suggested in code review * Fix spelling in comments
https://github.com/home-assistant/core.git
async def test_normalize_url_does_not_touch_valid_url():
    assert _normalize_url(TEST_URL) == TEST_URL
12
test_config_flow.py
Python
tests/components/fibaro/test_config_flow.py
ff1ec7a028f747de1f96521eb3df6f98d7426434
core
1
127,153
16
12
9
72
12
0
17
72
get_dashboard_url
[core] Don't override external dashboard URL in internal KV store (#27901) Fix 2.0.0 release blocker bug where the Ray State API and Jobs are not accessible if the override URL doesn't support adding additional subpaths. This PR keeps the localhost dashboard URL in the internal KV store and only overrides it in values printed or returned to the user. Screenshot: images.githubusercontent.com/6900234/184809934-8d150874-90fe-4b45-a13d-bce1807047de.png
https://github.com/ray-project/ray.git
def get_dashboard_url():
    if ray_constants.RAY_OVERRIDE_DASHBOARD_URL in os.environ:
        return _remove_protocol_from_url(
            os.environ.get(ray_constants.RAY_OVERRIDE_DASHBOARD_URL)
        )
    else:
        worker = global_worker
        worker.check_connected()
        return _global_node.webui_url
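To illustrate the override branch above, here is a minimal standalone sketch. The `strip_protocol` helper is a hypothetical stand-in for Ray's private `_remove_protocol_from_url`, and it assumes `ray_constants.RAY_OVERRIDE_DASHBOARD_URL` holds its own name as the environment variable key:

import os

def strip_protocol(url):
    # Hypothetical stand-in: drop a leading http:// or https:// scheme
    # if present, otherwise return the URL unchanged.
    for prefix in ("http://", "https://"):
        if url.startswith(prefix):
            return url[len(prefix):]
    return url

# The override wins only when the environment variable is set.
os.environ["RAY_OVERRIDE_DASHBOARD_URL"] = "https://dashboards.example.com/ray"
print(strip_protocol(os.environ["RAY_OVERRIDE_DASHBOARD_URL"]))
# -> dashboards.example.com/ray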
42
worker.py
Python
python/ray/_private/worker.py
4692e8d8023e789120d3f22b41ffb136b50f70ea
ray
2
127,274
137
19
74
650
48
0
189
749
start_rayservice
[nightly] Add serve ha chaos test into nightly test. (#27413) This PR adds a serve ha test. The flow of the tests is: 1. check the kube ray build 2. start ray service 3. warm up the cluster 4. start killing nodes 5. get the stats and make sure it's good
https://github.com/ray-project/ray.git
def start_rayservice():
    # step-1: generate the yaml file
    print(f"Using ray image: {ray_image}")
    solution = "\n".join(
        [
            f" {line}"
            for line in pathlib.Path("./solution.py").read_text().splitlines()
        ]
    )
    locustfile = "\n".join(
        [
            f" {line}"
            for line in pathlib.Path("./locustfile.py").read_text().splitlines()
        ]
    )
    template = (
        pathlib.Path("ray_v1alpha1_rayservice_template.yaml")
        .read_text()
        .format(
            cluster_id=cluster_id,
            ray_image=ray_image,
            solution=solution,
            locustfile=locustfile,
        )
    )
    print("=== YamlFile ===")
    print(template)
    tmp_yaml = pathlib.Path("/tmp/ray_v1alpha1_rayservice.yaml")
    tmp_yaml.write_text(template)
    print("=== Get Pods from ray-system ===")
    print(
        subprocess.check_output(
            ["kubectl", "get", "pods", "--namespace", "ray-system", "--no-headers"]
        ).decode()
    )
    # step-2: create the cluster
    print(f"Creating cluster with id: {cluster_id}")
    print(subprocess.check_output(["kubectl", "create", "-f", str(tmp_yaml)]).decode())
    # step-3: make sure the ray cluster is up
    w = watch.Watch()
    start_time = time.time()
    head_pod_name = None
    for event in w.stream(
        func=cli.list_namespaced_pod,
        namespace="default",
        label_selector=f"rayCluster={ray_cluster_name},ray.io/node-type=head",
        timeout_seconds=60,
    ):
        if event["object"].status.phase == "Running":
            assert event["object"].kind == "Pod"
            head_pod_name = event["object"].metadata.name
            end_time = time.time()
            print(f"{cluster_id} started in {end_time-start_time} sec")
            print(f"head pod {head_pod_name}")
            break
    assert head_pod_name is not None
    # step-4: e2e check it's alive
    cmd =  # NOTE: the probe-script string literal was elided in the source record
    while True:
        try:
            resp = (
                subprocess.check_output(
                    f'kubectl exec {head_pod_name} -- python -c "{cmd}"', shell=True
                )
                .decode()
                .strip()
            )
            if resp == "375":
                print("Service is up now!")
                break
            else:
                print(f"Failed with msg {resp}")
        except Exception as e:
            print("Error", e)
        time.sleep(2)
343
run_gcs_ft_on_k8s.py
Python
release/k8s_tests/run_gcs_ft_on_k8s.py
4d91f516ca59abd2f4880f36e17630ad3effee11
ray
8
278,792
3
6
3
15
3
0
3
6
_delegate_method
Remove pylint comments. PiperOrigin-RevId: 452353044
https://github.com/keras-team/keras.git
def _delegate_method(keras_tensor_cls, method_name):
18
tf_op_layer.py
Python
keras/layers/core/tf_op_layer.py
3613c3defc39c236fb1592c4f7ba1a9cc887343a
keras
1
224,484
4
8
2
29
4
0
4
18
is_css
Refactor URI handling to not have to deal with backslashes
https://github.com/mkdocs/mkdocs.git
def is_css(self):
    return self.src_uri.endswith('.css')
15
files.py
Python
mkdocs/structure/files.py
1c50987f9c17b228fdf22456aa369b83bd6b11b9
mkdocs
1
289,759
8
9
3
34
4
0
8
22
async_uninstall_addon
Refactor zwave_js add-on manager (#80883) * Make addon slug an instance attribute * Extract addon name and addon config * Update docstrings
https://github.com/home-assistant/core.git
async def async_uninstall_addon(self) -> None:
    await async_uninstall_addon(self._hass, self.addon_slug)
19
addon.py
Python
homeassistant/components/zwave_js/addon.py
838691f22f27852a05313809cdf9c51094ad3798
core
1
22,866
25
14
9
157
9
0
30
70
print_n_speak_index
VoiceAssistant This is a voice assistant coded in Python which can do the following: 1. Speak text entered by the user. 2. Search anything on Google. 3. Search anything on Wikipedia. 4. Read an MS Word (docx) document. 5. Read a book (PDF). 6. Can be used for dictation.
https://github.com/geekcomputers/Python.git
def print_n_speak_index(toc):
    dash = "-" * (100 - 7)
    space = " " * 47
    print(f"{space}INDEX")
    print(f"\n\nName : {dash} PageNo.\n\n\n\n")
    for topic in toc:
        eq_dash = "-" * (100 - len(topic[1]))
        print(f"{topic[1]} {eq_dash} {topic[2]}")
        speak(f"{topic[1]} {topic[2]}")
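A hedged dry-run sketch, assuming the function above is in scope and that `toc` entries follow the PyMuPDF-style [level, title, page] shape implied by the `topic[1]`/`topic[2]` indexing; the real text-to-speech helper is stubbed out here:

def speak(text):
    # Stub replacing the module's TTS helper so the example runs silently.
    print(f"[speak] {text}")

toc = [[1, "Introduction", 1], [1, "Getting Started", 5]]
print_n_speak_index(toc)
# Each title prints padded with dashes out to column 100, then its page number.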
60
textRead.py
Python
VoiceAssistant/Project_Basic_struct/textRead.py
39c49e07066b2a53e176d555af6a7bf8aabb8a9c
Python
2
260,324
74
13
24
263
30
0
98
345
fit
MAINT Use _validate_params in Power and Quantile Transformer (#23672) Co-authored-by: Guillaume Lemaitre <[email protected]>
https://github.com/scikit-learn/scikit-learn.git
def fit(self, X, y=None):
    self._validate_params()

    if self.n_quantiles > self.subsample:
        raise ValueError(
            "The number of quantiles cannot be greater than"
            " the number of samples used. Got {} quantiles"
            " and {} samples.".format(self.n_quantiles, self.subsample)
        )

    X = self._check_inputs(X, in_fit=True, copy=False)
    n_samples = X.shape[0]

    if self.n_quantiles > n_samples:
        warnings.warn(
            "n_quantiles (%s) is greater than the total number "
            "of samples (%s). n_quantiles is set to "
            "n_samples." % (self.n_quantiles, n_samples)
        )
    self.n_quantiles_ = max(1, min(self.n_quantiles, n_samples))

    rng = check_random_state(self.random_state)

    # Create the quantiles of reference
    self.references_ = np.linspace(0, 1, self.n_quantiles_, endpoint=True)
    if sparse.issparse(X):
        self._sparse_fit(X, rng)
    else:
        self._dense_fit(X, rng)

    return self
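A hedged usage sketch against the public scikit-learn API showing the clipping branch above; the exact warning text may vary between versions:

import numpy as np
from sklearn.preprocessing import QuantileTransformer

rng = np.random.RandomState(0)
X = rng.normal(size=(100, 2))

# n_quantiles=1000 exceeds the 100 samples, so fit() warns and clips
# n_quantiles_ down to n_samples before building the reference quantiles.
qt = QuantileTransformer(n_quantiles=1000, subsample=10_000, random_state=0)
qt.fit(X)
print(qt.n_quantiles_)  # -> 100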
164
_data.py
Python
sklearn/preprocessing/_data.py
a63a827e6db3fcb65ea05172ee18f67e6a6a27d7
scikit-learn
4
35,044
164
18
46
797
48
0
338
1,099
get_position_embeds
Upgrade black to version ~=22.0 (#15565) * Upgrade black to version ~=22.0 * Check copies * Fix code
https://github.com/huggingface/transformers.git
def get_position_embeds(self, seq_len, dtype, device):
    d_model = self.config.d_model
    if self.config.attention_type == "factorized":
        # Notations from the paper, appendix A.2.2, final formula.
        # We need to create and return the matrices phi, psi, pi and omega.
        pos_seq = torch.arange(0, seq_len, 1.0, dtype=dtype, device=device)
        freq_seq = torch.arange(0, d_model // 2, 1.0, dtype=dtype, device=device)
        inv_freq = 1 / (10000 ** (freq_seq / (d_model // 2)))
        sinusoid = pos_seq[:, None] * inv_freq[None]
        sin_embed = torch.sin(sinusoid)
        sin_embed_d = self.sin_dropout(sin_embed)
        cos_embed = torch.cos(sinusoid)
        cos_embed_d = self.cos_dropout(cos_embed)
        # This is different from the formula in the paper...
        phi = torch.cat([sin_embed_d, sin_embed_d], dim=-1)
        psi = torch.cat([cos_embed, sin_embed], dim=-1)
        pi = torch.cat([cos_embed_d, cos_embed_d], dim=-1)
        omega = torch.cat([-sin_embed, cos_embed], dim=-1)
        return (phi, pi, psi, omega)
    else:
        # Notations from the paper, appendix A.2.1, final formula.
        # We need to create and return all the possible vectors R for all blocks and shifts.
        freq_seq = torch.arange(0, d_model // 2, 1.0, dtype=dtype, device=device)
        inv_freq = 1 / (10000 ** (freq_seq / (d_model // 2)))
        # Maximum relative positions for the first input
        rel_pos_id = torch.arange(-seq_len * 2, seq_len * 2, 1.0, dtype=dtype, device=device)
        zero_offset = seq_len * 2
        sinusoid = rel_pos_id[:, None] * inv_freq[None]
        sin_embed = self.sin_dropout(torch.sin(sinusoid))
        cos_embed = self.cos_dropout(torch.cos(sinusoid))
        pos_embed = torch.cat([sin_embed, cos_embed], dim=-1)

        pos = torch.arange(0, seq_len, dtype=dtype, device=device)
        pooled_pos = pos
        position_embeds_list = []
        for block_index in range(0, self.config.num_blocks):
            # For each block with block_index > 0, we need two types of position embeddings:
            #   - Attention(pooled-q, unpooled-kv)
            #   - Attention(pooled-q, pooled-kv)
            # For block_index = 0 we only need the second one and leave the first one as None.

            # First type
            if block_index == 0:
                position_embeds_pooling = None
            else:
                pooled_pos = self.stride_pool_pos(pos, block_index)

                # construct rel_pos_id
                stride = 2 ** (block_index - 1)
                rel_pos = self.relative_pos(pos, stride, pooled_pos, shift=2)
                rel_pos = rel_pos[:, None] + zero_offset
                rel_pos = rel_pos.expand(rel_pos.size(0), d_model)
                position_embeds_pooling = torch.gather(pos_embed, 0, rel_pos)

            # Second type
            pos = pooled_pos
            stride = 2**block_index
            rel_pos = self.relative_pos(pos, stride)

            rel_pos = rel_pos[:, None] + zero_offset
            rel_pos = rel_pos.expand(rel_pos.size(0), d_model)
            position_embeds_no_pooling = torch.gather(pos_embed, 0, rel_pos)

            position_embeds_list.append([position_embeds_no_pooling, position_embeds_pooling])
        return position_embeds_list
498
modeling_funnel.py
Python
src/transformers/models/funnel/modeling_funnel.py
7732d0fe7a759c9844215920e9f1c5540eafb1a6
transformers
4
266,444
8
12
2
44
6
0
8
14
ansible_concat
Attach concat func to an environment class (#76282) * Attach concat func to an environment class ci_complete * clog and docstrings
https://github.com/ansible/ansible.git
def ansible_concat(nodes):
    return ''.join([to_text(_fail_on_undefined(v)) for v in nodes])
25
native_helpers.py
Python
lib/ansible/template/native_helpers.py
8febd37f325b049afe448af689064ee019d1099c
ansible
2
178,662
110
12
45
428
37
0
159
485
queryRuntimeInformationMultiple
Plugins: Add DLL specific plugin with configuration * As a first one, this solves the shapely DLL issue by using the new configuration. * Existing code should be migrated to this over time.
https://github.com/Nuitka/Nuitka.git
def queryRuntimeInformationMultiple(self, info_name, setup_codes, values):
    info_name = self.plugin_name.replace("-", "_") + "_" + info_name

    if info_name in self._runtime_information_cache:
        return self._runtime_information_cache[info_name]

    keys = []
    query_codes = []

    for key, value_expression in values:
        keys.append(key)
        query_codes.append("print(repr(%s))" % value_expression)
        query_codes.append('print("-" * 27)')

    if type(setup_codes) is str:
        setup_codes = setup_codes.split("\n")

    cmd = r % {  # NOTE: the raw template string literal after "r" was elided in the source record
        "setup_codes": "\n ".join(setup_codes),
        "query_codes": "\n".join(query_codes),
    }

    try:
        feedback = check_output([sys.executable, "-c", cmd])
    except NuitkaCalledProcessError as e:
        if e.returncode == 38:
            return None
        raise

    if str is not bytes:
        # We want to work with strings, that's hopefully OK.
        feedback = feedback.decode("utf8")

    # Ignore Windows newlines difference.
    feedback = [line.strip() for line in feedback.splitlines()]

    if feedback.count("-" * 27) != len(keys):
        self.sysexit(
            "Error, mismatch in output retrieving %r information." % info_name
        )

    feedback = [line for line in feedback if line != "-" * 27]

    NamedTupleResult = namedtuple(info_name, keys)

    # We are being lazy here, the code is trusted, pylint: disable=eval-used
    self._runtime_information_cache[info_name] = NamedTupleResult(
        *(eval(value) for value in feedback)
    )

    return self._runtime_information_cache[info_name]
253
PluginBase.py
Python
nuitka/plugins/PluginBase.py
e7e31cbdb430a1b5f9ec4e186f10a9e60908a4e9
Nuitka
12
271,565
22
12
11
116
10
0
33
94
flatten_metrics_in_order
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def flatten_metrics_in_order(logs, metrics_names):
    results = []
    for name in metrics_names:
        if name in logs:
            results.append(logs[name])
    for key in sorted(logs.keys()):
        if key not in metrics_names:
            results.append(logs[key])
    if len(results) == 1:
        return results[0]
    return results
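A small illustrative run, assuming the function above is in scope; it shows the named metrics coming first, leftovers appended in sorted-key order, and the single-value unwrapping:

logs = {"loss": 0.25, "accuracy": 0.9, "val_loss": 0.3}
print(flatten_metrics_in_order(logs, ["loss", "accuracy"]))
# -> [0.25, 0.9, 0.3]  ("val_loss" trails because it is not in metrics_names)
print(flatten_metrics_in_order({"loss": 0.25}, ["loss"]))
# -> 0.25  (a single result is returned unwrapped, not as a one-item list)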
73
training.py
Python
keras/engine/training.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
6
177,502
29
13
8
106
11
0
39
79
multi_source_dijkstra_path_length
Hide edges with a weight of None in A*. (#5945) * Hide edges with a weight of None in A*. This matches the Dijkstra's weight interface. * Update Dijkstra's and A* docs for weights of None. * Add tests for A* with weight of None. * Add another test for A* with a weight function. * Document that None indicates a hidden edge.
https://github.com/networkx/networkx.git
def multi_source_dijkstra_path_length(G, sources, cutoff=None, weight="weight"):
    if not sources:
        raise ValueError("sources must not be empty")
    for s in sources:
        if s not in G:
            raise nx.NodeNotFound(f"Node {s} not found in graph")
    weight = _weight_function(G, weight)
    return _dijkstra_multisource(G, sources, weight, cutoff=cutoff)
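A hedged usage sketch via the public networkx entry point; it assumes the usual default that edges without an explicit weight attribute count as weight 1:

import networkx as nx

G = nx.path_graph(5)  # 0-1-2-3-4, every edge has implicit weight 1
# Distance from each node to its *nearest* source.
lengths = nx.multi_source_dijkstra_path_length(G, sources={0, 4})
print(dict(sorted(lengths.items())))
# -> {0: 0, 1: 1, 2: 2, 3: 1, 4: 0}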
65
weighted.py
Python
networkx/algorithms/shortest_paths/weighted.py
d82815dba6c8ddce19cd49f700298dc82a58f066
networkx
4
100,313
79
18
26
425
23
0
107
447
_get_per_session_stats
Update code to support Tensorflow versions up to 2.8 (#1213) * Update maximum tf version in setup + requirements * - bump max version of tf version in launcher - standardise tf version check * update keras get_custom_objects for tf>2.6 * bugfix: force black text in GUI file dialogs (linux) * dssim loss - Move to stock tf.ssim function * Update optimizer imports for compatibility * fix logging for tf2.8 * Fix GUI graphing for TF2.8 * update tests * bump requirements.txt versions * Remove limit on nvidia-ml-py * Graphing bugfixes - Prevent live graph from displaying if data not yet available * bugfix: Live graph. Collect loss labels correctly * fix: live graph - swallow inconsistent loss errors * Bugfix: Prevent live graph from clearing during training * Fix graphing for AMD
https://github.com/deepfakes/faceswap.git
def _get_per_session_stats(self):
    if self._per_session_stats is None:
        logger.debug("Collating per session stats")
        compiled = []
        for session_id, ts_data in self._time_stats.items():
            logger.debug("Compiling session ID: %s", session_id)
            if self._state is None:
                logger.debug("Session state dict doesn't exist. Most likely task has been "
                             "terminated during compilation")
                return
            compiled.append(self._collate_stats(session_id, ts_data))
        self._per_session_stats = list(sorted(compiled, key=lambda k: k["session"]))
    elif self._session.is_training:
        logger.debug("Collating per session stats for latest training data")
        session_id = self._session.session_ids[-1]
        ts_data = self._time_stats[session_id]
        if session_id > len(self._per_session_stats):
            self._per_session_stats.append(self._collate_stats(session_id, ts_data))
        stats = self._per_session_stats[-1]
        stats["start"] = ts_data["start_time"]
        stats["end"] = ts_data["end_time"]
        stats["elapsed"] = int(stats["end"] - stats["start"])
        stats["iterations"] = ts_data["iterations"]
        stats["rate"] = (((stats["batch"] * 2) * stats["iterations"]) / stats["elapsed"]
                         if stats["elapsed"] != 0 else 0)
    logger.debug("per_session_stats: %s", self._per_session_stats)
249
stats.py
Python
lib/gui/analysis/stats.py
c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf
faceswap
7
113,522
6
10
3
32
5
0
6
20
next_label
Mutable V3 (Stage 1) - Label namespace and utilities (#5194)
https://github.com/microsoft/nni.git
def next_label(self) -> str:
    return str(uid(self.absolute_scope))
18
utils.py
Python
nni/mutable/utils.py
6641d309ac6e98c69295ac3d59bf7fa23bdb6588
nni
1
126,266
17
12
9
87
13
1
21
59
_detect_config_single
[air] Add annotation for Tune module. (#27060) Co-authored-by: Kai Fricke <[email protected]>
https://github.com/ray-project/ray.git
def _detect_config_single(func):
    func_sig = inspect.signature(func)
    use_config_single = True
    try:
        func_sig.bind({})
    except Exception as e:
        logger.debug(str(e))
        use_config_single = False
    return use_config_single


@PublicAPI()
@PublicAPI()
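The probe above boils down to a single signature.bind call. Here is a self-contained hedged sketch of the same idea; the function names below are illustrative stand-ins, not Tune's API:

import inspect

def takes_single_config(func):
    # Mirrors the check above: can the signature bind one positional dict?
    try:
        inspect.signature(func).bind({})
        return True
    except TypeError:
        return False

def trainable(config):
    return config

def trainable_pair(config, checkpoint_dir):
    return config

print(takes_single_config(trainable))       # -> True
print(takes_single_config(trainable_pair))  # -> False (second arg is required)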
45
util.py
Python
python/ray/tune/utils/util.py
eb69c1ca286a2eec594f02ddaf546657a8127afd
ray
2
245,439
44
14
22
323
30
0
62
272
run
[Feature] Support TeacherStudentValLoop (#8503) * Support MultiValLoop * Rename as TeacherStudentValLoop
https://github.com/open-mmlab/mmdetection.git
def run(self):
    self.runner.call_hook('before_val')
    self.runner.call_hook('before_val_epoch')
    self.runner.model.eval()

    model = self.runner.model
    if is_model_wrapper(model):
        model = model.module
    assert hasattr(model, 'teacher')
    assert hasattr(model, 'student')
    predict_on = model.semi_test_cfg.get('predict_on', None)
    multi_metrics = dict()
    for _predict_on in ['teacher', 'student']:
        model.semi_test_cfg['predict_on'] = _predict_on
        for idx, data_batch in enumerate(self.dataloader):
            self.run_iter(idx, data_batch)
        # compute metrics
        metrics = self.evaluator.evaluate(len(self.dataloader.dataset))
        multi_metrics.update(
            {'/'.join((_predict_on, k)): v for k, v in metrics.items()})
    model.semi_test_cfg['predict_on'] = predict_on

    self.runner.call_hook('after_val_epoch', metrics=multi_metrics)
    self.runner.call_hook('after_val')
191
loops.py
Python
mmdet/engine/runner/loops.py
4886ac46ee1e18fe5b0fc3fd5fe3a24dd5a5743f
mmdetection
5
86,309
21
12
10
121
22
0
21
131
test_user_rate_reached_perf_issues
ref(perf issues): Enable ignore in a time period (#39120) Enable ignoring a performance issue in a time period e.g. ignore this until it happens 10x / hr or ignore until 10 users experience it in an hour.
https://github.com/getsentry/sentry.git
def test_user_rate_reached_perf_issues(self):
    snooze = GroupSnooze.objects.create(group=self.perf_group, user_count=10, user_window=60)
    for i in range(0, 10):
        self.store_transaction(
            environment=None,
            project_id=self.project.id,
            user_id=str(i),
            groups=[self.perf_group],
        )
    assert not snooze.is_valid(test_rates=True)
80
test_groupsnooze.py
Python
tests/sentry/models/test_groupsnooze.py
d745edbd591063f2c3241cd1960c361834058823
sentry
2
187,373
56
13
24
316
28
0
73
225
log_current_versions
cli: list all dependencies in debug output - Require importlib-metadata as fallback on Python < 3.8 - Add importlib_metadata to streamlink_cli.compat - List all dependencies in `log_current_versions` - Update tests
https://github.com/streamlink/streamlink.git
def log_current_versions():
    if not logger.root.isEnabledFor(logging.DEBUG):
        return

    # macOS
    if sys.platform == "darwin":
        os_version = f"macOS {platform.mac_ver()[0]}"
    # Windows
    elif sys.platform == "win32":
        os_version = f"{platform.system()} {platform.release()}"
    # Linux / other
    else:
        os_version = platform.platform()

    log.debug(f"OS: {os_version}")
    log.debug(f"Python: {platform.python_version()}")
    log.debug(f"Streamlink: {streamlink_version}")

    # https://peps.python.org/pep-0508/#names
    re_name = re.compile(r"[A-Z\d](?:[A-Z\d._-]*[A-Z\d])?", re.IGNORECASE)
    log.debug("Dependencies:")
    for name in [
        match.group(0)
        for match in map(re_name.match, importlib_metadata.requires("streamlink"))
        if match is not None
    ]:
        try:
            version = importlib_metadata.version(name)
        except importlib_metadata.PackageNotFoundError:
            continue
        log.debug(f" {name}: {version}")
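The PEP 508 name regex is the subtle part. A small hedged demo of how it peels the distribution name off a requirement string; the sample string is illustrative, not taken from streamlink's metadata:

import re

re_name = re.compile(r"[A-Z\d](?:[A-Z\d._-]*[A-Z\d])?", re.IGNORECASE)
requirement = "requests>=2.26.0; python_version >= '3.7'"
match = re_name.match(requirement)
print(match.group(0))  # -> requests (version specifier and marker are ignored)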
146
main.py
Python
src/streamlink_cli/main.py
c319aa445e7577134d61da587c6338730b82a4c8
streamlink
8
82,383
12
10
3
51
7
0
12
33
get_instance_icon_alt
ci: Added codespell (#7355) Co-authored-by: Christian Clauss <[email protected]> * ci: codespell config taken from #7292
https://github.com/django-cms/django-cms.git
def get_instance_icon_alt(self):
    instance, plugin = self.get_plugin_instance()
    return force_str(plugin.icon_alt(instance)) if instance else ''
29
pluginmodel.py
Python
cms/models/pluginmodel.py
c1290c9ff89cb00caa5469129fd527e9d82cd820
django-cms
2
153,982
22
10
5
95
14
0
25
64
_multiindex_possibly_contains_key
FIX-#4358: MultiIndex `loc` shouldn't drop levels for full-key lookups (#4608) Signed-off-by: Karthik Velayutham <[email protected]>
https://github.com/modin-project/modin.git
def _multiindex_possibly_contains_key(self, axis, key):
    if not self.qc.has_multiindex(axis=axis):
        return False

    multiindex = self.df.index if axis == 0 else self.df.columns
    return isinstance(key, tuple) and len(key) == len(multiindex.levels)
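A hedged plain-pandas sketch of the level-count test used above; the frame here is illustrative:

import pandas as pd

mi = pd.MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1)])
df = pd.DataFrame({"x": [10, 20, 30]}, index=mi)

full_key = ("a", 2)    # covers both levels -> treated as a full-key lookup
partial_key = ("a",)   # covers one level   -> a partial lookup
print(isinstance(full_key, tuple) and len(full_key) == len(mi.levels))        # -> True
print(isinstance(partial_key, tuple) and len(partial_key) == len(mi.levels))  # -> False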
61
indexing.py
Python
modin/pandas/indexing.py
da7bd1a14752895a38342b2faa3220a8db4cb7ff
modin
4
277,193
19
13
7
109
12
0
23
80
predict
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def predict(self, x, **kwargs):
    proba = self.model.predict(x, **kwargs)
    if proba.shape[-1] > 1:
        classes = proba.argmax(axis=-1)
    else:
        classes = (proba > 0.5).astype("int32")
    return self.classes_[classes]
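The branch above reduces to argmax versus a 0.5 threshold. A short numpy-only demo; the probability arrays are illustrative:

import numpy as np

proba_multi = np.array([[0.1, 0.7, 0.2]])   # softmax-style, 3 classes
proba_binary = np.array([[0.8]])            # single sigmoid column

print(proba_multi.argmax(axis=-1))           # -> [1]
print((proba_binary > 0.5).astype("int32"))  # -> [[1]]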
69
scikit_learn.py
Python
keras/wrappers/scikit_learn.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
2
166,206
6
7
5
25
4
0
6
20
size
ENH: Implement DataFrame interchange protocol (#46141)
https://github.com/pandas-dev/pandas.git
def size(self) -> int:
    return self._col.size
14
column.py
Python
pandas/core/exchange/column.py
90140f055892a46f473bd26affab88a7f171e394
pandas
1