Dataset schema. For string columns, Min and Max refer to the string length; language is a categorical column with a single class.

Column           Type            Min    Max
id               int64           20     338k
vocab_size       int64           2      671
ast_levels       int64           4      32
nloc             int64           1      451
n_ast_nodes      int64           12     5.6k
n_identifiers    int64           1      186
n_ast_errors     int64           0      10
n_words          int64           2      2.17k
n_whitespaces    int64           2      13.8k
fun_name         string length   2      73
commit_message   string length   51     15.3k
url              string length   31     59
code             string length   51     31k
ast_errors       string length   0      1.46k
token_counts     int64           6      3.32k
file_name        string length   5      56
language         string          1 class (Python)
path             string length   7      134
commit_id        string length   40     40
repo             string length   3      28
complexity       int64           1      153
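The rows that follow are individual records conforming to this schema. Below is a minimal sketch of how a dataset with these columns could be loaded and filtered with the Hugging Face datasets library; the repository id "user/python-functions-with-commits" is a hypothetical placeholder, and the split name "train" is assumed.

from datasets import load_dataset

# Hypothetical repository id; substitute the real dataset path.
ds = load_dataset("user/python-functions-with-commits", split="train")

# Each record exposes the columns listed in the schema above.
row = ds[0]
print(row["fun_name"], row["repo"], row["path"], row["complexity"])
print(row["code"][:200])                       # the function source as a single string
print(row["commit_message"].splitlines()[0])   # first line of the commit message

# The int64 columns support cheap filtering, e.g. keep simple, error-free functions.
simple = ds.filter(lambda r: r["complexity"] <= 2 and r["n_ast_errors"] == 0)
print(len(simple))

Because every numeric column is a plain int64 and every text column a single string, records can also be streamed or converted to a pandas DataFrame (ds.to_pandas()) without any nested-feature handling.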
291,567
40
10
11
173
9
0
56
181
test_valid_country
Add valid country tests to workday sensors (#82799) fixes undefined
https://github.com/home-assistant/core.git
def test_valid_country(self):
    # Invalid UTF-8, must not contain U+D800 to U+DFFF
    with pytest.raises(vol.Invalid):
        binary_sensor.valid_country("\ud800")
    with pytest.raises(vol.Invalid):
        binary_sensor.valid_country("\udfff")
    # Country MUST NOT be empty
    with pytest.raises(vol.Invalid):
        binary_sensor.valid_country("")
    # Country must be supported by holidays
    with pytest.raises(vol.Invalid):
        binary_sensor.valid_country("HomeAssistantLand")
    # Valid country code validation must not raise an exception
    for country in ("IM", "LI", "US"):
        assert binary_sensor.valid_country(country) == country
90
test_binary_sensor.py
Python
tests/components/workday/test_binary_sensor.py
eb25968b31dcf76d6790cee99ff30ead3d27b8af
core
2
216,560
12
9
3
50
6
1
15
20
_pyeapi_conn
Deprecated netmiko_conn and pyeapi_conn in napalm_mod.py as these function should not be called from the CLI
https://github.com/saltstack/salt.git
def _pyeapi_conn(**kwargs):
    pyeapi_kwargs = pyeapi_nxos_api_args(**kwargs)
    return __salt__["pyeapi.get_connection"](**pyeapi_kwargs)


# ----------------------------------------------------------------------------------------------------------------------
# callable functions
# ----------------------------------------------------------------------------------------------------------------------
@proxy_napalm_wrap
@proxy_napalm_wrap
23
napalm_mod.py
Python
salt/modules/napalm_mod.py
d8305bfaa7b98d898f5963b01ca75f277c266322
salt
1
80,135
45
13
39
638
18
0
61
402
test_blocks_and_data_not_operated_on_intact
Add tests for streamfield migration helpers Currently failing due to wagtail-factories being broken on Wagtail 4.1: https://github.com/wagtail/wagtail-factories/issues/65
https://github.com/wagtail/wagtail.git
def test_blocks_and_data_not_operated_on_intact(self):
    altered_raw_data = apply_changes_to_raw_data(
        raw_data=self.raw_data,
        block_path_str="nestedstruct.stream1",
        operation=RenameStreamChildrenOperation(
            old_name="char1", new_name="renamed1"
        ),
        streamfield=models.SampleModel.content,
    )

    self.assertEqual(altered_raw_data[0], self.raw_data[0])
    self.assertEqual(altered_raw_data[3], self.raw_data[3])

    self.assertEqual(altered_raw_data[1]["id"], self.raw_data[1]["id"])
    self.assertEqual(altered_raw_data[2]["id"], self.raw_data[2]["id"])
    self.assertEqual(altered_raw_data[1]["type"], self.raw_data[1]["type"])
    self.assertEqual(altered_raw_data[2]["type"], self.raw_data[2]["type"])

    for key in self.raw_data[1]["value"].keys():
        self.assertIn(key, altered_raw_data[1]["value"])
    for key in self.raw_data[1]["value"].keys():
        self.assertIn(key, altered_raw_data[2]["value"])

    self.assertEqual(
        altered_raw_data[1]["value"]["char1"], self.raw_data[1]["value"]["char1"]
    )
    self.assertEqual(
        altered_raw_data[2]["value"]["char1"], self.raw_data[2]["value"]["char1"]
    )
    self.assertEqual(
        altered_raw_data[1]["value"]["struct1"],
        self.raw_data[1]["value"]["struct1"],
    )
    self.assertEqual(
        altered_raw_data[2]["value"]["struct1"],
        self.raw_data[2]["value"]["struct1"],
    )
    self.assertEqual(
        altered_raw_data[1]["value"]["list1"], self.raw_data[1]["value"]["list1"]
    )
    self.assertEqual(
        altered_raw_data[2]["value"]["list1"], self.raw_data[2]["value"]["list1"]
    )
394
test_nested_structures.py
Python
wagtail/tests/streamfield_migrations/test_nested_structures.py
ad65741b94f36fbe793cf15f0ab002482070cdb6
wagtail
3
246,920
18
6
27
15
2
0
19
40
test_third_party_rules
Replace assertEquals and friends with non-deprecated versions. (#12092)
https://github.com/matrix-org/synapse.git
def test_third_party_rules(self):
    # patch the rules module with a Mock which will return False for some event
    # types
194
test_third_party_rules.py
Python
tests/rest/client/test_third_party_rules.py
02d708568b476f2f7716000b35c0adfa4cbd31b3
synapse
2
275,783
40
9
20
203
21
0
44
232
get_config
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def get_config(self):
    json_word_counts = json.dumps(self.word_counts)
    json_word_docs = json.dumps(self.word_docs)
    json_index_docs = json.dumps(self.index_docs)
    json_word_index = json.dumps(self.word_index)
    json_index_word = json.dumps(self.index_word)

    return {
        "num_words": self.num_words,
        "filters": self.filters,
        "lower": self.lower,
        "split": self.split,
        "char_level": self.char_level,
        "oov_token": self.oov_token,
        "document_count": self.document_count,
        "word_counts": json_word_counts,
        "word_docs": json_word_docs,
        "index_docs": json_index_docs,
        "index_word": json_index_word,
        "word_index": json_word_index,
    }
121
text.py
Python
keras/preprocessing/text.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
1
169,041
50
12
21
192
18
0
75
277
_process_converter
TYP: Autotyping (#48191) * annotate-magics * annotate-imprecise-magics * none-return * scalar-return * pyi files * ignore vendored file * manual changes * ignore pyright in pickle_compat (these errors would be legit if the current __new__ methods were called but I think these pickle tests call older __new__ methods which allowed providing multiple positional arguments) * run autotyping in pre-commit * remove final and expand safe (and add annotate-imprecise-magics)
https://github.com/pandas-dev/pandas.git
def _process_converter(self, f, filt=None) -> None:
    if filt is None:
        filt = lambda col, c: True

    obj = self.obj
    assert obj is not None  # for mypy

    needs_new_obj = False
    new_obj = {}
    for i, (col, c) in enumerate(obj.items()):
        if filt(col, c):
            new_data, result = f(col, c)
            if result:
                c = new_data
                needs_new_obj = True
        new_obj[i] = c

    if needs_new_obj:
        # possibly handle dup columns
        new_frame = DataFrame(new_obj, index=obj.index)
        new_frame.columns = obj.columns
        self.obj = new_frame
122
_json.py
Python
pandas/io/json/_json.py
54347fe684e0f7844bf407b1fb958a5269646825
pandas
6
257,510
7
6
109
13
1
0
7
13
tutorial14_query_classifier
Tutorial 14 edit (#2663) * Rewrite Tutorial 14 for increased user-friendliness * Update Tutorial14 .py file to match .ipynb file * Update Documentation & Code Style * unblock the ci * ignore error in jitterbit/get-changed-files Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: Sara Zan <[email protected]>
https://github.com/deepset-ai/haystack.git
def tutorial14_query_classifier():
    # Useful for framing headers
728
Tutorial14_Query_Classifier.py
Python
tutorials/Tutorial14_Query_Classifier.py
b87c0c950b2243f47fb249aa3865d4c46edb16df
haystack
5
273,362
11
10
6
68
9
0
15
41
listify_tensors
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def listify_tensors(x):
    if tf.is_tensor(x):
        x = x.numpy()
    if isinstance(x, np.ndarray):
        x = x.tolist()
    return x
40
preprocessing_utils.py
Python
keras/layers/preprocessing/preprocessing_utils.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
3
42,118
27
10
4
219
21
0
27
90
tick_params
Adding Grid.tick_params() method. (#2944) * Adding Grid.tick_params() method. * Address PR comments. * Add What's New entry. * Switch tick_params() test to use pad.
https://github.com/mwaskom/seaborn.git
def tick_params(self, axis='both', **kwargs):
    for ax in self.figure.axes:
        ax.tick_params(axis=axis, **kwargs)
    return self


_facet_docs = dict(
    data=dedent(),
    rowcol=dedent(),
    rowcol_order=dedent(),
    col_wrap=dedent(),
    share_xy=dedent(),
    height=dedent(),
    aspect=dedent(),
    palette=dedent(),
    legend_out=dedent(),
    margin_titles=dedent(),
    facet_kws=dedent(),
)
35
axisgrid.py
Python
seaborn/axisgrid.py
72d1322ee583eb481346e5e661c2998c8a7445dd
seaborn
2
178,331
23
13
13
89
10
0
29
140
isDebianPackagePython
Plugins: Avoid warning from pkg_resources for deprecated versions * We don't care about these, and this warning makes it look like we are doing something wrong.
https://github.com/Nuitka/Nuitka.git
def isDebianPackagePython():
    if not isLinux():
        return False

    if python_version < 0x300:
        return hasattr(sys, "_multiarch")
    else:
        with withNoDeprecationWarning():
            try:
                from distutils.dir_util import _multiarch
            except ImportError:
                return False
            else:
                return True
49
PythonFlavors.py
Python
nuitka/PythonFlavors.py
51e4ae8239bd7824ec3c117780be82e1193b2f7a
Nuitka
4
314,865
38
10
15
225
23
0
67
118
test_pushed_variable_status_change
Add tests for LCN sensor and binary_sensor platforms (#67263)
https://github.com/home-assistant/core.git
async def test_pushed_variable_status_change(hass, entry, lcn_connection):
    device_connection = get_device_connection(hass, (0, 7, False), entry)
    address = LcnAddr(0, 7, False)

    # push status variable
    inp = ModStatusVar(address, Var.VAR1, VarValue.from_celsius(42))
    await device_connection.async_process_input(inp)
    await hass.async_block_till_done()

    state = hass.states.get(SENSOR_VAR1)
    assert state is not None
    assert float(state.state) == 42.0

    # push status setpoint
    inp = ModStatusVar(address, Var.R1VARSETPOINT, VarValue.from_celsius(42))
    await device_connection.async_process_input(inp)
    await hass.async_block_till_done()

    state = hass.states.get(SENSOR_SETPOINT1)
    assert state is not None
    assert float(state.state) == 42.0
148
test_sensor.py
Python
tests/components/lcn/test_sensor.py
b7b8feda0ffb7487954545c96c50e7f64e2195bc
core
1
243,060
13
9
3
59
7
0
13
34
textsize
add textbbox and textlength to ImageDraw2 and update tests
https://github.com/python-pillow/Pillow.git
def textsize(self, text, font):
    deprecate("textsize", 10, "textbbox or textlength")
    return self.draw.textsize(text, font=font.font, __internal__=True)
37
ImageDraw2.py
Python
src/PIL/ImageDraw2.py
1bf87556ef9953eeea5751714d87bdcc98b49702
Pillow
1
100,727
24
13
15
105
13
0
28
102
_check_valid_data
Bugfixes: - Stats graph - Handle NaNs in data - logger - de-elevate matplotlib font messages
https://github.com/deepfakes/faceswap.git
def _check_valid_data(self) -> bool:
    logger.debug("Validating data. %s",
                 {key: len(val) for key, val in self._display_data.stats.items()})
    if any(len(val) == 0  # pylint:disable=len-as-condition
           for val in self._display_data.stats.values()):
        return False
    return True
64
popup_session.py
Python
lib/gui/popup_session.py
afec52309326304f4323029039e49bfcf928ef43
faceswap
4
268,720
37
10
5
71
7
0
47
112
cgroupns_option_supported
ansible-test - Improve container management. (#78550) See changelogs/fragments/ansible-test-container-management.yml for details.
https://github.com/ansible/ansible.git
def cgroupns_option_supported(self) -> bool:
    if self.engine == 'docker':
        # Docker added support for the `--cgroupns` option in version 20.10.
        # Both the client and server must support the option to use it.
        # See: https://docs.docker.com/engine/release-notes/#20100
        return self.client_major_minor_version >= (20, 10) and self.server_major_minor_version >= (20, 10)

    raise NotImplementedError(self.engine)
42
docker_util.py
Python
test/lib/ansible_test/_internal/docker_util.py
cda16cc5e9aa8703fb4e1ac0a0be6b631d9076cc
ansible
3
216,118
60
14
28
379
20
0
78
280
test_remove_not_installed
Fix exception in yumpkg.remove for not installed package
https://github.com/saltstack/salt.git
def test_remove_not_installed():
    name = "foo"

    list_pkgs_mock = MagicMock(return_value={})
    cmd_mock = MagicMock(
        return_value={"pid": 12345, "retcode": 0, "stdout": "", "stderr": ""}
    )
    salt_mock = {
        "cmd.run_all": cmd_mock,
        "lowpkg.version_cmp": rpm.version_cmp,
        "pkg_resource.parse_targets": MagicMock(
            return_value=({name: None}, "repository")
        ),
    }
    with patch.object(yumpkg, "list_pkgs", list_pkgs_mock), patch(
        "salt.utils.systemd.has_scope", MagicMock(return_value=False)
    ), patch.dict(yumpkg.__salt__, salt_mock):

        # Test yum
        with patch.dict(yumpkg.__context__, {"yum_bin": "yum"}), patch.dict(
            yumpkg.__grains__, {"os": "CentOS", "osrelease": 7}
        ):
            yumpkg.remove(name)
            cmd_mock.assert_not_called()

        # Test dnf
        yumpkg.__context__.pop("yum_bin")
        cmd_mock.reset_mock()
        with patch.dict(yumpkg.__context__, {"yum_bin": "dnf"}), patch.dict(
            yumpkg.__grains__, {"os": "Fedora", "osrelease": 27}
        ):
            yumpkg.remove(name)
            cmd_mock.assert_not_called()
212
test_yumpkg.py
Python
tests/pytests/unit/modules/test_yumpkg.py
8ea5342cbde034383938e244cdb16a0bf8a777e8
salt
1
177,017
18
8
8
100
7
0
21
77
test_naive_lowest_common_ancestor2
Naive lowest common ancestor implementation (#5736) * Add naive lca methods * Naive algorithm implementation for LCA * Modify naive lca functions * Correct parameters of nx.ancestors * Update lowest_common_ancestors.py * Parametrize tests * Apply suggestions from code review Co-authored-by: Dan Schult <[email protected]> * Yield instead of append * Tests for naive lca * Correct test cases for naive lca algorithms * Apply suggestions from code review Co-authored-by: Mridul Seth <[email protected]> * Fix function name -when calling * Make requested changes * Inlining _get_a_lowest_common_ancestor Co-authored-by: dtuncturk <[email protected]> Co-authored-by: Dan Schult <[email protected]> Co-authored-by: Mridul Seth <[email protected]>
https://github.com/networkx/networkx.git
def test_naive_lowest_common_ancestor2(self):
    G = nx.DiGraph()
    G.add_edge(0, 1)
    G.add_edge(2, 0)
    G.add_edge(2, 3)
    G.add_edge(4, 0)
    G.add_edge(5, 2)

    assert naive_lca(G, 1, 3) == 2
64
test_lowest_common_ancestors.py
Python
networkx/algorithms/tests/test_lowest_common_ancestors.py
b2f91c34a23058dd70b41784af0d87890216026a
networkx
1
141,325
17
9
3
73
8
0
18
39
all
[Data/AIR] Move `TensorExtension` to `ray.air` for use in other packages (#25517) Moves Tensor extensions to ray.air to facilitate their use in other Ray libraries (AIR, Serve).
https://github.com/ray-project/ray.git
def all(self, axis=None, out=None, keepdims=False):
    result = self._tensor.all(axis=axis, out=out, keepdims=keepdims)
    return result if axis is None else TensorArray(result)
49
pandas.py
Python
python/ray/air/util/tensor_extensions/pandas.py
e0a63f770f9afd6eda2d4da9bf508067429585ae
ray
2
46,701
23
13
9
103
9
0
27
118
redirect_or_json
Add details drawer to Grid View (#22123) * make UI and tree work with mapped tasks basic slide drawer reformat grid background colors improve rendering and add selected dag run fix hover and extra prop switch from drawer to details section add tooltip info to details use API make side panel collapsible, useTasks, dag run actions dag run actions w/ react-query task instance links task actions remove modals adjust panel width and use status color minor details styling add duration to tooltips add last scheduling decision and fix tests * move ref and selection to providers * fix test with mock providers * update TI and DR buttons * download logs and external logs * add extra links to TI details * download log bug fixes * fix extra links, hide local TZ if UTC, * confirm mark task failed/success * Update confirm modals for runs and tasks - async/await on mutations instead of useeffect - add confirmation for run actions * Fix dialog scrolling * Code cleanup and fix task clear * Fix task/run label, dialog focus, dag details overflow, panel open/close * Add timezone provider * Fix TimezoneEvent import * Improve button UX - Remove details panel title - Add button to reset root - Make "More Details" buttons more specific - Specify timezone as DAG timezone * autorefresh dag run details * auto-refresh task instance details * revert useTreeData changes None of these changes were relevant to this PR. Better to be done separately. * Address PR feedback - useState vs useDisclosure - Remove extraneous elements - Copy changes - Wire up params for runTask - Breadcrumb padding * Handle task/run action sideeffects by separating autorefresh and treeData hooks * Clean up views.py endpoints - Pass 'Accept' headers for json returns - Consolidate more endpoints to return json or redirect * pass request as arg * remove request as arg * Anticipate when the 'Accept' header is not present * Fix argument count errors * Replace hard coded urls * Replace hard coded urls in react components * Update filter upstream link * Split TaskInstance details component * Fix undefined variables in tests * init_api_connexion in tests - add readme - rename context providers to avoid confusion with Airflow Providers * Fix url params, hide last item breadcrumb links * Update task run failed copy * Fix taskinstance/list buttons Co-authored-by: Tzu-ping Chung <[email protected]>
https://github.com/apache/airflow.git
def redirect_or_json(origin, msg, status=""):
    if request.headers.get('Accept') == 'application/json':
        return {'status': status, 'message': msg}
    else:
        if status:
            flash(msg, status)
        else:
            flash(msg)
        return redirect(origin)


######################################################################################
# Error handlers
######################################################################################
56
views.py
Python
airflow/www/views.py
2bb26a38070a4b949bfb210ef1d5644e016e373a
airflow
3
164,472
24
12
15
141
13
0
31
78
test_assert_extension_array_equal_less_precise
⬆️ UPGRADE: Autoupdate pre-commit config (#45752) Co-authored-by: MarcoGorelli <[email protected]>
https://github.com/pandas-dev/pandas.git
def test_assert_extension_array_equal_less_precise(decimals):
    rtol = 0.5 * 10**-decimals

    arr1 = SparseArray([0.5, 0.123456])
    arr2 = SparseArray([0.5, 0.123457])

    if decimals >= 5:
        msg = 

        with pytest.raises(AssertionError, match=msg):
            tm.assert_extension_array_equal(arr1, arr2, rtol=rtol)
    else:
        tm.assert_extension_array_equal(arr1, arr2, rtol=rtol)
89
test_assert_extension_array_equal.py
Python
pandas/tests/util/test_assert_extension_array_equal.py
419331c598a097896edae40bc0687e4127f97b6b
pandas
2
34,808
47
11
7
150
21
0
57
113
register_for_auto_class
Save code of registered custom models (#15379) * Allow dynamic modules to use relative imports * Work for configs * Fix last merge conflict * Save code of registered custom objects * Map strings to strings * Fix test * Add tokenizer * Rework tests * Tests * Ignore fixtures py files for tests * Tokenizer test + fix collection * With full path * Rework integration * Fix typo * Remove changes in conftest * Test for tokenizers * Add documentation * Update docs/source/custom_models.mdx Co-authored-by: Lysandre Debut <[email protected]> * Add file structure and file content * Add more doc * Style * Update docs/source/custom_models.mdx Co-authored-by: Suraj Patil <[email protected]> * Address review comments Co-authored-by: Lysandre Debut <[email protected]> Co-authored-by: Suraj Patil <[email protected]>
https://github.com/huggingface/transformers.git
def register_for_auto_class(cls, auto_class="FlaxAutoModel"):
    if not isinstance(auto_class, str):
        auto_class = auto_class.__name__

    import transformers.models.auto as auto_module

    if not hasattr(auto_module, auto_class):
        raise ValueError(f"{auto_class} is not a valid auto class.")

    cls._auto_class = auto_class


# To update the docstring, we need to copy the method, otherwise we change the original docstring.
FlaxPreTrainedModel.push_to_hub = copy_func(FlaxPreTrainedModel.push_to_hub)
FlaxPreTrainedModel.push_to_hub.__doc__ = FlaxPreTrainedModel.push_to_hub.__doc__.format(
    object="model", object_class="FlaxAutoModel", object_files="model checkpoint"
)
52
modeling_flax_utils.py
Python
src/transformers/modeling_flax_utils.py
44b21f117bcf71e3d88a11c3523c94b27949fdbf
transformers
3
125,444
26
13
7
102
12
1
30
74
process_dict_for_yaml_dump
[Serve] Fix Formatting of Error Messages printed in `serve status` (#26578)
https://github.com/ray-project/ray.git
def process_dict_for_yaml_dump(data):
    for k, v in data.items():
        if isinstance(v, dict):
            data[k] = process_dict_for_yaml_dump(v)
        elif isinstance(v, str):
            data[k] = remove_ansi_escape_sequences(v)

    return data


@click.group(help="CLI for managing Serve instances on a Ray cluster.")
@click.group(help="CLI for managing Serve instances on a Ray cluster.")
53
scripts.py
Python
python/ray/serve/scripts.py
b856daebbdc923a216ce412be477c61e6cc5707e
ray
4
247,436
77
12
48
391
20
0
138
577
test_redact_relation_thread
Invalidate caches when an event with a relation is redacted. (#12121) The caches for the target of the relation must be cleared so that the bundled aggregations are re-calculated after the redaction is processed.
https://github.com/matrix-org/synapse.git
def test_redact_relation_thread(self) -> None:
    channel = self._send_relation(
        RelationTypes.THREAD,
        EventTypes.Message,
        content={"body": "reply 1", "msgtype": "m.text"},
    )
    self.assertEqual(200, channel.code, channel.json_body)
    unredacted_event_id = channel.json_body["event_id"]

    # Note that the *last* event in the thread is redacted, as that gets
    # included in the bundled aggregation.
    channel = self._send_relation(
        RelationTypes.THREAD,
        EventTypes.Message,
        content={"body": "reply 2", "msgtype": "m.text"},
    )
    self.assertEqual(200, channel.code, channel.json_body)
    to_redact_event_id = channel.json_body["event_id"]

    # Both relations exist.
    event_ids, relations = self._make_relation_requests()
    self.assertEquals(event_ids, [to_redact_event_id, unredacted_event_id])
    self.assertDictContainsSubset(
        {
            "count": 2,
            "current_user_participated": True,
        },
        relations[RelationTypes.THREAD],
    )

    # And the latest event returned is the event that will be redacted.
    self.assertEqual(
        relations[RelationTypes.THREAD]["latest_event"]["event_id"],
        to_redact_event_id,
    )

    # Redact one of the reactions.
    self._redact(to_redact_event_id)

    # The unredacted relation should still exist.
    event_ids, relations = self._make_relation_requests()
    self.assertEquals(event_ids, [unredacted_event_id])
    self.assertDictContainsSubset(
        {
            "count": 1,
            "current_user_participated": True,
        },
        relations[RelationTypes.THREAD],
    )

    # And the latest event is now the unredacted event.
    self.assertEqual(
        relations[RelationTypes.THREAD]["latest_event"]["event_id"],
        unredacted_event_id,
    )
238
test_relations.py
Python
tests/rest/client/test_relations.py
f63bedef07360216a8de71dc38f00f1aea503903
synapse
1
20,848
18
12
8
67
8
0
23
80
_extra_width
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
https://github.com/pypa/pipenv.git
def _extra_width(self) -> int:
    width = 0
    if self.box and self.show_edge:
        width += 2
    if self.box:
        width += len(self.columns) - 1
    return width
40
table.py
Python
pipenv/patched/notpip/_vendor/rich/table.py
f3166e673fe8d40277b804d35d77dcdb760fc3b3
pipenv
4
159,357
6
7
3
27
3
0
6
20
required_packages
Add Logistic Regression to our NLU classifiers. (#10650) * added-logistic-regression * added * d0h! gotta copy the imports correctly * run black * black issues fixed * stash * added tolerance hyperparam * added random seed * fixed testing path * ran black * use joblib directly * insurance against sklearn changes * added try except * ran black * make code more DRY * flake8 * added type information * add train -> persists -> load -> load * add to test_train.py * fixed style issues * actually persist model * persist, i insist * fixed-bug * added-documentation * black * added changelog * added * moar-whitespace * removed stale param * added comments
https://github.com/RasaHQ/rasa.git
def required_packages() -> List[Text]:
    return ["sklearn"]
14
logistic_regression_classifier.py
Python
rasa/nlu/classifiers/logistic_regression_classifier.py
dc762814317ce46873a5226ee09033031a7d3604
rasa
1
136,360
33
12
9
94
9
0
36
123
delete_job
[Jobs] Add DELETE endpoint (#30056) Adds a DELETE endpoint to the REST API, SDK and CLI for Ray Jobs. Before this, there was no way to delete the job info from the cluster, so the memory could potentially grow without bound. Deleting a job immediately deletes all its associated info (status, metadata) from memory. We only support deleting jobs that are in a terminal state. We don't delete the log files of a deleted job.
https://github.com/ray-project/ray.git
async def delete_job(self, job_id):
    job_status = await self._job_info_client.get_status(job_id)
    if job_status is None or not job_status.is_terminal():
        raise RuntimeError(
            f"Attempted to delete job '{job_id}', "
            f"but it is in a non-terminal state {job_status}."
        )

    await self._job_info_client.delete_info(job_id)
    return True
50
job_manager.py
Python
dashboard/modules/job/job_manager.py
1639914ba5a37ed56a644fdb4c712f6da064746c
ray
3
290,402
17
11
9
91
11
0
20
92
async_media_play_pause
Update mypy to 0.990 (#81783) * Update mypy to 0.990 * Remove type ignore - overriding attr with property (13475) * Remove type ignores - hasattr (13544) * Adjust type ignore - assignment (13549) * New error code - type-abstract (13785) * Disable annotation-unchecked (13851)
https://github.com/home-assistant/core.git
async def async_media_play_pause(self) -> None:
    if hasattr(self, "media_play_pause"):
        await self.hass.async_add_executor_job(self.media_play_pause)
        return
    if self.state == MediaPlayerState.PLAYING:
        await self.async_media_pause()
    else:
        await self.async_media_play()
51
__init__.py
Python
homeassistant/components/media_player/__init__.py
0c8eeaa6436b04ba6da46bccab8b11523f314d9b
core
3
180,405
134
15
113
1,287
38
0
279
1,685
test_component_functions
Wrap dataframe (#1571) * add suport for table cell wrapping * revert pkginfo change * revert pkginfo change * fixed tests * formatting * type hint Co-authored-by: Abubakar Abid <[email protected]>
https://github.com/gradio-app/gradio.git
def test_component_functions(self):
    x_data = [["Tim", 12, False], ["Jan", 24, True]]
    dataframe_input = gr.Dataframe(headers=["Name", "Age", "Member"])
    output = dataframe_input.preprocess(x_data)
    self.assertEqual(output["Age"][1], 24)
    self.assertEqual(output["Member"][0], False)
    self.assertEqual(dataframe_input.preprocess_example(x_data), x_data)
    self.assertEqual(dataframe_input.serialize(x_data, True), x_data)

    with tempfile.TemporaryDirectory() as tmpdirname:
        to_save = dataframe_input.save_flagged(
            tmpdirname, "dataframe_input", x_data, None
        )
        self.assertEqual(json.dumps(x_data), to_save)
        restored = dataframe_input.restore_flagged(tmpdirname, to_save, None)
        self.assertEqual(x_data, restored)

    self.assertIsInstance(dataframe_input.generate_sample(), list)
    dataframe_input = gr.Dataframe(
        headers=["Name", "Age", "Member"], label="Dataframe Input"
    )
    self.assertEqual(
        dataframe_input.get_config(),
        {
            "headers": ["Name", "Age", "Member"],
            "datatype": "str",
            "row_count": (3, "dynamic"),
            "col_count": (3, "dynamic"),
            "value": [
                ["", "", ""],
                ["", "", ""],
                ["", "", ""],
            ],
            "name": "dataframe",
            "show_label": True,
            "label": "Dataframe Input",
            "max_rows": 20,
            "max_cols": None,
            "overflow_row_behaviour": "paginate",
            "style": {},
            "elem_id": None,
            "visible": True,
            "interactive": None,
            "wrap": False,
        },
    )
    dataframe_input = gr.Dataframe()
    output = dataframe_input.preprocess(x_data)
    self.assertEqual(output[1][1], 24)
    with self.assertRaises(ValueError):
        wrong_type = gr.Dataframe(type="unknown")
        wrong_type.preprocess(x_data)

    # Output functionalities
    dataframe_output = gr.Dataframe()
    output = dataframe_output.postprocess(np.zeros((2, 2)))
    self.assertDictEqual(output, {"data": [[0, 0], [0, 0]]})
    output = dataframe_output.postprocess([[1, 3, 5]])
    self.assertDictEqual(output, {"data": [[1, 3, 5]]})
    output = dataframe_output.postprocess(
        pd.DataFrame([[2, True], [3, True], [4, False]], columns=["num", "prime"])
    )
    self.assertDictEqual(
        output,
        {
            "headers": ["num", "prime"],
            "data": [[2, True], [3, True], [4, False]],
        },
    )
    self.assertEqual(
        dataframe_output.get_config(),
        {
            "headers": None,
            "max_rows": 20,
            "max_cols": None,
            "overflow_row_behaviour": "paginate",
            "name": "dataframe",
            "show_label": True,
            "label": None,
            "style": {},
            "elem_id": None,
            "visible": True,
            "datatype": "str",
            "row_count": (3, "dynamic"),
            "col_count": (3, "dynamic"),
            "value": [
                ["", "", ""],
                ["", "", ""],
                ["", "", ""],
            ],
            "interactive": None,
            "wrap": False,
        },
    )
    with self.assertRaises(ValueError):
        wrong_type = gr.Dataframe(type="unknown")
        wrong_type.postprocess(0)
    with tempfile.TemporaryDirectory() as tmpdirname:
        to_save = dataframe_output.save_flagged(
            tmpdirname, "dataframe_output", output, None
        )
        self.assertEqual(
            to_save,
            json.dumps(
                {
                    "headers": ["num", "prime"],
                    "data": [[2, True], [3, True], [4, False]],
                }
            ),
        )
        self.assertEqual(
            dataframe_output.restore_flagged(tmpdirname, to_save, None),
            {
                "headers": ["num", "prime"],
                "data": [[2, True], [3, True], [4, False]],
            },
        )
780
test_components.py
Python
test/test_components.py
8710d3a079d38b857620ec6f1bb4d261ae242263
gradio
1
108,514
33
12
11
130
17
0
44
166
_sci
Cleanup documentation generation for pyplot - remove the awkward `pyplot.plotting()` function, which only served as a namespace to take up the docs for pyplot and output them via `.. autofunction` - Instead generate the same information using `.. autosummary::`. We have to list the desired methods here explicitly. I've added a test that these are the same as previously auto-generated in the `plotting()` docstring. If we change anything in pyplot, we'll be notified through the test failure that we have to adapt the autosummary list. - Removed the docstring generation logic `_setup_pyplot_info_docstrings()`. Apart from generating the `plotting()` docstring, this added docstrings to the pyplot colormap setters. Instead, we now add these docstrings directly via boilerplate.py Co-authored-by: Elliott Sales de Andrade <[email protected]>
https://github.com/matplotlib/matplotlib.git
def _sci(self, im):
    _api.check_isinstance(
        (mpl.contour.ContourSet, mcoll.Collection, mimage.AxesImage),
        im=im)
    if isinstance(im, mpl.contour.ContourSet):
        if im.collections[0] not in self._children:
            raise ValueError("ContourSet must be in current Axes")
    elif im not in self._children:
        raise ValueError("Argument must be an image, collection, or "
                         "ContourSet in this Axes")
    self._current_image = im
81
_base.py
Python
lib/matplotlib/axes/_base.py
032316bc6c7798fca6c82de24167c975f237687f
matplotlib
4
113,697
23
10
11
113
12
0
36
63
hook_to_dtype_layout
fix(speedup): support for a special torchscript op 'aten::to.layout' with no corresponding torch.to (#5178)
https://github.com/microsoft/nni.git
def hook_to_dtype_layout(positional, keyword, undetermined, undetermined_special_treat):
    assert 'layout' not in undetermined
    assert 'pin_memory' not in undetermined
    assert 'non_blocking' not in undetermined
    to_layout = arg_trans_layout(keyword['layout'])
    del keyword['layout']
    del keyword['pin_memory']
    del keyword['non_blocking']
    real_to = FuncAdapter(torch.Tensor.to, positional, keyword, undetermined, undetermined_special_treat)
73
jit_translate.py
Python
nni/compression/pytorch/speedup/jit_translate.py
9a289ec21a40d870c0b80b93651d72a6693f7386
nni
1
265,901
30
16
19
180
28
0
34
299
test_cache_multiple_objects
Closes #10560: New global search (#10676) * Initial work on new search backend * Clean up search backends * Return only the most relevant result per object * Clear any pre-existing cached entries on cache() * #6003: Implement global search functionality for custom field values * Tweak field weights & document guidance * Extend search() to accept a lookup type * Move get_registry() out of SearchBackend * Enforce object permissions when returning search results * Add indexers for remaining models * Avoid calling remove() on non-cacheable objects * Use new search backend by default * Extend search backend to filter by object type * Clean up search view form * Enable specifying lookup logic * Add indexes for value field * Remove object type selector from search bar * Introduce SearchTable and enable HTMX for results * Enable pagination * Remove legacy search backend * Cleanup * Use a UUID for CachedValue primary key * Refactoring search methods * Define max search results limit * Extend reindex command to support specifying particular models * Add clear() and size to SearchBackend * Optimize bulk caching performance * Highlight matched portion of field value * Performance improvements for reindexing * Started on search tests * Cleanup & docs * Documentation updates * Clean up SearchIndex * Flatten search registry to register by app_label.model_name * Clean up search backend classes * Clean up RestrictedGenericForeignKey and RestrictedPrefetch * Resolve migrations conflict
https://github.com/netbox-community/netbox.git
def test_cache_multiple_objects(self):
    sites = Site.objects.all()
    search_backend.cache(sites)
    content_type = ContentType.objects.get_for_model(Site)
    self.assertEqual(
        CachedValue.objects.filter(object_type=content_type).count(),
        len(SiteIndex.fields) * sites.count()
    )
    for site in sites:
        for field_name, weight in SiteIndex.fields:
            self.assertTrue(
                CachedValue.objects.filter(
                    object_type=content_type,
                    object_id=site.pk,
                    field=field_name,
                    value=getattr(site, field_name),
                    weight=weight
                ),
            )
116
test_search.py
Python
netbox/netbox/tests/test_search.py
9628dead07ccef9608b32906aa8194bc948e5a09
netbox
3
270,669
12
9
5
54
8
0
14
53
_get_trainable_state
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def _get_trainable_state(self):
    trainable_state = weakref.WeakKeyDictionary()
    for layer in self._flatten_layers():
        trainable_state[layer] = layer.trainable
    return trainable_state
32
base_layer.py
Python
keras/engine/base_layer.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
2
215,761
27
12
10
182
9
0
39
85
_acl_changes
[merge jam] Master port 49261 - consul modules (#58101) * add consul states and acl function present/absent * add consul to states doc index * refact/fix consul states * fix doc, fix states * fix name parameter for acl_changes * fixing pylint errors * small changes after review by @rallytime * fix header count * Update consul.py * fix acl_exists description, fix when both id and name are missing * Adding some tests for consul module and consul state module. Some additional fixes in the consul module. * Fixing tests. * Fixing failing tests on Windows. * Adding changelog. * Adding some tests for consul module and consul state module. Some additional fixes in the consul module. * moving tests to pytest. * manual black changes. * One more manual black change. * fixing formatting. Adding versionadded for state module. Co-authored-by: Rémi Jouannet <[email protected]> Co-authored-by: Mike Place <[email protected]> Co-authored-by: Daniel Wozniak <[email protected]> Co-authored-by: Wayne Werner <[email protected]>
https://github.com/saltstack/salt.git
def _acl_changes(name, id=None, type=None, rules=None, consul_url=None, token=None):
    info = __salt__["consul.acl_info"](id=id, token=token, consul_url=consul_url)

    if info["res"] and info["data"][0]["Name"] != name:
        return True
    elif info["res"] and info["data"][0]["Rules"] != rules:
        return True
    elif info["res"] and info["data"][0]["Type"] != type:
        return True
    else:
        return False
112
consul.py
Python
salt/states/consul.py
fb825aa760fa0585a2c8fdafc6e62be8aec8cecf
salt
7
181,898
32
17
10
128
12
0
41
97
generate_export_pipeline_code
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
https://github.com/EpistasisLab/tpot.git
def generate_export_pipeline_code(pipeline_tree, operators):
    steps = _process_operator(pipeline_tree, operators)
    # number of steps in a pipeline
    num_step = len(steps)
    if num_step > 1:
        pipeline_text = "make_pipeline(\n{STEPS}\n)".format(
            STEPS=_indent(",\n".join(steps), 4)
        )
    # only one operator (root = True)
    else:
        pipeline_text = "{STEPS}".format(STEPS=_indent(",\n".join(steps), 0))

    return pipeline_text
71
export_utils.py
Python
tpot/export_utils.py
388616b6247ca4ea8de4e2f340d6206aee523541
tpot
2
158,190
43
13
15
274
26
0
62
142
load_data_imdb
[PaddlePaddle] Merge master into Paddle branch (#1186) * change 15.2 title in chinese version (#1109) change title ’15.2. 情感分析:使用递归神经网络‘ to ’15.2. 情感分析:使用循环神经网络‘ * 修改部分语义表述 (#1105) * Update r0.17.5 (#1120) * Bump versions in installation * 94行typo: (“bert.mall”)->(“bert.small”) (#1129) * line 313: "bert.mall" -> "bert.small" (#1130) * fix: update language as native reader (#1114) * Fix the translation of "stride" (#1115) * Update index.md (#1118) 修改部分语义表述 * Update self-attention-and-positional-encoding.md (#1133) 依照本书的翻译习惯,将pooling翻译成汇聚 * maybe a comment false (#1149) * maybe a little false * maybe a little false * A minor bug in the rcnn section (Chinese edition) (#1148) * Update bert.md (#1137) 一个笔误 # 假设batch_size=2,num_pred_positions=3 # 那么batch_idx应该是np.repeat( [0,1], 3 ) = [0,0,0,1,1,1] * Update calculus.md (#1135) * fix typo in git documentation (#1106) * fix: Update the Chinese translation in lr-scheduler.md (#1136) * Update lr-scheduler.md * Update chapter_optimization/lr-scheduler.md Co-authored-by: goldmermaid <[email protected]> Co-authored-by: goldmermaid <[email protected]> * fix translation for kaggle-house-price.md (#1107) * fix translation for kaggle-house-price.md * fix translation for kaggle-house-price.md Signed-off-by: sunhaizhou <[email protected]> * Update weight-decay.md (#1150) * Update weight-decay.md 关于“k多选d”这一部分,中文读者使用排列组合的方式可能更容易理解 关于“给定k个变量,阶数的个数为...”这句话是有歧义的,不是很像中国话,应该是说“阶数为d的项的个数为...”。 并增加了一句对“因此即使是阶数上的微小变化,比如从$2$到$3$,也会显著增加我们模型的复杂性。”的解释 解释为何会增加复杂性以及为何需要细粒度工具。 * Update chapter_multilayer-perceptrons/weight-decay.md yep Co-authored-by: goldmermaid <[email protected]> * Update chapter_multilayer-perceptrons/weight-decay.md yep Co-authored-by: goldmermaid <[email protected]> Co-authored-by: goldmermaid <[email protected]> * Fix a spelling error (#1161) * Update gru.md (#1152) The key distinction between vanilla RNNs and GRUs is that the latter support gating of the hidden state. 翻译错误 * Unify the function naming (#1113) Unify naming of the function 'init_xavier()'. * Update mlp-concise.md (#1166) * Update mlp-concise.md 语句不通顺 * Update environment.md 语序异常 * Update config.ini * fix the imprecise description (#1168) Co-authored-by: yuande <yuande> * fix typo in chapter_natural-language-processing-pretraining/glove.md (#1175) * Fix some typos. (#1163) * Update batch-norm.md (#1170) fixing typos u->x in article * Update linear-regression.md (#1090) We invoke Stuart Russell and Peter Norvig who, in their classic AI text book Artificial Intelligence: A Modern Approach :cite:Russell.Norvig.2016, pointed out that 原译文把who也直接翻译出来了。 * Update mlp.md (#1117) * Update mlp.md 修改部分语义表述 * Update chapter_multilayer-perceptrons/mlp.md Co-authored-by: goldmermaid <[email protected]> * Update chapter_multilayer-perceptrons/mlp.md Co-authored-by: Aston Zhang <[email protected]> Co-authored-by: goldmermaid <[email protected]> * Correct a translation error. (#1091) * Correct a translation error. 
* Update chapter_computer-vision/image-augmentation.md Co-authored-by: Aston Zhang <[email protected]> * Update aws.md (#1121) * Update aws.md * Update chapter_appendix-tools-for-deep-learning/aws.md Co-authored-by: Aston Zhang <[email protected]> * Update image-augmentation.md (#1093) * Update anchor.md (#1088) fix a minor issue in code * Update anchor.md * Update image-augmentation.md * fix typo and improve translation in chapter_linear-networks\softmax-regression.md (#1087) * Avoid `torch.meshgrid` user warning (#1174) Avoids the following user warning: ```python ~/anaconda3/envs/torch/lib/python3.10/site-packages/torch/functional.py:568: UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at ../aten/src/ATen/native/TensorShape.cpp:2228.) return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined] ``` * bump to 2.0.0-beta1 * Update sequence.md * bump beta1 on readme * Add latex code block background to config * BLD: Bump python support version 3.9 (#1183) * BLD: Bump python support version 3.9 * Remove clear and manually downgrade protobuf 4.21.4 to 3.19.4 * BLD: Bump torch and tensorflow * Update Jenkinsfile * Update chapter_installation/index.md * Update chapter_installation/index.md Co-authored-by: Aston Zhang <[email protected]> * Update config.ini * Update INFO.md * Update INFO.md * Drop mint to show code in pdf, use Inconsolata font, apply code cell color (#1187) * resolve the conflicts * revise from publisher (#1089) * revise from publisher * d2l api * post_latex * revise from publisher * revise ch11 * Delete d2l-Copy1.bib * clear cache * rm d2lbook clear * debug anchor * keep original d2l doc Co-authored-by: Ubuntu <[email protected]> Co-authored-by: Aston Zhang <[email protected]> Co-authored-by: Aston Zhang <[email protected]> * 重复语句 (#1188) Co-authored-by: Aston Zhang <[email protected]> * Improve expression for chapter_preliminaries/pandas.md (#1184) * Update pandas.md * Improve expression * Improve expression * Update chapter_preliminaries/pandas.md Co-authored-by: Aston Zhang <[email protected]> * Improce expression for chapter_preliminaries/linear-algebra.md (#1185) * Improce expression * Improve code comments * Update chapter_preliminaries/linear-algebra.md * Update chapter_preliminaries/linear-algebra.md * Update chapter_preliminaries/linear-algebra.md * Update chapter_preliminaries/linear-algebra.md Co-authored-by: Aston Zhang <[email protected]> * Fix multibox_detection bugs * Update d2l to 0.17.5 version * restore older version * Upgrade pandas * change to python3.8 * Test warning log * relocate warning log * test logs filtering * Update gru.md * Add DeprecationWarning filter * Test warning log * Update attention mechanisms & computational performance * Update multilayer perceptron& linear & convolution networks & computer vision * Update recurrent&optimition&nlp pretraining & nlp applications * ignore warnings * Update index.md * Update linear networks * Update multilayer perceptrons&deep learning computation * Update preliminaries * Check and Add warning filter * Update kaggle-cifar10.md * Update object-detection-dataset.md * Update ssd.md fcn.md * Update hybridize.md * Update hybridize.md Signed-off-by: sunhaizhou <[email protected]> Co-authored-by: zhou201505013 <[email protected]> Co-authored-by: Xinwei Liu <[email protected]> Co-authored-by: Anirudh Dagar <[email protected]> Co-authored-by: Aston Zhang <[email protected]> Co-authored-by: hugo_han <[email protected]> 
Co-authored-by: gyro永不抽风 <[email protected]> Co-authored-by: CanChengZheng <[email protected]> Co-authored-by: linlin <[email protected]> Co-authored-by: iuk <[email protected]> Co-authored-by: yoos <[email protected]> Co-authored-by: Mr. Justice Lawrence John Wargrave <[email protected]> Co-authored-by: Chiyuan Fu <[email protected]> Co-authored-by: Sunhuashan <[email protected]> Co-authored-by: Haiker Sun <[email protected]> Co-authored-by: Ming Liu <[email protected]> Co-authored-by: goldmermaid <[email protected]> Co-authored-by: silenceZheng66 <[email protected]> Co-authored-by: Wenchao Yan <[email protected]> Co-authored-by: Kiki2049 <[email protected]> Co-authored-by: Krahets <[email protected]> Co-authored-by: friedmainfunction <[email protected]> Co-authored-by: Jameson <[email protected]> Co-authored-by: P. Yao <[email protected]> Co-authored-by: Yulv-git <[email protected]> Co-authored-by: Liu,Xiao <[email protected]> Co-authored-by: YIN, Gang <[email protected]> Co-authored-by: Joe-HZ <[email protected]> Co-authored-by: lybloveyou <[email protected]> Co-authored-by: VigourJiang <[email protected]> Co-authored-by: zxhd863943427 <[email protected]> Co-authored-by: LYF <[email protected]> Co-authored-by: Aston Zhang <[email protected]> Co-authored-by: xiaotinghe <[email protected]> Co-authored-by: Ubuntu <[email protected]> Co-authored-by: Holly-Max <[email protected]> Co-authored-by: HinGwenWoong <[email protected]> Co-authored-by: Shuai Zhang <[email protected]>
https://github.com/d2l-ai/d2l-zh.git
def load_data_imdb(batch_size, num_steps=500):
    data_dir = d2l.download_extract('aclImdb', 'aclImdb')
    train_data = read_imdb(data_dir, True)
    test_data = read_imdb(data_dir, False)
    train_tokens = d2l.tokenize(train_data[0], token='word')
    test_tokens = d2l.tokenize(test_data[0], token='word')
    vocab = d2l.Vocab(train_tokens, min_freq=5)
    train_features = np.array([d2l.truncate_pad(
        vocab[line], num_steps, vocab['<pad>']) for line in train_tokens])
    test_features = np.array([d2l.truncate_pad(
        vocab[line], num_steps, vocab['<pad>']) for line in test_tokens])
    train_iter = d2l.load_array((train_features, train_data[1]), batch_size)
    test_iter = d2l.load_array((test_features, test_data[1]), batch_size,
                               is_train=False)
    return train_iter, test_iter, vocab
180
mxnet.py
Python
d2l/mxnet.py
b64b41d8c1ac23c43f7a4e3f9f6339d6f0012ab2
d2l-zh
3
35,453
95
11
9
138
12
0
129
158
get_tests_dir
[Test refactor 1/5] Per-folder tests reorganization (#15725) * Per-folder tests reorganization Co-authored-by: sgugger <[email protected]> Co-authored-by: Stas Bekman <[email protected]>
https://github.com/huggingface/transformers.git
def get_tests_dir(append_path=None):
    # this function caller's __file__
    caller__file__ = inspect.stack()[1][1]
    tests_dir = os.path.abspath(os.path.dirname(caller__file__))

    while not tests_dir.endswith("tests"):
        tests_dir = os.path.dirname(tests_dir)

    if append_path:
        return os.path.join(tests_dir, append_path)
    else:
        return tests_dir


#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py

# When any function contains print() calls that get overwritten, like progress bars,
# a special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
75
testing_utils.py
Python
src/transformers/testing_utils.py
29c10a41d04f855c433a6cde7797b325651417d2
transformers
3
89,803
14
9
7
61
12
0
17
45
_should_operate
feat(hybrid-cloud): Create a base parser and middleware for webhooks (#42267) See [HC-468](https://getsentry.atlassian.net/browse/HC-468) Requires https://github.com/getsentry/sentry/pull/42260 This PR establishes the base parser that will be inherited from to forward webhooks to the appropriate integration. It is a slightly modified, portion of this [much larger PR](https://github.com/getsentry/sentry/pull/39169). It was split off in order to update that PR and make it more reviewable. Some background: The IntegrationControlMiddleware catches any incoming requests to the control silo with the `/extensions/` path prefix. It parses the provider out of the URL (e.g. `sentry.io/extensions/slack/something`), and passes the request along to that parser to determine how we handle the request (e.g. do we forward it to a region, multiple regions, handle it async, respond immediately from control, etc.) The BaseRequestParser provides a bunch of helpful methods to these parsers to make the actual integration-specific parsers as minimal as possible. They only need to implement a method for identifying the integration (e.g. from headers, from a signature, from a payload, etc), and how we respond to the webhook (allowing for different behaviour from different webhooks).
https://github.com/getsentry/sentry.git
def _should_operate(self, request) -> bool:
    is_correct_silo = SiloMode.get_current_mode() == SiloMode.CONTROL
    is_external = request.path.startswith(self.webhook_prefix)
    return is_correct_silo and is_external
37
integration_control.py
Python
src/sentry/middleware/integrations/integration_control.py
d8609112d6e2f373692b414acff6d4a2f7466750
sentry
2
2,278
55
11
41
351
24
0
79
299
set
replaced all methods of usermanager class, working login Co-Authored By: Ionesio
https://github.com/OpenMined/PySyft.git
def set(self, **kwargs) -> None:  # nosec
    attributes = {}
    user_id = kwargs["user_id"]
    user = self.first(id_int=int(user_id))
    if not user:
        raise UserNotFoundError

    for k, v in kwargs.items():
        if k in user.__attr_searchable__:
            attributes[k] = v

    if kwargs.get("email", None):
        user.email = kwargs["email"]
    elif kwargs.get("role", None):
        user.role = kwargs["role"]
    elif kwargs.get("name", None):
        user.name = kwargs["name"]
    elif kwargs.get("budget", None):
        user.budget = kwargs["budget"]
    elif kwargs.get("website", None):
        user.website = kwargs["website"]
    elif kwargs.get("institution", None):
        user.institution = kwargs["institution"]
    else:
        raise Exception

    attributes["__blob__"] = user.to_bytes()

    self.update_one({"id_int": int(user_id)}, {"$set": attributes})
205
user_manager.py
Python
packages/syft/src/syft/core/node/common/node_manager/user_manager.py
066545e8a88e842aa7d0a5d57bac88716001bced
PySyft
10
124,671
67
15
25
217
25
0
105
415
_optimize_stages
Object GC for block splitting inside the dataset splitting (#26196) The pipeline will spill objects when splitting the dataset into multiple equal parts. Co-authored-by: Ubuntu <[email protected]>
https://github.com/ray-project/ray.git
def _optimize_stages(self):
    context = DatasetContext.get_current()

    if not context.optimize_fuse_stages:
        self._optimized_stages = self._stages
        return

    # This dummy dataset will be used to get a set of optimized stages.
    dummy_ds = Dataset(
        ExecutionPlan(BlockList([], []), DatasetStats(stages={}, parent=None)),
        0,
        True,
        used_from_dataset_pipeline=True,
    )
    # Apply all pipeline operations to the dummy dataset.
    for stage in self._stages:
        dummy_ds = stage(dummy_ds)
    # Get the optimized stages.
    _, _, stages = dummy_ds._plan._optimize()
    # Apply these optimized stages to the datasets underlying the pipeline.
    # These optimized stages will be executed by the PipelineExecutor.
    optimized_stages = []
    for stage in stages:
        optimized_stages.append(
            lambda ds, stage=stage: Dataset(
                ds._plan.with_stage(stage),
                ds._epoch,
                True,
                used_from_dataset_pipeline=True,
            )
        )
    self._optimized_stages = optimized_stages
138
dataset_pipeline.py
Python
python/ray/data/dataset_pipeline.py
45ba0e3cacbf4f38b9724437798c75341c2ddc7c
ray
4
38,287
31
11
11
58
6
0
37
110
num_layers
Black preview (#17217) * Black preview * Fixup too! * Fix check copies * Use the same version as the CI * Bump black
https://github.com/huggingface/transformers.git
def num_layers(self) -> int:
    if not hasattr(self._config, "num_layers"):
        raise AttributeError(
            "could not find the number of layers attribute in the model configuration, override the num_layers"
            " property of the model OnnxConfig to solve this"
        )
    return self._config.num_layers
31
config.py
Python
src/transformers/onnx/config.py
afe5d42d8d1d80af911ed980c2936bfe887078f6
transformers
2
248,496
9
11
6
50
9
0
10
68
_get_canonical_alias
Reduce the amount of state we pull from the DB (#12811)
https://github.com/matrix-org/synapse.git
def _get_canonical_alias(self):
    return self.get_success(
        self._storage_controllers.state.get_current_state_event(
            self.room_id, EventTypes.CanonicalAlias, ""
        )
    )
30
test_directory.py
Python
tests/handlers/test_directory.py
e3163e2e11cf8bffa4cb3e58ac0b86a83eca314c
synapse
1
242,493
23
12
10
137
8
0
28
122
histogram
Document that histogram() uses 256 bins per channel
https://github.com/python-pillow/Pillow.git
def histogram(self, mask=None, extrema=None):
    self.load()
    if mask:
        mask.load()
        return self.im.histogram((0, 0), mask.im)
    if self.mode in ("I", "F"):
        if extrema is None:
            extrema = self.getextrema()
        return self.im.histogram(extrema)
    return self.im.histogram()
84
Image.py
Python
src/PIL/Image.py
de968dd920eaa3d1a27877059c6bbb9043a9d26b
Pillow
4
300,734
33
21
24
204
24
0
42
434
async_step_onvif_devices
Decouple stream options from PyAV options (#71247) Co-authored-by: Allen Porter <[email protected]>
https://github.com/home-assistant/core.git
async def async_step_onvif_devices(self, user_input=None):
    if user_input is not None:
        self.options[CONF_EXTRA_ARGUMENTS] = user_input[CONF_EXTRA_ARGUMENTS]
        self.options[CONF_RTSP_TRANSPORT] = user_input[CONF_RTSP_TRANSPORT]
        return self.async_create_entry(title="", data=self.options)

    return self.async_show_form(
        step_id="onvif_devices",
        data_schema=vol.Schema(
            {
                vol.Optional(
                    CONF_EXTRA_ARGUMENTS,
                    default=self.config_entry.options.get(
                        CONF_EXTRA_ARGUMENTS, DEFAULT_ARGUMENTS
                    ),
                ): str,
                vol.Optional(
                    CONF_RTSP_TRANSPORT,
                    default=self.config_entry.options.get(
                        CONF_RTSP_TRANSPORT, next(iter(RTSP_TRANSPORTS))
                    ),
                ): vol.In(RTSP_TRANSPORTS),
            }
        ),
    )
134
config_flow.py
Python
homeassistant/components/onvif/config_flow.py
617b0d04dcab521ce3b68b4f4956c1f341f6ea60
core
2
9,888
6
7
7
28
4
0
6
20
uses_before_args
feat: star routing (#3900) * feat(proto): adjust proto for star routing (#3844) * feat(proto): adjust proto for star routing * feat(proto): generate proto files * feat(grpc): refactor grpclet interface (#3846) * feat: refactor connection pool for star routing (#3872) * feat(k8s): add more labels to k8s deployments * feat(network): refactor connection pool * feat(network): refactor k8s pool * feat: star routing graph gateway (#3877) * feat: star routing - refactor grpc data runtime (#3887) * feat(runtimes): refactor grpc dataruntime * fix(tests): adapt worker runtime tests * fix(import): fix import * feat(proto): enable sending multiple lists (#3891) * feat: star routing gateway (#3893) * feat: star routing gateway all protocols (#3897) * test: add streaming and prefetch tests (#3901) * feat(head): new head runtime for star routing (#3899) * feat(head): new head runtime * feat(head): new head runtime * style: fix overload and cli autocomplete * feat(network): improve proto comments Co-authored-by: Jina Dev Bot <[email protected]> * feat(worker): merge docs in worker runtime (#3905) * feat(worker): merge docs in worker runtime * feat(tests): assert after clean up * feat(tests): star routing runtime integration tests (#3908) * fix(tests): fix integration tests * test: test runtimes fast slow request (#3910) * feat(zmq): purge zmq, zed, routing_table (#3915) * feat(zmq): purge zmq, zed, routing_table * style: fix overload and cli autocomplete * feat(zmq): adapt comment in dependency list * style: fix overload and cli autocomplete * fix(tests): fix type tests Co-authored-by: Jina Dev Bot <[email protected]> * test: add test gateway to worker connection (#3921) * feat(pea): adapt peas for star routing (#3918) * feat(pea): adapt peas for star routing * style: fix overload and cli autocomplete * feat(pea): add tests * feat(tests): add failing head pea test Co-authored-by: Jina Dev Bot <[email protected]> * feat(tests): integration tests for peas (#3923) * feat(tests): integration tests for peas * feat(pea): remove _inner_pea function * feat: star routing container pea (#3922) * test: rescue tests (#3942) * fix: fix streaming tests (#3945) * refactor: move docker run to run (#3948) * feat: star routing pods (#3940) * feat(pod): adapt pods for star routing * feat(pods): adapt basepod to star routing * feat(pod): merge pod and compound pod * feat(tests): fix tests * style: fix overload and cli autocomplete * feat(test): add container pea int test * feat(ci): remove more unnecessary tests * fix(tests): remove jinad runtime * feat(ci): remove latency tracking * fix(ci): fix ci def * fix(runtime): enable runtime to be exited * fix(tests): wrap runtime test in process * fix(runtimes): remove unused runtimes * feat(runtimes): improve cancel wait * fix(ci): build test pip again in ci * fix(tests): fix a test * fix(test): run async in its own process * feat(pod): include shard in activate msg * fix(pea): dont join * feat(pod): more debug out * feat(grpc): manage channels properly * feat(pods): remove exitfifo * feat(network): add simple send retry mechanism * fix(network): await pool close * fix(test): always close grpc server in worker * fix(tests): remove container pea from tests * fix(tests): reorder tests * fix(ci): split tests * fix(ci): allow alias setting * fix(test): skip a test * feat(pods): address comments Co-authored-by: Jina Dev Bot <[email protected]> * test: unblock skipped test (#3957) * feat: jinad pea (#3949) * feat: jinad pea * feat: jinad pea * test: remote peas * test: toplogy tests 
with jinad * ci: parallel jobs * feat(tests): add pod integration tests (#3958) * feat(tests): add pod integration tests * fix(tests): make tests less flaky * fix(test): fix test * test(pea): remote pea topologies (#3961) * test(pea): remote pea simple topology * test: remote pea topologies * refactor: refactor streamer result handling (#3960) * feat(k8s): adapt K8s Pod for StarRouting (#3964) * test: optimize k8s test * test: increase timeout and use different namespace * test: optimize k8s test * test: build and load image when needed * test: refactor k8s test * test: fix image name error * test: fix k8s image load * test: fix typoe port expose * test: update tests in connection pool and handling * test: remove unused fixture * test: parameterize docker images * test: parameterize docker images * test: parameterize docker images * feat(k8s): adapt k8s pod for star routing * fix(k8s): dont overwrite add/remove function in pool * fix(k8s): some fixes * fix(k8s): some more fixes * fix(k8s): linting * fix(tests): fix tests * fix(tests): fix k8s unit tests * feat(k8s): complete k8s integration test * feat(k8s): finish k8s tests * feat(k8s): fix test * fix(tests): fix test with no name * feat(k8s): unify create/replace interface * feat(k8s): extract k8s port constants * fix(tests): fix tests * fix(tests): wait for runtime being ready in tests * feat(k8s): address comments Co-authored-by: bwanglzu <[email protected]> * feat(flow): adapt Flow for StarRouting (#3986) * feat(flow): add routes * feat(flow): adapt flow to star routing * style: fix overload and cli autocomplete * feat(flow): handle empty topologies * feat(k8s): allow k8s pool disabling * style: fix overload and cli autocomplete * fix(test): fix test with mock * fix(tests): fix more tests * feat(flow): clean up tests * style: fix overload and cli autocomplete * fix(tests): fix more tests * feat: add plot function (#3994) * fix(tests): avoid hanging tests * feat(flow): add type hinting * fix(test): fix duplicate exec name in test * fix(tests): fix more tests * fix(tests): enable jinad test again * fix(tests): random port fixture * fix(style): replace quotes Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Joan Fontanals <[email protected]> * feat(ci): bring back ci (#3997) * feat(ci): enable ci again * style: fix overload and cli autocomplete * feat(ci): add latency tracking * feat(ci): bring back some tests * fix(tests): remove invalid port test * feat(ci): disable daemon and distributed tests * fix(tests): fix entrypoint in hub test * fix(tests): wait for gateway to be ready * fix(test): fix more tests * feat(flow): do rolling update and scale sequentially * fix(tests): fix more tests * style: fix overload and cli autocomplete * feat: star routing hanging pods (#4011) * fix: try to handle hanging pods better * test: hanging pods test work * fix: fix topology graph problem * test: add unit test to graph * fix(tests): fix k8s tests * fix(test): fix k8s test * fix(test): fix k8s pool test * fix(test): fix k8s test * fix(test): fix k8s connection pool setting * fix(tests): make runtime test more reliable * fix(test): fix routes test * fix(tests): make rolling update test less flaky * feat(network): gurantee unique ports * feat(network): do round robin for shards * fix(ci): increase pytest timeout to 10 min Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Joan Fontanals <[email protected]> * fix(ci): fix ci file * feat(daemon): jinad pod for star routing * Revert "feat(daemon): jinad pod for star routing" 
This reverts commit ed9b37ac862af2e2e8d52df1ee51c0c331d76f92. * feat(daemon): remote jinad pod support (#4042) * feat(daemon): add pod tests for star routing * feat(daemon): add remote pod test * test(daemon): add remote pod arguments test * test(daemon): add async scale test * test(daemon): add rolling update test * test(daemon): fix host * feat(proto): remove message proto (#4051) * feat(proto): remove message proto * fix(tests): fix tests * fix(tests): fix some more tests * fix(tests): fix more tests * fix(tests): fix more tests * fix(tests): fix more tests * fix(tests): fix more tests * feat(proto): put docs back in data * fix(proto): clean up * feat(proto): clean up * fix(tests): skip latency tracking * fix(test): fix hub test * fix(tests): fix k8s test * fix(test): some test clean up * fix(style): clean up style issues * feat(proto): adjust for rebase * fix(tests): bring back latency tracking * fix(tests): fix merge accident * feat(proto): skip request serialization (#4074) * feat: add reduce to star routing (#4070) * feat: add reduce on shards to head runtime * test: add reduce integration tests with fixed order * feat: add reduce on needs * chore: get_docs_matrix_from_request becomes public * style: fix overload and cli autocomplete * docs: remove undeterministic results warning * fix: fix uses_after * test: assert correct num docs after reducing in test_external_pod * test: correct asserts after reduce in test_rolling_update * fix: no reduce if uses_after_address is set * fix: get_docs_from_request only if needed * fix: fix tests after merge * refactor: move reduce from data_request_handler to head * style: fix overload and cli autocomplete * chore: apply suggestions * fix: fix asserts * chore: minor test fix * chore: apply suggestions * test: remove flow tests with external executor (pea) * fix: fix test_expected_messages_routing * fix: fix test_func_joiner * test: adapt k8s test Co-authored-by: Jina Dev Bot <[email protected]> * fix(k8s): fix static pool config * fix: use custom protoc doc generator image (#4088) * fix: use custom protoc doc generator image * fix(docs): minor doc improvement * fix(docs): use custom image * fix(docs): copy docarray * fix: doc building local only * fix: timeout doc building * fix: use updated args when building ContainerPea * test: add container PeaFactory test * fix: force pea close on windows (#4098) * fix: dont reduce if uses exist (#4099) * fix: dont use reduce if uses exist * fix: adjust reduce tests * fix: adjust more reduce tests * fix: fix more tests * fix: adjust more tests * fix: ignore non jina resources (#4101) * feat(executor): enable async executors (#4102) * feat(daemon): daemon flow on star routing (#4096) * test(daemon): add remote flow test * feat(daemon): call scale in daemon * feat(daemon): remove tail args and identity * test(daemon): rename scalable executor * test(daemon): add a small delay in async test * feat(daemon): scale partial flow only * feat(daemon): call scale directly in partial flow store * test(daemon): use asyncio sleep * feat(daemon): enable flow level distributed tests * test(daemon): fix jinad env workspace config * test(daemon): fix pod test use new port rolling update * feat(daemon): enable distribuetd tests * test(daemon): remove duplicate tests and zed runtime test * test(daemon): fix stores unit test * feat(daemon): enable part of distributed tests * feat(daemon): enable part of distributed tests * test: correct test paths * test(daemon): add client test for remote flows * test(daemon): send a request 
with jina client * test(daemon): assert async generator * test(daemon): small interval between tests * test(daemon): add flow test for container runtime * test(daemon): add flow test for container runtime * test(daemon): fix executor name * test(daemon): fix executor name * test(daemon): use async client fetch result * test(daemon): finish container flow test * test(daemon): enable distributed in ci * test(daemon): enable distributed in ci * test(daemon): decare flows and pods * test(daemon): debug ci if else * test(daemon): debug ci if else * test(daemon): decare flows and pods * test(daemon): correct test paths * test(daemon): add small delay for async tests * fix: star routing fixes (#4100) * docs: update docs * fix: fix Request.__repr__ * docs: update flow remarks * docs: fix typo * test: add non_empty_fields test * chore: remove non_empty_fields test * feat: polling per endpoint (#4111) * feat(polling): polling per endpoint configurable * fix: adjust tests * feat(polling): extend documentation * style: fix overload and cli autocomplete * fix: clean up * fix: adjust more tests * fix: remove repeat from flaky test * fix: k8s test * feat(polling): address pr feedback * feat: improve docs Co-authored-by: Jina Dev Bot <[email protected]> * feat(grpc): support connect grpc server via ssl tunnel (#4092) * feat(grpc): support ssl grpc connect if port is 443 * fix(grpc): use https option instead of detect port automatically * chore: fix typo * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * test(networking): add test for peapods networking * fix: address comments Co-authored-by: Joan Fontanals <[email protected]> * feat(polling): unify polling args (#4113) * fix: several issues for jinad pods (#4119) * fix: activate for jinad pods * fix: dont expose worker pod in partial daemon * fix: workspace setting * fix: containerized flows * fix: hub test * feat(daemon): remote peas on star routing (#4112) * test(daemon): fix request in peas * test(daemon): fix request in peas * test(daemon): fix sync async client test * test(daemon): enable remote peas test * test(daemon): replace send message to send request * test(daemon): declare pea tests in ci * test(daemon): use pea args fixture * test(daemon): head pea use default host * test(daemon): fix peas topologies * test(daemon): fix pseudo naming * test(daemon): use default host as host * test(daemon): fix executor path * test(daemon): add remote worker back * test(daemon): skip local remote remote topology * fix: jinad pea test setup * fix: jinad pea tests * fix: remove invalid assertion Co-authored-by: jacobowitz <[email protected]> * feat: enable daemon tests again (#4132) * feat: enable daemon tests again * fix: remove bogy empty script file * fix: more jinad test fixes * style: fix overload and cli autocomplete * fix: scale and ru in jinad * fix: fix more jinad tests Co-authored-by: Jina Dev Bot <[email protected]> * fix: fix flow test * fix: improve pea tests reliability (#4136) Co-authored-by: Joan Fontanals <[email protected]> Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Deepankar Mahapatro <[email protected]> Co-authored-by: bwanglzu <[email protected]> Co-authored-by: AlaeddineAbdessalem <[email protected]> Co-authored-by: Zhaofeng Miao <[email protected]>
https://github.com/jina-ai/jina.git
def uses_before_args(self) -> Namespace:
    return self.peas_args['uses_before']
15
__init__.py
Python
jina/peapods/pods/__init__.py
933415bfa1f9eb89f935037014dfed816eb9815d
jina
1
102,397
5
6
19
23
5
0
5
8
no_batch_dim_reference_fn
[rnn/gru] no batch dim (#70442) Summary: Fixes https://github.com/pytorch/pytorch/issues/60585 TODO: * [x] Doc updates Pull Request resolved: https://github.com/pytorch/pytorch/pull/70442 Reviewed By: zou3519 Differential Revision: D33460427 Pulled By: jbschlosser fbshipit-source-id: c64d9624c305d90570c79d11a28557f9ec667b27
https://github.com/pytorch/pytorch.git
def no_batch_dim_reference_fn(m, p, *args, **kwargs):
152
common_modules.py
Python
torch/testing/_internal/common_modules.py
6eba936082a641be8ece156f70c0f5c435f7a7aa
pytorch
9
107,534
10
10
4
47
5
0
10
54
fix_minus
Properly capitalize "Unicode". See e.g. https://en.wikipedia.org/wiki/Unicode, https://docs.python.org/3/howto/unicode.html. Also associated minor doc cleanups.
https://github.com/matplotlib/matplotlib.git
def fix_minus(s):
    return (s.replace('-', '\N{MINUS SIGN}')
            if mpl.rcParams['axes.unicode_minus']
            else s)
26
ticker.py
Python
lib/matplotlib/ticker.py
88cb4c9d0aa1e790fc4689ca7e68725bf851bf63
matplotlib
2
248,369
92
13
26
284
31
0
125
441
test_prune_gap_if_dummy_remote
Pull out less state when handling gaps mk2 (#12852)
https://github.com/matrix-org/synapse.git
def test_prune_gap_if_dummy_remote(self):
    body = self.helper.send_event(
        self.room_id, type=EventTypes.Dummy, content={}, tok=self.token
    )
    local_message_event_id = body["event_id"]
    self.assert_extremities([local_message_event_id])

    # Advance the clock for many days to make the old extremity "old". We
    # also set the depth to "lots".
    self.reactor.advance(7 * 24 * 60 * 60)

    # Fudge a second event which points to an event we don't have. This is a
    # state event so that the state changes (otherwise we won't prune the
    # extremity as they'll have the same state group).
    remote_event_2 = event_from_pdu_json(
        {
            "type": EventTypes.Member,
            "state_key": "@user:other2",
            "content": {"membership": Membership.JOIN},
            "room_id": self.room_id,
            "sender": "@user:other2",
            "depth": 10000,
            "prev_events": ["$some_unknown_message"],
            "auth_events": [],
            "origin_server_ts": self.clock.time_msec(),
        },
        RoomVersions.V6,
    )

    state_before_gap = self.get_success(
        self.state.get_current_state_ids(self.room_id)
    )

    self.persist_event(remote_event_2, state=state_before_gap)

    # Check the new extremity is just the new remote event.
    self.assert_extremities([remote_event_2.event_id])
167
test_events.py
Python
tests/storage/test_events.py
b83bc5fab57b37f75a79d02213d6032c586fd36e
synapse
1
215,549
219
19
107
1,087
60
0
431
2,121
sign_in
Use salt.channel.client instead of salt.transport.client
https://github.com/saltstack/salt.git
def sign_in(self, timeout=60, safe=True, tries=1, channel=None):
    auth = {}

    auth_timeout = self.opts.get("auth_timeout", None)
    if auth_timeout is not None:
        timeout = auth_timeout
    auth_safemode = self.opts.get("auth_safemode", None)
    if auth_safemode is not None:
        safe = auth_safemode
    auth_tries = self.opts.get("auth_tries", None)
    if auth_tries is not None:
        tries = auth_tries

    m_pub_fn = os.path.join(self.opts["pki_dir"], self.mpub)

    auth["master_uri"] = self.opts["master_uri"]

    close_channel = False
    if not channel:
        close_channel = True
        channel = salt.channel.client.AsyncReqChannel.factory(
            self.opts, crypt="clear", io_loop=self.io_loop
        )

    sign_in_payload = self.minion_sign_in_payload()
    try:
        payload = yield channel.send(sign_in_payload, tries=tries, timeout=timeout)
    except SaltReqTimeoutError as e:
        if safe:
            log.warning("SaltReqTimeoutError: %s", e)
            raise salt.ext.tornado.gen.Return("retry")
        if self.opts.get("detect_mode") is True:
            raise salt.ext.tornado.gen.Return("retry")
        else:
            raise SaltClientError(
                "Attempt to authenticate with the salt master failed with timeout"
                " error"
            )
    finally:
        if close_channel:
            channel.close()

    if not isinstance(payload, dict):
        log.error("Sign-in attempt failed: %s", payload)
        raise salt.ext.tornado.gen.Return(False)
    if "load" in payload:
        if "ret" in payload["load"]:
            if not payload["load"]["ret"]:
                if self.opts["rejected_retry"]:
                    log.error(
                        "The Salt Master has rejected this minion's public "
                        "key.\nTo repair this issue, delete the public key "
                        "for this minion on the Salt Master.\nThe Salt "
                        "Minion will attempt to to re-authenicate."
                    )
                    raise salt.ext.tornado.gen.Return("retry")
                else:
                    log.critical(
                        "The Salt Master has rejected this minion's public "
                        "key!\nTo repair this issue, delete the public key "
                        "for this minion on the Salt Master and restart this "
                        "minion.\nOr restart the Salt Master in open mode to "
                        "clean out the keys. The Salt Minion will now exit."
                    )
                    # Add a random sleep here for systems that are using a
                    # a service manager to immediately restart the service
                    # to avoid overloading the system
                    time.sleep(random.randint(10, 20))
                    sys.exit(salt.defaults.exitcodes.EX_NOPERM)
            # has the master returned that its maxed out with minions?
            elif payload["load"]["ret"] == "full":
                raise salt.ext.tornado.gen.Return("full")
            else:
                log.error(
                    "The Salt Master has cached the public key for this "
                    "node, this salt minion will wait for %s seconds "
                    "before attempting to re-authenticate",
                    self.opts["acceptance_wait_time"],
                )
                raise salt.ext.tornado.gen.Return("retry")
    auth["aes"] = self.verify_master(payload, master_pub="token" in sign_in_payload)
    if not auth["aes"]:
        log.critical(
            "The Salt Master server's public key did not authenticate!\n"
            "The master may need to be updated if it is a version of Salt "
            "lower than %s, or\n"
            "If you are confident that you are connecting to a valid Salt "
            "Master, then remove the master public key and restart the "
            "Salt Minion.\nThe master public key can be found "
            "at:\n%s",
            salt.version.__version__,
            m_pub_fn,
        )
        raise SaltClientError("Invalid master key")
    if self.opts.get("syndic_master", False):  # Is syndic
        syndic_finger = self.opts.get(
            "syndic_finger", self.opts.get("master_finger", False)
        )
        if syndic_finger:
            if (
                salt.utils.crypt.pem_finger(
                    m_pub_fn, sum_type=self.opts["hash_type"]
                )
                != syndic_finger
            ):
                self._finger_fail(syndic_finger, m_pub_fn)
    else:
        if self.opts.get("master_finger", False):
            if (
                salt.utils.crypt.pem_finger(
                    m_pub_fn, sum_type=self.opts["hash_type"]
                )
                != self.opts["master_finger"]
            ):
                self._finger_fail(self.opts["master_finger"], m_pub_fn)
    auth["publish_port"] = payload["publish_port"]
    raise salt.ext.tornado.gen.Return(auth)
626
crypt.py
Python
salt/crypt.py
70972c8016ff5d6fbdd7f83776077b0936f60dea
salt
22
101,595
32
15
10
168
21
0
41
138
_get_count_and_filelist
Overhaul sort: - Standardize image data reading and writing - Optimize loading (just one pass required) - Make all sort groups binnable (to greater or lesser results) - Add sort by pitch - Deprecate multiple options - linting, docs + locales
https://github.com/deepfakes/faceswap.git
def _get_count_and_filelist(self, fast_count, count):
    if isinstance(self.location, (list, tuple)):
        file_list = self.location
    else:
        file_list = get_image_paths(self.location)

    self._file_list = [fname for fname in file_list
                       if os.path.splitext(fname)[-1].lower() == ".png"]
    self._count = len(self.file_list) if count is None else count

    logger.debug("count: %s", self.count)
    logger.trace("filelist: %s", self.file_list)
105
image.py
Python
lib/image.py
98d01760e469fd2108eed8d0b0a1ba6297c3177c
faceswap
5
271,005
28
11
9
145
17
0
31
105
test_adapt_doesnt_overwrite_input_shape
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def test_adapt_doesnt_overwrite_input_shape(self):
    # Shape: (3, 1, 2)
    adapt_dataset = np.array(
        [[[1.0, 2.0]], [[3.0, 4.0]], [[5.0, 6.0]]], dtype=np.float32
    )

    layer = AddingPreprocessingLayer(input_shape=[1, 2])
    layer.adapt(adapt_dataset)

    model = keras.Sequential([layer])
    self.assertTrue(model.built)
    self.assertEqual(model.input_shape, (None, 1, 2))
108
base_preprocessing_layer_test.py
Python
keras/engine/base_preprocessing_layer_test.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
1
47,665
40
12
21
186
25
0
49
256
test_deps_sorted
Replace usage of `DummyOperator` with `EmptyOperator` (#22974) * Replace usage of `DummyOperator` with `EmptyOperator`
https://github.com/apache/airflow.git
def test_deps_sorted(self):
    from airflow.operators.empty import EmptyOperator
    from airflow.sensors.external_task import ExternalTaskSensor

    execution_date = datetime(2020, 1, 1)
    with DAG(dag_id="test_deps_sorted", start_date=execution_date) as dag:
        task1 = ExternalTaskSensor(
            task_id="task1",
            external_dag_id="external_dag_id",
            mode="reschedule",
        )
        task2 = EmptyOperator(task_id="task2")
        task1 >> task2

    serialize_op = SerializedBaseOperator.serialize_operator(dag.task_dict["task1"])
    deps = serialize_op["deps"]
    assert deps == [
        'airflow.ti_deps.deps.not_in_retry_period_dep.NotInRetryPeriodDep',
        'airflow.ti_deps.deps.not_previously_skipped_dep.NotPreviouslySkippedDep',
        'airflow.ti_deps.deps.prev_dagrun_dep.PrevDagrunDep',
        'airflow.ti_deps.deps.ready_to_reschedule.ReadyToRescheduleDep',
        'airflow.ti_deps.deps.trigger_rule_dep.TriggerRuleDep',
    ]
109
test_dag_serialization.py
Python
tests/serialization/test_dag_serialization.py
49e336ae0302b386a2f47269a6d13988382d975f
airflow
1
176,169
11
8
4
41
5
0
13
25
tetrahedral_graph
Docstrings for the small.py module (#5240) * added description for the first 5 small graphs * modified descriptions based on comment and added description for two more functions * added doctrings to all the functions * Minor touchups. Co-authored-by: Ross Barnowski <[email protected]>
https://github.com/networkx/networkx.git
def tetrahedral_graph(create_using=None):
    G = complete_graph(4, create_using)
    G.name = "Platonic Tetrahedral graph"
    return G
23
small.py
Python
networkx/generators/small.py
dec723f072eb997a497a159dbe8674cd39999ee9
networkx
1
155,297
62
16
18
252
35
0
78
178
test_zero_copy_export_for_primitives
REFACTOR-#5303: Fix code scanning alert - Unused local variable (#5304) Signed-off-by: Myachev <[email protected]> Co-authored-by: Mahesh Vashishtha <[email protected]>
https://github.com/modin-project/modin.git
def test_zero_copy_export_for_primitives(data_has_nulls):
    data = get_data_of_all_types(
        has_nulls=data_has_nulls, include_dtypes=["int", "uint", "float"]
    )
    at = pa.Table.from_pydict(data)

    md_df = from_arrow(at)
    protocol_df = md_df.__dataframe__(allow_copy=False)

    for i, col in enumerate(protocol_df.get_columns()):
        col_arr, _ = primitive_column_to_ndarray(col)

        exported_ptr = col_arr.__array_interface__["data"][0]
        producer_ptr = at.column(i).chunks[0].buffers()[-1].address
        # Verify that the pointers of produce and exported objects point to the same data
        assert producer_ptr == exported_ptr

    # Can't export `md_df` zero-copy no more as it has delayed 'fillna' operation
    md_df = md_df.fillna({"float32": 32.0})
    non_zero_copy_protocol_df = md_df.__dataframe__(allow_copy=False)

    with pytest.raises(RuntimeError):
        primitive_column_to_ndarray(
            non_zero_copy_protocol_df.get_column_by_name("float32")
        )
151
test_protocol.py
Python
modin/test/interchange/dataframe_protocol/hdk/test_protocol.py
eb99c500a40c5565012e3fe83c5e6ef333d1b487
modin
2
259,734
41
11
8
188
21
1
48
80
test_minibatch_nmf_negative_beta_loss
FEA Online implementation of non-negative matrix factorization (#16948) Co-authored-by: Tom Dupré la Tour <[email protected]> Co-authored-by: jeremie du boisberranger <[email protected]> Co-authored-by: Thomas J. Fan <[email protected]> Co-authored-by: Jérémie du Boisberranger <[email protected]>
https://github.com/scikit-learn/scikit-learn.git
def test_minibatch_nmf_negative_beta_loss(beta_loss):
    rng = np.random.RandomState(0)
    X = rng.normal(size=(6, 5))
    X[X < 0] = 0

    nmf = MiniBatchNMF(beta_loss=beta_loss, random_state=0)
    msg = "When beta_loss <= 0 and X contains zeros, the solver may diverge."
    with pytest.raises(ValueError, match=msg):
        nmf.fit(X)


@pytest.mark.parametrize(
    ["Estimator", "solver"],
    [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]],
)
@pytest.mark.parametrize( ["Estimator", "solver"], [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]], )
71
test_nmf.py
Python
sklearn/decomposition/tests/test_nmf.py
69132ebbd39f070590ca01813340b5b12c0d02ab
scikit-learn
1
246,283
19
8
3
42
7
0
19
39
get_logcontext_id
Add missing type hints to synapse.replication. (#11938)
https://github.com/matrix-org/synapse.git
def get_logcontext_id(self) -> str:
    # by default, we just use the command name.
    return self.NAME


SC = TypeVar("SC", bound="_SimpleCommand")
12
commands.py
Python
synapse/replication/tcp/commands.py
d0e78af35e519ff76bd23e786007f3e7130d90f7
synapse
1
145,235
58
9
7
231
18
1
80
338
_get_gpu_complaint
[KubeRay] Format autoscaling config based on RayCluster CR (#22348) Closes #21655. At the start of each autoscaler iteration, we read the Ray Cluster CR from K8s and use it to extract the autoscaling config.
https://github.com/ray-project/ray.git
def _get_gpu_complaint() -> str:
    return (
        "Detected GPUs in container resources for group small-group."
        "To ensure Ray and the autoscaler are aware of the GPUs,"
        " set the `--num-gpus` rayStartParam."
    )


PARAM_ARGS = ",".join(
    [
        "ray_cr_in",
        "expected_config_out",
        "expected_error",
        "expected_error_message",
        "expected_log_warning",
    ]
)

TEST_DATA = [
    pytest.param(
        _get_basic_ray_cr(),
        _get_basic_autoscaling_config(),
        None,
        None,
        None,
        id="basic",
    ),
    pytest.param(
        _get_ray_cr_no_cpu_error(),
        None,
        ValueError,
        _get_no_cpu_error(),
        None,
        id="no-cpu-error",
    ),
    pytest.param(
        _get_ray_cr_memory_and_gpu(),
        _get_autoscaling_config_memory_and_gpu(),
        None,
        None,
        None,
        id="memory-and-gpu",
    ),
    pytest.param(
        _get_ray_cr_missing_gpu_arg(),
        _get_basic_autoscaling_config(),
        None,
        None,
        _get_gpu_complaint(),
        id="gpu-complaint",
    ),
]


@pytest.mark.parametrize(PARAM_ARGS, TEST_DATA)
@pytest.mark.parametrize(PARAM_ARGS, TEST_DATA)
13
test_autoscaling_config.py
Python
docker/kuberay-autoscaler/test_autoscaling_config.py
a402e956a4e1ebe9bc4e2b404536466967c497af
ray
1
261,761
26
12
8
100
17
0
29
61
test_kbinsdiscretizer_wrong_strategy_with_weights
ENH add support for sample_weight in KBinsDiscretizer with strategy="quantile" (#24935) Co-authored-by: seladus <[email protected]> Co-authored-by: Seladus <[email protected]> Co-authored-by: Guillaume Lemaitre <[email protected]>
https://github.com/scikit-learn/scikit-learn.git
def test_kbinsdiscretizer_wrong_strategy_with_weights(strategy):
    sample_weight = np.ones(shape=(len(X)))
    est = KBinsDiscretizer(n_bins=3, strategy=strategy)
    err_msg = (
        "`sample_weight` was provided but it can only be used with strategy='quantile'."
    )
    with pytest.raises(ValueError, match=err_msg):
        est.fit(X, sample_weight=sample_weight)
60
test_discretization.py
Python
sklearn/preprocessing/tests/test_discretization.py
1f3c1be77a5b15d6bc1a5bfd9eb64315928679b1
scikit-learn
1
156,511
11
8
17
26
4
0
11
25
__dask_graph__
Collection Protocol (#8674) [PEP 544](https://www.python.org/dev/peps/pep-0544/) introduces the `Protocol` class to the `typing` module in Python 3.8 (the soon be the minimum supported version, https://github.com/dask/community/issues/213). Writing new Dask collections for [dask-awkward](https://github.com/ContinuumIO/dask-awkward/) has had me thinking about working on a `DaskCollection` protocol. I imagine the benefits to be: - usage with static type checkers - other activity in this area at - #8295 - #8706 - #8854 - Python supporting IDEs take advantage of typing - self-documenting; some improvements to [the custom collections page](https://docs.dask.org/en/latest/custom-collections.html) of the docs. The protocol docs can be autogenerated and added to that page. - purely opt-in feature The `typing.runtime_checkable` decorator allows use of `isinstance(x, DaskCollection)` in any code base that uses Dask collections; for example: ```python >>> from dask.typing import DaskCollection >>> import dask.array as da >>> x = da.zeros((10, 3)) >>> isinstance(x, DaskCollection) True ``` (though this is an order of magnitude slower than `dask.base.is_dask_collection` which only checks for `x.__dask_graph__() is not None`; static typing checking & built-in interface documentation are the core benefits IMO) Something else that came up in the brief discussion on a call last week was having `{Scheduler,Worker,Nanny}Plugin` protocols in `distributed`; and perhaps those are better places to start introducing protocols to Dask since on the user side typically more folks would write plugins than new collections.
https://github.com/dask/dask.git
def __dask_graph__(self) -> Mapping:
    raise NotImplementedError("Inheriting class must implement this method.")
13
typing.py
Python
dask/typing.py
1e783d9a714160e968936cb22d54d085959ab09e
dask
1
309,557
50
18
27
240
18
0
62
383
test_notify_credential_profile
Upgrade boto3 to 1.20.24 + aiobotocore to 2.1.0 (#64045)
https://github.com/home-assistant/core.git
async def test_notify_credential_profile(hass):
    with async_patch("aiobotocore.session.AioSession", new=MockAioSession):
        await async_setup_component(
            hass,
            "aws",
            {
                "aws": {
                    "notify": [
                        {
                            "service": "sqs",
                            "name": "SQS Test",
                            "region_name": "us-east-1",
                            "profile_name": "test",
                        }
                    ]
                }
            },
        )
        await hass.async_block_till_done()

    sessions = hass.data[aws.DATA_SESSIONS]
    assert sessions is not None
    assert len(sessions) == 1
    assert isinstance(sessions.get("default"), MockAioSession)

    assert hass.services.has_service("notify", "sqs_test") is True
    await hass.services.async_call(
        "notify", "sqs_test", {"message": "test", "target": "ARN"}, blocking=True
    )
131
test_init.py
Python
tests/components/aws/test_init.py
b17860a7dd283d54bc452e5dca23532d05822589
core
1
288,027
31
11
6
61
5
0
36
97
model
Refactor apcupsd to use config flow (#64809) * Add Config Flow to APCUPSd integration and remove YAML support. * Hide the binary sensor if user does not select STATFLAG resource. * Add tests for config flows. * Simplify config flow code. * Spell fix. * Fix pylint warnings. * Simplify the code for config flow. * First attempt to implement import flows to suppport legacy YAML configurations. * Remove unnecessary log calls. * Wrap synchronous update call with `hass.async_add_executor_job`. * Import the YAML configurations when sensor platform is set up. * Move the logger call since the variables are not properly set up. * Add codeowner. * Fix name field of manifest.json. * Fix linting issue. * Fix incorrect dependency due to incorrect rebase. * Update codeowner and config flows via hassfest. * Postpone the deprecation warning to 2022.7. * Import future annotations for init file. * Add an newline at the end to make prettier happy. * Update github id. * Add type hints for return types of steps in config flow. * Move the deprecation date for YAML config to 2022.12. * Update according to reviews. * Use async_forward_entry_setups. * Add helper properties to `APCUPSdData` class. * Add device_info for binary sensor. * Simplify config flow. * Remove options flow strings. * update the tests according to the changes. * Add `entity_registry_enabled_default` to entities and use imported CONF_RESOURCES to disable entities instead of skipping them. * Update according to reviews. * Do not use model of the UPS as the title for the integration. Instead, simply use "APCUPSd" as the integration title and let the device info serve as title for each device instead. * Change schema to be a global variable. * Add more comments. * Rewrite the tests for config flows. * Fix enabled_by_default. * Show friendly titles in the integration. * Add import check in `async_setup_platform` to avoid importing in sensor platform setup. * Add import check in `async_setup_platform` to avoid importing in sensor platform setup. * Update comments in test files. * Use parametrize instead of manually iterating different test cases. * Swap the order of the platform constants. * Avoid using broad exceptions. * Set up device info via `_attr_device_info`. * Remove unrelated test in `test_config_flow`. * Use `DeviceInfo` instead of dict to assign to `_attr_device_info`. * Add english translation. * Add `async_create_issue` for deprecated YAML configuration. * Enable UPS status by default since it could show "online, charging, on battery etc" which is meaningful for all users. * Apply suggestions from code review * Apply suggestion * Apply suggestion Co-authored-by: Martin Hjelmare <[email protected]>
https://github.com/home-assistant/core.git
def model(self) -> str | None:
    # Different UPS models may report slightly different keys for model, here we
    # try them all.
    for model_key in ("APCMODEL", "MODEL"):
        if model_key in self.status:
            return self.status[model_key]
    return None
35
__init__.py
Python
homeassistant/components/apcupsd/__init__.py
52307708c843b947a2d631f2fe7ddaa8bd9a90d7
core
3
46,209
2
6
17
12
1
0
2
5
lazy_add_provider_discovered_options_to_connection_form
Add generic connection type (#22310) See https://github.com/apache/airflow/discussions/20350.
https://github.com/apache/airflow.git
def lazy_add_provider_discovered_options_to_connection_form():
102
views.py
Python
airflow/www/views.py
6d1d53b780c48297fa2e6d8e075fdaa0f0f42e22
airflow
2
275,864
43
14
9
88
13
0
50
109
_legacy_weights
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def _legacy_weights(layer):
    weights = layer.trainable_weights + layer.non_trainable_weights
    if any(not isinstance(w, tf.Variable) for w in weights):
        raise NotImplementedError(
            f"Save or restore weights that is not an instance of `tf.Variable` is "
            f"not supported in h5, use `save_format='tf'` instead. Received a "
            f"model or layer {layer.__class__.__name__} with weights {weights}"
        )
    return weights
45
hdf5_format.py
Python
keras/saving/hdf5_format.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
3
104,754
153
23
55
669
35
0
280
1,003
_create_builder_config
fix outdated docstring about default dataset config (#4186)
https://github.com/huggingface/datasets.git
def _create_builder_config(self, name=None, custom_features=None, **config_kwargs) -> Tuple[BuilderConfig, str]:
    builder_config = None

    # try default config
    if name is None and self.BUILDER_CONFIGS and not config_kwargs:
        if self.DEFAULT_CONFIG_NAME is not None:
            builder_config = self.builder_configs.get(self.DEFAULT_CONFIG_NAME)
            logger.warning(f"No config specified, defaulting to: {self.name}/{builder_config.name}")
        else:
            if len(self.BUILDER_CONFIGS) > 1:
                example_of_usage = f"load_dataset('{self.name}', '{self.BUILDER_CONFIGS[0].name}')"
                raise ValueError(
                    "Config name is missing."
                    f"\nPlease pick one among the available configs: {list(self.builder_configs.keys())}"
                    + f"\nExample of usage:\n\t`{example_of_usage}`"
                )
            builder_config = self.BUILDER_CONFIGS[0]
            logger.info(f"No config specified, defaulting to the single config: {self.name}/{builder_config.name}")

    # try get config by name
    if isinstance(name, str):
        builder_config = self.builder_configs.get(name)
        if builder_config is None and self.BUILDER_CONFIGS:
            raise ValueError(f"BuilderConfig {name} not found. Available: {list(self.builder_configs.keys())}")

    # if not using an existing config, then create a new config on the fly with config_kwargs
    if not builder_config:
        if name is not None:
            config_kwargs["name"] = name
        if "version" not in config_kwargs and hasattr(self, "VERSION") and self.VERSION:
            config_kwargs["version"] = self.VERSION
        builder_config = self.BUILDER_CONFIG_CLASS(**config_kwargs)

    # otherwise use the config_kwargs to overwrite the attributes
    else:
        builder_config = copy.deepcopy(builder_config)
        for key, value in config_kwargs.items():
            if value is not None:
                if not hasattr(builder_config, key):
                    raise ValueError(f"BuilderConfig {builder_config} doesn't have a '{key}' key.")
                setattr(builder_config, key, value)

    if not builder_config.name:
        raise ValueError(f"BuilderConfig must have a name, got {builder_config.name}")

    # compute the config id that is going to be used for caching
    config_id = builder_config.create_config_id(
        config_kwargs,
        custom_features=custom_features,
    )
    is_custom = config_id not in self.builder_configs
    if is_custom:
        logger.warning(f"Using custom data configuration {config_id}")
    else:
        if builder_config != self.builder_configs[builder_config.name]:
            raise ValueError(
                "Cannot name a custom BuilderConfig the same as an available "
                f"BuilderConfig. Change the name. Available BuilderConfigs: {list(self.builder_configs.keys())}"
            )
        if not builder_config.version:
            raise ValueError(f"BuilderConfig {builder_config.name} must have a version")
        # if not builder_config.description:
        #     raise ValueError(f"BuilderConfig {builder_config.name} must have a description"  )

    return builder_config, config_id
320
builder.py
Python
src/datasets/builder.py
27973e4fcf45c16dc9f81f18459ce54e4165aa2a
datasets
21
3,358
52
15
18
387
29
0
71
141
test_valid_incremental_read_with_no_interval
CDK: Fix typing errors (#9037) * fix typing, drop AirbyteLogger * format * bump the version * use logger instead of fixture logger Co-authored-by: Eugene Kulak <[email protected]> Co-authored-by: auganbay <[email protected]>
https://github.com/airbytehq/airbyte.git
def test_valid_incremental_read_with_no_interval(mocker):
    stream_output = [{"k1": "v1"}, {"k2": "v2"}]
    s1 = MockStream([({"sync_mode": SyncMode.incremental, "stream_state": {}}, stream_output)], name="s1")
    s2 = MockStream([({"sync_mode": SyncMode.incremental, "stream_state": {}}, stream_output)], name="s2")
    state = {"cursor": "value"}
    mocker.patch.object(MockStream, "get_updated_state", return_value=state)
    mocker.patch.object(MockStream, "supports_incremental", return_value=True)
    mocker.patch.object(MockStream, "get_json_schema", return_value={})

    src = MockSource(streams=[s1, s2])
    catalog = ConfiguredAirbyteCatalog(streams=[_configured_stream(s1, SyncMode.incremental), _configured_stream(s2, SyncMode.incremental)])

    expected = [
        *_as_records("s1", stream_output),
        _state({"s1": state}),
        *_as_records("s2", stream_output),
        _state({"s1": state, "s2": state}),
    ]
    messages = _fix_emitted_at(list(src.read(logger, {}, catalog, state=defaultdict(dict))))

    assert expected == messages
235
test_abstract_source.py
Python
airbyte-cdk/python/unit_tests/sources/test_abstract_source.py
f83eca58eaf2129d21b5796a301732ab22675130
airbyte
1
267,940
4
6
2
23
5
0
4
11
supported_pythons
ansible-test - Use more native type hints. (#78435) * ansible-test - Use more native type hints. Simple search and replace to switch from comments to native type hints for return types of functions with no arguments. * ansible-test - Use more native type hints. Conversion of simple single-line function annotation type comments to native type hints. * ansible-test - Use more native type hints. Conversion of single-line function annotation type comments with default values to native type hints. * ansible-test - Use more native type hints. Manual conversion of type annotation comments for functions which have pylint directives.
https://github.com/ansible/ansible.git
def supported_pythons(self) -> t.List[str]:
13
completion.py
Python
test/lib/ansible_test/_internal/completion.py
3eb0485dd92c88cc92152d3656d94492db44b183
ansible
1
261,504
47
12
16
279
34
1
63
141
test_predict_iris
ENH add newton-cholesky solver to LogisticRegression (#24767)
https://github.com/scikit-learn/scikit-learn.git
def test_predict_iris(clf):
    n_samples, n_features = iris.data.shape
    target = iris.target_names[iris.target]

    if clf.solver == "lbfgs":
        # lbfgs has convergence issues on the iris data with its default max_iter=100
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", ConvergenceWarning)
            clf.fit(iris.data, target)
    else:
        clf.fit(iris.data, target)
    assert_array_equal(np.unique(target), clf.classes_)

    pred = clf.predict(iris.data)
    assert np.mean(pred == target) > 0.95

    probabilities = clf.predict_proba(iris.data)
    assert_allclose(probabilities.sum(axis=1), np.ones(n_samples))

    pred = iris.target_names[probabilities.argmax(axis=1)]
    assert np.mean(pred == target) > 0.95


@pytest.mark.parametrize("LR", [LogisticRegression, LogisticRegressionCV])
@pytest.mark.parametrize("LR", [LogisticRegression, LogisticRegressionCV])
161
test_logistic.py
Python
sklearn/linear_model/tests/test_logistic.py
bb080aa690364d84d11232c73dc8db2f0dde3578
scikit-learn
2
268,686
57
13
22
374
36
0
80
262
configure_source_trees
ansible-test - Improve container management. (#78550) See changelogs/fragments/ansible-test-container-management.yml for details.
https://github.com/ansible/ansible.git
def configure_source_trees(cls):
    current_ansible = pathlib.Path(os.environ['PYTHONPATH']).parent
    root_ansible = pathlib.Path('~').expanduser() / 'ansible'
    test_ansible = pathlib.Path(f'~{UNPRIVILEGED_USER_NAME}').expanduser() / 'ansible'

    if current_ansible != root_ansible:
        display.info(f'copying {current_ansible} -> {root_ansible} ...')
        rmtree(root_ansible)
        shutil.copytree(current_ansible, root_ansible)
        run_command('chown', '-R', 'root:root', str(root_ansible))

    display.info(f'copying {current_ansible} -> {test_ansible} ...')
    rmtree(test_ansible)
    shutil.copytree(current_ansible, test_ansible)
    run_command('chown', '-R', f'{UNPRIVILEGED_USER_NAME}:{UNPRIVILEGED_USER_NAME}', str(test_ansible))

    paths = [pathlib.Path(test_ansible)]

    for root, dir_names, file_names in os.walk(test_ansible):
        paths.extend(pathlib.Path(root, dir_name) for dir_name in dir_names)
        paths.extend(pathlib.Path(root, file_name) for file_name in file_names)

    user = pwd.getpwnam(UNPRIVILEGED_USER_NAME)
    uid = user.pw_uid
    gid = user.pw_gid

    for path in paths:
        os.chown(path, uid, gid)
211
runme.py
Python
test/integration/targets/ansible-test-container/runme.py
cda16cc5e9aa8703fb4e1ac0a0be6b631d9076cc
ansible
6
177,995
56
15
6
371
36
1
81
470
perform_create
feat: DEV-2896: Comment List API (#2704) * feat: DEV-2896: Comment List API * Fix * Fix tests * Fix more tests * Fixes * [submodules] Build static heartexlabs/label-studio-frontend * [submodules] Build static heartexlabs/dm2 * Fix feature flags * [submodules] Build static heartexlabs/label-studio-frontend * [submodules] Build static heartexlabs/label-studio-frontend * Add fixes * [submodules] Build static heartexlabs/label-studio-frontend * [submodules] Build static heartexlabs/label-studio-frontend * [submodules] Build static heartexlabs/dm2 * [submodules] Build static heartexlabs/label-studio-frontend * Add user id to base.html * [submodules] Build static heartexlabs/label-studio-frontend * [submodules] Build static heartexlabs/label-studio-frontend * [submodules] Build static heartexlabs/label-studio-frontend * [submodules] Build static heartexlabs/label-studio-frontend * [submodules] Build static heartexlabs/dm2 * [submodules] Build static heartexlabs/dm2 * [submodules] Build static heartexlabs/label-studio-frontend * [submodules] Build static heartexlabs/label-studio-frontend * [submodules] Build static heartexlabs/label-studio-frontend * [submodules] Build static heartexlabs/dm2 * [submodules] Build static heartexlabs/label-studio-frontend * [submodules] Build static heartexlabs/dm2 * [submodules] Build static heartexlabs/label-studio-frontend * [submodules] Build static heartexlabs/label-studio-frontend * [submodules] Build static heartexlabs/dm2 * [submodules] Build static heartexlabs/label-studio-frontend * [submodules] Build static heartexlabs/dm2 * [submodules] Build static heartexlabs/label-studio-frontend * [submodules] Build static heartexlabs/label-studio-frontend Co-authored-by: Brandon Martel <[email protected]>
https://github.com/heartexlabs/label-studio.git
def perform_create(self, serializer):
    project_id = self.request.data.get('project')
    project = generics.get_object_or_404(Project, pk=project_id)
    instance = serializer.save(project=project)
    emit_webhooks_for_instance(self.request.user.active_organization, project, WebhookAction.TASKS_CREATED, [instance])


@method_decorator(name='get', decorator=swagger_auto_schema(
        tags=['Tasks'],
        operation_summary='Get task',
        operation_description=,
        manual_parameters=[
            openapi.Parameter(
                name='id',
                type=openapi.TYPE_STRING,
                in_=openapi.IN_PATH,
                description='Task ID'
            ),
        ]))
@method_decorator(name='patch', decorator=swagger_auto_schema(
        tags=['Tasks'],
        operation_summary='Update task',
        operation_description='Update the attributes of an existing labeling task.',
        manual_parameters=[
            openapi.Parameter(
                name='id',
                type=openapi.TYPE_STRING,
                in_=openapi.IN_PATH,
                description='Task ID'
            ),
        ],
        request_body=TaskSimpleSerializer))
@method_decorator(name='delete', decorator=swagger_auto_schema(
        tags=['Tasks'],
        operation_summary='Delete task',
        operation_description='Delete a task in Label Studio. This action cannot be undone!',
        manual_parameters=[
            openapi.Parameter(
                name='id',
                type=openapi.TYPE_STRING,
                in_=openapi.IN_PATH,
                description='Task ID'
            ),
        ],
        ))
@method_decorator(name='get', decorator=swagger_auto_schema( tags=['Tasks'], operation_summary='Get task', operation_description=""" Get task data, metadata, annotations and other attributes for a specific labeling task by task ID. """, manual_parameters=[ openapi.Parameter( name='id', type=openapi.TYPE_STRING, in_=openapi.IN_PATH, description='Task ID' ), ])) @method_decorator(name='patch', decorator=swagger_auto_schema( tags=['Tasks'], operation_summary='Update task', operation_description='Update the attributes of an existing labeling task.', manual_parameters=[ openapi.Parameter( name='id', type=openapi.TYPE_STRING, in_=openapi.IN_PATH, description='Task ID' ), ], request_body=TaskSimpleSerializer)) @method_decorator(name='delete', decorator=swagger_auto_schema( tags=['Tasks'], operation_summary='Delete task', operation_description='Delete a task in Label Studio. This action cannot be undone!', manual_parameters=[ openapi.Parameter( name='id', type=openapi.TYPE_STRING, in_=openapi.IN_PATH, description='Task ID' ), ], ))
71
api.py
Python
label_studio/tasks/api.py
71a9ada93224ed6433fb5b45bfcd60d5fe3edd4c
label-studio
1
122,415
39
13
9
155
14
0
64
79
_multi_dot_three
Call _check_arraylike for jnp.linalg & jnp.fft functions
https://github.com/google/jax.git
def _multi_dot_three(A, B, C, precision):
    a0, a1b0 = A.shape
    b1c0, c1 = C.shape
    # cost1 = cost((AB)C) = a0*a1b0*b1c0 + a0*b1c0*c1
    cost1 = a0 * b1c0 * (a1b0 + c1)
    # cost2 = cost(A(BC)) = a1b0*b1c0*c1 + a0*a1b0*c1
    cost2 = a1b0 * c1 * (a0 + b1c0)

    if cost1 < cost2:
        return jnp.dot(jnp.dot(A, B, precision=precision), C, precision=precision)
    else:
        return jnp.dot(A, jnp.dot(B, C, precision=precision), precision=precision)
103
linalg.py
Python
jax/_src/third_party/numpy/linalg.py
2416d154355f19e77b5c1ddf1de1f8552e4a98ad
jax
2
20,826
15
11
3
59
8
0
15
29
meta
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
https://github.com/pypa/pipenv.git
def meta(self) -> Dict[str, Any]:
    return {} if self._meta is None else cast(Dict[str, Any], loads(self._meta))
39
style.py
Python
pipenv/patched/notpip/_vendor/rich/style.py
f3166e673fe8d40277b804d35d77dcdb760fc3b3
pipenv
2
293,911
85
12
31
381
28
1
145
362
test_get_states_no_attributes
Avoid selecting attributes in the history api when `no_attributes` is passed (#68352)
https://github.com/home-assistant/core.git
def test_get_states_no_attributes(hass_recorder):
    hass = hass_recorder()
    now, future, states = _setup_get_states(hass)
    for state in states:
        state.attributes = {}

    # Get states returns everything before POINT for all entities
    for state1, state2 in zip(
        states,
        sorted(
            history.get_states(hass, future, no_attributes=True),
            key=lambda state: state.entity_id,
        ),
    ):
        assert state1 == state2

    # Get states returns everything before POINT for tested entities
    entities = [f"test.point_in_time_{i % 5}" for i in range(5)]
    for state1, state2 in zip(
        states,
        sorted(
            history.get_states(hass, future, entities, no_attributes=True),
            key=lambda state: state.entity_id,
        ),
    ):
        assert state1 == state2

    # Test get_state here because we have a DB setup
    assert states[0] == history.get_state(
        hass, future, states[0].entity_id, no_attributes=True
    )

    time_before_recorder_ran = now - timedelta(days=1000)
    assert history.get_states(hass, time_before_recorder_ran, no_attributes=True) == []

    assert (
        history.get_state(hass, time_before_recorder_ran, "demo.id", no_attributes=True)
        is None
    )


@pytest.mark.parametrize(
    "attributes, no_attributes, limit",
    [
        ({"attr": True}, False, 5000),
        ({}, True, 5000),
        ({"attr": True}, False, 3),
        ({}, True, 3),
    ],
)
@pytest.mark.parametrize( "attributes, no_attributes, limit", [ ({"attr": True}, False, 5000), ({}, True, 5000), ({"attr": True}, False, 3), ({}, True, 3), ], )
199
test_history.py
Python
tests/components/recorder/test_history.py
816695cc96c19110ccda10431d92160ea6064d32
core
5
68,683
39
20
25
235
25
0
51
31
_generate_dependants_map
style: Update docstrings and fix/add type hints + Collapsible progress section in Log
https://github.com/frappe/erpnext.git
def _generate_dependants_map() -> defaultdict:
    bom = frappe.qb.DocType("BOM")
    bom_item = frappe.qb.DocType("BOM Item")

    bom_parents = (
        frappe.qb.from_(bom_item)
        .join(bom)
        .on(bom_item.parent == bom.name)
        .select(bom_item.bom_no, bom_item.parent)
        .where(
            (bom_item.bom_no.isnotnull())
            & (bom_item.bom_no != "")
            & (bom.docstatus == 1)
            & (bom.is_active == 1)
            & (bom_item.parenttype == "BOM")
        )
    ).run(as_dict=True)

    child_parent_map = defaultdict(list)
    for bom in bom_parents:
        child_parent_map[bom.bom_no].append(bom.parent)

    return child_parent_map
144
bom_updation_utils.py
Python
erpnext/manufacturing/doctype/bom_update_log/bom_updation_utils.py
9f5f18e94da4254255a32d792abc94407ca5fde0
erpnext
2
133,165
37
12
12
83
10
0
39
126
register_ray
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
https://github.com/ray-project/ray.git
def register_ray():
    try:
        from ray.util.joblib.ray_backend import RayBackend

        register_parallel_backend("ray", RayBackend)
    except ImportError:
        msg = (
            "To use the ray backend you must install ray."
            "Try running 'pip install ray'."
            "See https://docs.ray.io/en/master/installation.html"
            "for more information."
        )
        raise ImportError(msg)


__all__ = ["register_ray"]
39
__init__.py
Python
python/ray/util/joblib/__init__.py
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
ray
2
246,874
16
11
8
80
13
0
16
88
test_aggregation_must_be_annotation
Replace assertEquals and friends with non-deprecated versions. (#12092)
https://github.com/matrix-org/synapse.git
def test_aggregation_must_be_annotation(self):
    channel = self.make_request(
        "GET",
        "/_matrix/client/unstable/rooms/%s/aggregations/%s/%s?limit=1"
        % (self.room, self.parent_id, RelationTypes.REPLACE),
        access_token=self.user_token,
    )
    self.assertEqual(400, channel.code, channel.json_body)
51
test_relations.py
Python
tests/rest/client/test_relations.py
02d708568b476f2f7716000b35c0adfa4cbd31b3
synapse
1
43,465
11
10
4
59
12
0
12
40
test_get_service_bus_message_conn
Implement Azure Service Bus Queue Operators (#24038) Implemented Azure Service Bus Queue based Operator's to create queue, send message to the queue and receive message(list of message or batch message) and delete queue in azure service - Added `AzureServiceBusCreateQueueOperator` - Added `AzureServiceBusSendMessageOperator` - Added `AzureServiceBusReceiveMessageOperator` - Added `AzureServiceBusDeleteQueueOperator` - Added Example DAG - Added Documentation - Added hooks and connection type in - provider yaml file - Added unit Test case, doc strings
https://github.com/apache/airflow.git
def test_get_service_bus_message_conn(self, mock_connection):
    mock_connection.return_value = self.conn
    hook = MessageHook(azure_service_bus_conn_id=self.conn_id)
    assert isinstance(hook.get_conn(), ServiceBusClient)
36
test_asb.py
Python
tests/providers/microsoft/azure/hooks/test_asb.py
09f38ad3f6872bae5059a1de226362eb358c4a7a
airflow
1
299,440
11
10
7
46
7
0
12
66
available
Fix Sonos races related to grouping and startup (#71026)
https://github.com/home-assistant/core.git
def available(self) -> bool:
    return (
        self.speaker.available
        and self.speaker.sonos_group_entities
        and self.media.playback_status
    )
28
media_player.py
Python
homeassistant/components/sonos/media_player.py
1f1932d224f26ec8b80fa41bea3cf127a97ca7fb
core
3
176,253
66
25
39
413
23
1
126
783
strongly_connected_components
Fixing Tarjan's strongly connected components algorithm implementation to have O(|E|+|V|) time complexity instead of O(|V|^3). (#5288) Prevent unnecessary traversal of edges multiple times
https://github.com/networkx/networkx.git
def strongly_connected_components(G):
    preorder = {}
    lowlink = {}
    scc_found = set()
    scc_queue = []
    i = 0  # Preorder counter
    neighbors = {v: iter(G[v]) for v in G}
    for source in G:
        if source not in scc_found:
            queue = [source]
            while queue:
                v = queue[-1]
                if v not in preorder:
                    i = i + 1
                    preorder[v] = i
                done = True
                for w in neighbors[v]:
                    if w not in preorder:
                        queue.append(w)
                        done = False
                        break
                if done:
                    lowlink[v] = preorder[v]
                    for w in G[v]:
                        if w not in scc_found:
                            if preorder[w] > preorder[v]:
                                lowlink[v] = min([lowlink[v], lowlink[w]])
                            else:
                                lowlink[v] = min([lowlink[v], preorder[w]])
                    queue.pop()
                    if lowlink[v] == preorder[v]:
                        scc = {v}
                        while scc_queue and preorder[scc_queue[-1]] > preorder[v]:
                            k = scc_queue.pop()
                            scc.add(k)
                        scc_found.update(scc)
                        yield scc
                    else:
                        scc_queue.append(v)


@not_implemented_for("undirected")
@not_implemented_for("undirected")
257
strongly_connected.py
Python
networkx/algorithms/components/strongly_connected.py
77c49c16e10693dbe566d20601b28dd2b1e8df03
networkx
15
189,028
50
18
14
232
24
0
72
140
disk_io_counters
Use a generator to avoid a needless list allocation (#1567)
https://github.com/giampaolo/psutil.git
def disk_io_counters(perdisk=False, nowrap=True):
    kwargs = dict(perdisk=perdisk) if LINUX else {}
    rawdict = _psplatform.disk_io_counters(**kwargs)
    if not rawdict:
        return {} if perdisk else None
    if nowrap:
        rawdict = _wrap_numbers(rawdict, 'psutil.disk_io_counters')
    nt = getattr(_psplatform, "sdiskio", _common.sdiskio)
    if perdisk:
        for disk, fields in rawdict.items():
            rawdict[disk] = nt(*fields)
        return rawdict
    else:
        return nt(*(sum(x) for x in zip(*rawdict.values())))


disk_io_counters.cache_clear = functools.partial(
    _wrap_numbers.cache_clear, 'psutil.disk_io_counters')
disk_io_counters.cache_clear.__doc__ = "Clears nowrap argument cache"


# =====================================================================
# --- network related functions
# =====================================================================
119
__init__.py
Python
psutil/__init__.py
83bb5f8cdcbe10fadf64e62e61d011afe73b6a27
psutil
8
292,401
36
12
13
192
24
0
43
150
post
Only set require_restart on config entry reload if its not recoverable (#66994)
https://github.com/home-assistant/core.git
async def post(self, request, entry_id):
    if not request["hass_user"].is_admin:
        raise Unauthorized(config_entry_id=entry_id, permission="remove")
    hass = request.app["hass"]
    entry = hass.config_entries.async_get_entry(entry_id)
    if not entry:
        return self.json_message("Invalid entry specified", HTTPStatus.NOT_FOUND)
    assert isinstance(entry, config_entries.ConfigEntry)

    try:
        await hass.config_entries.async_reload(entry_id)
    except config_entries.OperationNotAllowed:
        return self.json_message("Entry cannot be reloaded", HTTPStatus.FORBIDDEN)

    return self.json({"require_restart": not entry.state.recoverable})
115
config_entries.py
Python
homeassistant/components/config/config_entries.py
9a5eec561a18cd0bffbbd65afe68f70c0893d28c
core
4
132,696
20
8
29
41
6
0
21
49
testPlacementGroupRequests
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
https://github.com/ray-project/ray.git
def testPlacementGroupRequests(self, reuse_actors=False, scheduled=10):
    # Since we check per-step placement groups, set the reconcilation
    # interval to 0
    os.environ["TUNE_PLACEMENT_GROUP_RECON_INTERVAL"] = "0"
181
test_trial_runner_pg.py
Python
python/ray/tune/tests/test_trial_runner_pg.py
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
ray
2
269,303
8
8
2
57
9
1
8
12
gelu
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def gelu(x, approximate=False):
    return tf.nn.gelu(x, approximate)


@keras_export("keras.activations.tanh")
@tf.__internal__.dispatch.add_dispatch_support
@keras_export("keras.activations.tanh") @tf.__internal__.dispatch.add_dispatch_support
21
activations.py
Python
keras/activations.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
1
199,700
13
8
13
47
7
0
13
18
legendre_poly
Run orthopolys and appellseqs through a common interface Including unifying the two Chebyshev generators into one function. There are also two kinds of Hermite polynomials, and they too share the same recurrence, but the second type He_n(x) (aka the probabilist, reduced or small polynomials) will not be added here.
https://github.com/sympy/sympy.git
def legendre_poly(n, x=None, polys=False):
    r
    return named_poly(n, dup_legendre, QQ, "Legendre polynomial", (x,), polys)
33
orthopolys.py
Python
sympy/polys/orthopolys.py
d1d46df73ebaad94089847558d00a8b7269f554d
sympy
1
19,147
7
6
5
28
6
0
7
21
metrics
Improve evaluation api (#5256) * init Signed-off-by: Weichen Xu <[email protected]> * update Signed-off-by: Weichen Xu <[email protected]> * update Signed-off-by: Weichen Xu <[email protected]> * update doc Signed-off-by: Weichen Xu <[email protected]> * update doc Signed-off-by: Weichen Xu <[email protected]> * address comments Signed-off-by: Weichen Xu <[email protected]> * update doc Signed-off-by: Weichen Xu <[email protected]> * add shap limitation on value type Signed-off-by: Weichen Xu <[email protected]> * fix format Signed-off-by: Weichen Xu <[email protected]> * update Signed-off-by: Weichen Xu <[email protected]> * update Signed-off-by: Weichen Xu <[email protected]> * update Signed-off-by: Weichen Xu <[email protected]> * update Signed-off-by: Weichen Xu <[email protected]> * update Signed-off-by: Weichen Xu <[email protected]>
https://github.com/mlflow/mlflow.git
def metrics(self) -> Dict[str, Any]:
    return self._metrics
17
base.py
Python
mlflow/models/evaluation/base.py
4c58179509e6f6047789efb0a95c2b0e20cb6c8f
mlflow
1
260,150
112
16
55
646
25
0
400
1,068
_generate_invalid_param_val_interval
FIX Param validation: fix generating invalid param when 2 interval constraints (#23513) Co-authored-by: Julien Jerphanion <[email protected]> Co-authored-by: Guillaume Lemaitre <[email protected]>
https://github.com/scikit-learn/scikit-learn.git
def _generate_invalid_param_val_interval(interval, constraints):
    if interval.type is Real:
        # generate a non-integer value such that it can't be valid even if there's also
        # an integer interval constraint.
        if interval.left is None and interval.right is None:
            if interval.closed in ("left", "neither"):
                return np.inf
            elif interval.closed in ("right", "neither"):
                return -np.inf
            else:
                raise NotImplementedError

        if interval.left is not None:
            return np.floor(interval.left) - 0.5
        else:  # right is not None
            return np.ceil(interval.right) + 0.5

    else:  # interval.type is Integral
        if interval.left is None and interval.right is None:
            raise NotImplementedError

        # We need to check if there's also a real interval constraint to generate a
        # value that is not valid for any of the 2 interval constraints.
        real_intervals = [
            i for i in constraints if isinstance(i, Interval) and i.type is Real
        ]
        real_interval = real_intervals[0] if real_intervals else None

        if real_interval is None:
            # Only the integer interval constraint -> easy
            if interval.left is not None:
                return interval.left - 1
            else:  # interval.right is not None
                return interval.right + 1

        # There's also a real interval constraint. Try to find a value left to both or
        # right to both or in between them.

        # redefine left and right bounds to be smallest and largest valid integers in
        # both intervals.
        int_left = interval.left
        if int_left is not None and interval.closed in ("right", "neither"):
            int_left = int_left + 1

        int_right = interval.right
        if int_right is not None and interval.closed in ("left", "neither"):
            int_right = int_right - 1

        real_left = real_interval.left
        if real_interval.left is not None:
            real_left = int(np.ceil(real_interval.left))
            if real_interval.closed in ("right", "neither"):
                real_left = real_left + 1

        real_right = real_interval.right
        if real_interval.right is not None:
            real_right = int(np.floor(real_interval.right))
            if real_interval.closed in ("left", "neither"):
                real_right = real_right - 1

        if int_left is not None and real_left is not None:
            # there exists an int left to both intervals
            return min(int_left, real_left) - 1

        if int_right is not None and real_right is not None:
            # there exists an int right to both intervals
            return max(int_right, real_right) + 1

        if int_left is not None:
            if real_right is not None and int_left - real_right >= 2:
                # there exists an int between the 2 intervals
                return int_left - 1
            else:
                raise NotImplementedError
        else:  # int_right is not None
            if real_left is not None and real_left - int_right >= 2:
                # there exists an int between the 2 intervals
                return int_right + 1
            else:
                raise NotImplementedError
399
_param_validation.py
Python
sklearn/utils/_param_validation.py
02cbe01e67165d7d38e5e441cfccd6b57b2207b6
scikit-learn
32
289,375
18
10
5
75
11
0
21
36
test_fetch_period_api_with_no_timestamp
Ensure recorder test fixture is setup before hass fixture (#80528) * Ensure recorder test fixture is setup before hass fixture * Adjust more tests
https://github.com/home-assistant/core.git
async def test_fetch_period_api_with_no_timestamp(recorder_mock, hass, hass_client):
    await async_setup_component(hass, "history", {})
    client = await hass_client()
    response = await client.get("/api/history/period")
    assert response.status == HTTPStatus.OK
43
test_init.py
Python
tests/components/history/test_init.py
31a787558fd312331b55e5c2c4b33341fc3601fc
core
1
172,886
64
17
39
567
40
0
103
331
search_query
Refactored load read status for web access and opds access Refactored and removed discover html page Bugfix show author Bugfix open dialog in author page Fix for #2341 (advanced search with linked read column and read column having a higher number than number of available custom columns)
https://github.com/janeczku/calibre-web.git
def search_query(self, term, config_read_column, *join):
    term.strip().lower()
    self.session.connection().connection.connection.create_function("lower", 1, lcase)
    q = list()
    authorterms = re.split("[, ]+", term)
    for authorterm in authorterms:
        q.append(Books.authors.any(func.lower(Authors.name).ilike("%" + authorterm + "%")))
    query = self.generate_linked_query(config_read_column, Books)
    if len(join) == 6:
        query = query.outerjoin(join[0], join[1]).outerjoin(join[2]).outerjoin(join[3], join[4]).outerjoin(join[5])
    if len(join) == 3:
        query = query.outerjoin(join[0], join[1]).outerjoin(join[2])
    elif len(join) == 2:
        query = query.outerjoin(join[0], join[1])
    elif len(join) == 1:
        query = query.outerjoin(join[0])
    return query.filter(self.common_filters(True)).filter(
        or_(Books.tags.any(func.lower(Tags.name).ilike("%" + term + "%")),
            Books.series.any(func.lower(Series.name).ilike("%" + term + "%")),
            Books.authors.any(and_(*q)),
            Books.publishers.any(func.lower(Publishers.name).ilike("%" + term + "%")),
            func.lower(Books.title).ilike("%" + term + "%")
            ))

# read search results from calibre-database and return it (function is used for feed and simple search
353
db.py
Python
cps/db.py
32a3c45ee0f7e13bd61075f32a4dcebc415585a1
calibre-web
6
292,764
22
11
13
149
11
0
45
196
async_attribute_updated
Presets for single ZONNSMART TRV (#67157) * Presets for single ZONNSMART TRV * added zonnsmart climate tests * black8 fix
https://github.com/home-assistant/core.git
async def async_attribute_updated(self, record):
    if record.attr_name == "operation_preset":
        if record.value == 0:
            self._preset = PRESET_SCHEDULE
        if record.value == 1:
            self._preset = PRESET_NONE
        if record.value == 2:
            self._preset = self.PRESET_HOLIDAY
        if record.value == 3:
            self._preset = self.PRESET_HOLIDAY
        if record.value == 4:
            self._preset = self.PRESET_FROST
    await super().async_attribute_updated(record)
90
climate.py
Python
homeassistant/components/zha/climate.py
cbdfff25ca510a5d7dff9a1cb39ce2da8afc7d65
core
7
22,658
25
11
7
76
8
0
26
62
axpy
refactor: clean code Signed-off-by: slowy07 <[email protected]>
https://github.com/geekcomputers/Python.git
def axpy(scalar, x, y):
    # precondition
    assert (
        isinstance(x, Vector)
        and (isinstance(y, Vector))
        and (isinstance(scalar, int) or isinstance(scalar, float))
    )
    return x * scalar + y
50
lib.py
Python
linear-algebra-python/src/lib.py
f0af0c43340763724f139fa68aa1e5a9ffe458b4
Python
4
22,718
34
15
17
207
21
0
45
133
search_button
refactor: clean code Signed-off-by: slowy07 <[email protected]>
https://github.com/geekcomputers/Python.git
def search_button(p1):
    global cursor
    global results
    global index
    w.errorOutput.configure(text="")
    sql_command = sql_command = sql_command.format(w.inputSearchTitle.get())
    try:
        cursor.execute(sql_command)
        results = cursor.fetchall()
        w.errorOutput.configure(text=str(len(results)) + " results")
        index = 0
        if index >= 0 and index < len(results):
            w.outputNotice.delete(1.0, END)
            w.outputNotice.insert(1.0, results[index][2])
    except:
        w.errorOutput.configure(text="Please create at first a database.")
128
notepad_support.py
Python
notepad/notepad_support.py
f0af0c43340763724f139fa68aa1e5a9ffe458b4
Python
4
258,457
105
16
33
473
46
0
147
333
silhouette_samples
FIX Support integers in silhouette_score for precomputed distances (#22108) Co-authored-by: Guillaume Lemaitre <[email protected]>
https://github.com/scikit-learn/scikit-learn.git
def silhouette_samples(X, labels, *, metric="euclidean", **kwds):
    X, labels = check_X_y(X, labels, accept_sparse=["csc", "csr"])

    # Check for non-zero diagonal entries in precomputed distance matrix
    if metric == "precomputed":
        error_msg = ValueError(
            "The precomputed distance matrix contains non-zero "
            "elements on the diagonal. Use np.fill_diagonal(X, 0)."
        )
        if X.dtype.kind == "f":
            atol = np.finfo(X.dtype).eps * 100
            if np.any(np.abs(np.diagonal(X)) > atol):
                raise ValueError(error_msg)
        elif np.any(np.diagonal(X) != 0):  # integral dtype
            raise ValueError(error_msg)

    le = LabelEncoder()
    labels = le.fit_transform(labels)
    n_samples = len(labels)
    label_freqs = np.bincount(labels)
    check_number_of_labels(len(le.classes_), n_samples)

    kwds["metric"] = metric
    reduce_func = functools.partial(
        _silhouette_reduce, labels=labels, label_freqs=label_freqs
    )
    results = zip(*pairwise_distances_chunked(X, reduce_func=reduce_func, **kwds))
    intra_clust_dists, inter_clust_dists = results
    intra_clust_dists = np.concatenate(intra_clust_dists)
    inter_clust_dists = np.concatenate(inter_clust_dists)

    denom = (label_freqs - 1).take(labels, mode="clip")
    with np.errstate(divide="ignore", invalid="ignore"):
        intra_clust_dists /= denom

    sil_samples = inter_clust_dists - intra_clust_dists
    with np.errstate(divide="ignore", invalid="ignore"):
        sil_samples /= np.maximum(intra_clust_dists, inter_clust_dists)

    # nan values are for clusters of size 1, and should be 0
    return np.nan_to_num(sil_samples)
284
_unsupervised.py
Python
sklearn/metrics/cluster/_unsupervised.py
e4015289e0eeb390190ce0d051cee756bc5ecb33
scikit-learn
5
20,452
31
13
10
123
14
0
42
116
get_lexer_for_mimetype
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
https://github.com/pypa/pipenv.git
def get_lexer_for_mimetype(_mime, **options):
    for modname, name, _, _, mimetypes in LEXERS.values():
        if _mime in mimetypes:
            if name not in _lexer_cache:
                _load_lexers(modname)
            return _lexer_cache[name](**options)
    for cls in find_plugin_lexers():
        if _mime in cls.mimetypes:
            return cls(**options)
    raise ClassNotFound('no lexer for mimetype %r found' % _mime)
77
__init__.py
Python
pipenv/patched/notpip/_vendor/pygments/lexers/__init__.py
f3166e673fe8d40277b804d35d77dcdb760fc3b3
pipenv
6
118,503
85
13
72
634
25
0
208
583
test_complex_joins
join multiple tables https://github.com/mindsdb/mindsdb_sql/issues/246
https://github.com/mindsdb/mindsdb.git
def test_complex_joins(self, data_handler):
    df1 = pd.DataFrame([
        {'a': 1, 'c': 1, 'b': dt.datetime(2020, 1, 1)},
        {'a': 2, 'c': 1, 'b': dt.datetime(2020, 1, 2)},
        {'a': 1, 'c': 3, 'b': dt.datetime(2020, 1, 3)},
        {'a': 3, 'c': 2, 'b': dt.datetime(2020, 1, 2)},
    ])
    df2 = pd.DataFrame([
        {'a': 6, 'c': 1},
        {'a': 4, 'c': 2},
        {'a': 2, 'c': 3},
    ])
    self.set_handler(data_handler, name='pg', tables={'tbl1': df1, 'tbl2': df2})

    self.run_sql()
    self.run_sql()

    # --- test join table-table-table ---
    ret = self.run_sql()

    # must be 2 rows
    assert len(ret) == 2

    # all t1.a values are 1
    assert list(ret.t1a) == [1, 1]

    # t3.a has 2 and None
    assert len(ret[ret.t3a == 2]) == 1
    assert len(ret[ret.t3a.isna()]) == 1

    # --- test join table-predictor-view ---
    ret = self.run_sql()

    # must be 2 rows
    assert len(ret) == 2

    # t1.a > 1
    assert ret[ret.t1a <= 1].empty

    # view: a!=4
    assert ret[ret.t3a == 4].empty

    # t3.a has 6 and None
    assert len(ret[ret.t3a == 6]) == 1
    assert len(ret[ret.t3a.isna()]) == 1

    # contents predicted values
    assert list(ret.predicted.unique()) == [42]

    # --- tests table-subselect-view ---
    ret = self.run_sql()

    # 1 row
    assert len(ret) == 1

    # check row values
    row = ret.iloc[0].to_dict()

    assert row['t1a'] == 2
    assert row['t2t3a'] == 2
    assert row['t2t1a'] == 1
    assert row['t3c'] == 1
    assert row['t3a'] == 6
383
test_project_structure.py
Python
tests/unit/test_project_structure.py
602d5dfea04d73c5c8d4f9d43a6e3deb8a945f18
mindsdb
1
267,759
30
12
12
124
17
0
35
92
get_ci_provider
ansible-test - Use more native type hints. (#78435) * ansible-test - Use more native type hints. Simple search and replace to switch from comments to native type hints for return types of functions with no arguments. * ansible-test - Use more native type hints. Conversion of simple single-line function annotation type comments to native type hints. * ansible-test - Use more native type hints. Conversion of single-line function annotation type comments with default values to native type hints. * ansible-test - Use more native type hints. Manual conversion of type annotation comments for functions which have pylint directives.
https://github.com/ansible/ansible.git
def get_ci_provider() -> CIProvider:
    provider = None

    import_plugins('ci')

    candidates = sorted(get_subclasses(CIProvider), key=lambda c: (c.priority, c.__name__))

    for candidate in candidates:
        if candidate.is_supported():
            provider = candidate()
            break

    if provider.code:
        display.info('Detected CI provider: %s' % provider.name)

    return provider
73
__init__.py
Python
test/lib/ansible_test/_internal/ci/__init__.py
3eb0485dd92c88cc92152d3656d94492db44b183
ansible
4
276,450
48
12
22
282
43
0
61
257
testWithEmbeddings
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def testWithEmbeddings(self):
    state_input = keras.layers.Input(
        shape=(1,), name="state_input", dtype="int32"
    )
    output = keras.layers.Embedding(
        output_dim=16, input_dim=100, input_length=1, name="state"
    )(state_input)
    model = keras.models.Model(inputs=[state_input], outputs=[output])
    model.compile(
        loss={"state": "sparse_categorical_crossentropy"}, optimizer="adam"
    )

    # Freeze the graph.
    sess = keras.backend.get_session()
    variable_graph_def = sess.graph_def
    output_tensor = self._get_tensor_names(model.outputs)
    constant_graph_def = (
        tf.compat.v1.graph_util.convert_variables_to_constants(
            sess, variable_graph_def, output_tensor
        )
    )

    # Validate converted graph.
    input_data = np.array(np.random.random_sample([1, 1]), dtype=np.int32)
    self._ensure_no_variables_in_graph(constant_graph_def)
    self._test_converted_keras_model(model, constant_graph_def, input_data)
177
graph_util_test.py
Python
keras/tests/graph_util_test.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
1
19,671
48
12
21
212
28
0
61
255
sys_path
Issue 4993 Add standard pre commit hooks and apply linting. (#4994) * Add .pre-commit-config.yaml to the project and exclude tests (for now). This does not include the MyPy linting that pip does but does include everything else.
https://github.com/pypa/pipenv.git
def sys_path(self):
    # type: () -> List[str]
    from .vendor.vistir.compat import JSONDecodeError

    current_executable = Path(sys.executable).as_posix()
    if not self.python or self.python == current_executable:
        return sys.path
    elif any([sys.prefix == self.prefix, not self.is_venv]):
        return sys.path
    cmd_args = [self.python, "-c", "import json, sys; print(json.dumps(sys.path))"]
    path, _ = vistir.misc.run(
        cmd_args,
        return_object=False,
        nospin=True,
        block=True,
        combine_stderr=False,
        write_to_stdout=False,
    )
    try:
        path = json.loads(path.strip())
    except JSONDecodeError:
        path = sys.path
    return path
134
environment.py
Python
pipenv/environment.py
9a3b3ce70621af6f9adaa9eeac9cf83fa149319c
pipenv
5
250,139
138
17
121
991
49
0
275
1,863
test_out_of_order_events
Require types in tests.storage. (#14646) Adds missing type hints to `tests.storage` package and does not allow untyped definitions.
https://github.com/matrix-org/synapse.git
def test_out_of_order_events(self) -> None:
    event_factory = self.hs.get_event_builder_factory()
    bob = "@creator:test"
    alice = "@alice:test"
    room_id = "!room:test"

    # Ensure that we have a rooms entry so that we generate the chain index.
    self.get_success(
        self.store.store_room(
            room_id=room_id,
            room_creator_user_id="",
            is_public=True,
            room_version=RoomVersions.V6,
        )
    )

    # First persist the base room.
    create = self.get_success(
        event_factory.for_room_version(
            RoomVersions.V6,
            {
                "type": EventTypes.Create,
                "state_key": "",
                "sender": bob,
                "room_id": room_id,
                "content": {"tag": "create"},
            },
        ).build(prev_event_ids=[], auth_event_ids=[])
    )

    bob_join = self.get_success(
        event_factory.for_room_version(
            RoomVersions.V6,
            {
                "type": EventTypes.Member,
                "state_key": bob,
                "sender": bob,
                "room_id": room_id,
                "content": {"tag": "bob_join"},
            },
        ).build(prev_event_ids=[], auth_event_ids=[create.event_id])
    )

    power = self.get_success(
        event_factory.for_room_version(
            RoomVersions.V6,
            {
                "type": EventTypes.PowerLevels,
                "state_key": "",
                "sender": bob,
                "room_id": room_id,
                "content": {"tag": "power"},
            },
        ).build(
            prev_event_ids=[],
            auth_event_ids=[create.event_id, bob_join.event_id],
        )
    )

    self.persist([create, bob_join, power])

    # Now persist an invite and a couple of memberships out of order.
    alice_invite = self.get_success(
        event_factory.for_room_version(
            RoomVersions.V6,
            {
                "type": EventTypes.Member,
                "state_key": alice,
                "sender": bob,
                "room_id": room_id,
                "content": {"tag": "alice_invite"},
            },
        ).build(
            prev_event_ids=[],
            auth_event_ids=[create.event_id, bob_join.event_id, power.event_id],
        )
    )

    alice_join = self.get_success(
        event_factory.for_room_version(
            RoomVersions.V6,
            {
                "type": EventTypes.Member,
                "state_key": alice,
                "sender": alice,
                "room_id": room_id,
                "content": {"tag": "alice_join"},
            },
        ).build(
            prev_event_ids=[],
            auth_event_ids=[create.event_id, alice_invite.event_id, power.event_id],
        )
    )

    alice_join2 = self.get_success(
        event_factory.for_room_version(
            RoomVersions.V6,
            {
                "type": EventTypes.Member,
                "state_key": alice,
                "sender": alice,
                "room_id": room_id,
                "content": {"tag": "alice_join2"},
            },
        ).build(
            prev_event_ids=[],
            auth_event_ids=[create.event_id, alice_join.event_id, power.event_id],
        )
    )

    self.persist([alice_join])
    self.persist([alice_join2])
    self.persist([alice_invite])

    # The end result should be sane.
    events = [create, bob_join, power, alice_invite, alice_join]

    chain_map, link_map = self.fetch_chains(events)

    expected_links = [
        (bob_join, create),
        (power, create),
        (power, bob_join),
        (alice_invite, create),
        (alice_invite, power),
        (alice_invite, bob_join),
    ]

    # Check that the expected links and only the expected links have been
    # added.
    self.assertEqual(len(expected_links), len(list(link_map.get_additions())))

    for start, end in expected_links:
        start_id, start_seq = chain_map[start.event_id]
        end_id, end_seq = chain_map[end.event_id]
        self.assertIn(
            (start_seq, end_seq), list(link_map.get_links_between(start_id, end_id))
        )
620
test_event_chain.py
Python
tests/storage/test_event_chain.py
3ac412b4e2f8c5ba11dc962b8a9d871c1efdce9b
synapse
2
246,112
26
11
14
180
17
0
27
130
test_limit_and_from
Add admin API to get a list of federated rooms (#11658)
https://github.com/matrix-org/synapse.git
def test_limit_and_from(self) -> None:
    number_destinations = 20
    self._create_destinations(number_destinations)

    channel = self.make_request(
        "GET",
        self.url + "?from=5&limit=10",
        access_token=self.admin_user_tok,
    )

    self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
    self.assertEqual(channel.json_body["total"], number_destinations)
    self.assertEqual(channel.json_body["next_token"], "15")
    self.assertEqual(len(channel.json_body["destinations"]), 10)
    self._check_fields(channel.json_body["destinations"])
109
test_federation.py
Python
tests/rest/admin/test_federation.py
6a72c910f180ee8b4bd78223775af48492769472
synapse
1
167,802
68
11
42
290
27
0
96
195
cartesian_product
TYP: more return annotations in core/ (#47618) * TYP: more return annotations in core/ * from __future__ import annotations * more __future__
https://github.com/pandas-dev/pandas.git
def cartesian_product(X) -> list[np.ndarray]:
    msg = "Input must be a list-like of list-likes"
    if not is_list_like(X):
        raise TypeError(msg)
    for x in X:
        if not is_list_like(x):
            raise TypeError(msg)

    if len(X) == 0:
        return []

    lenX = np.fromiter((len(x) for x in X), dtype=np.intp)
    cumprodX = np.cumproduct(lenX)

    if np.any(cumprodX < 0):
        raise ValueError("Product space too large to allocate arrays!")

    a = np.roll(cumprodX, 1)
    a[0] = 1

    if cumprodX[-1] != 0:
        b = cumprodX[-1] / cumprodX
    else:
        # if any factor is empty, the cartesian product is empty
        b = np.zeros_like(cumprodX)

    return [tile_compat(np.repeat(x, b[i]), np.product(a[i])) for i, x in enumerate(X)]
182
util.py
Python
pandas/core/reshape/util.py
f65417656ba8c59438d832b6e2a431f78d40c21c
pandas
9
273,407
6
8
2
38
5
0
6
20
set_vocabulary
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def set_vocabulary(self, vocabulary, idf_weights=None):
    self._lookup_layer.set_vocabulary(vocabulary, idf_weights=idf_weights)
24
text_vectorization.py
Python
keras/layers/preprocessing/text_vectorization.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
1
189,932
30
15
14
170
16
0
37
111
select_resolution
Moved functionality over from `manim new` to `manim init` and added deprecation warning for `manim new` (#2842) * moved functionality over from manim new to manim init and deprecated manim new * Updated tests to fit new functionality * added a test for project creation using manim init project * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: Benjamin Hackl <[email protected]> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
https://github.com/ManimCommunity/manim.git
def select_resolution():
    resolution_options = []
    for quality in QUALITIES.items():
        resolution_options.append(
            (quality[1]["pixel_height"], quality[1]["pixel_width"]),
        )
    resolution_options.pop()
    choice = click.prompt(
        "\nSelect resolution:\n",
        type=click.Choice([f"{i[0]}p" for i in resolution_options]),
        show_default=False,
        default="480p",
    )
    return [res for res in resolution_options if f"{res[0]}p" == choice][0]
95
commands.py
Python
manim/cli/init/commands.py
9b731037d9f2c9e647737c9ca44f1a968675feb6
manim
5
12,699
8
9
6
33
7
0
8
22
is_decompressed_with_data
feat: allow to access parameters of data request wo loading data (#4991)
https://github.com/jina-ai/jina.git
def is_decompressed_with_data(self) -> bool:
    return type(self._pb_body) is jina_pb2.DataRequestProto
19
data.py
Python
jina/types/request/data.py
c3849c6fee4a65a77a82b2cfda9670d727ff0f53
jina
1