Column schema. For `stringlengths` columns, Min/Max are string lengths in characters; `language` is a `stringclasses` column with a single value (Python).

| Column | Dtype | Min | Max |
|---|---|---|---|
| id | int64 | 20 | 338k |
| vocab_size | int64 | 2 | 671 |
| ast_levels | int64 | 4 | 32 |
| nloc | int64 | 1 | 451 |
| n_ast_nodes | int64 | 12 | 5.6k |
| n_identifiers | int64 | 1 | 186 |
| n_ast_errors | int64 | 0 | 10 |
| n_words | int64 | 2 | 2.17k |
| n_whitespaces | int64 | 2 | 13.8k |
| fun_name | stringlengths | 2 | 73 |
| commit_message | stringlengths | 51 | 15.3k |
| url | stringlengths | 31 | 59 |
| code | stringlengths | 51 | 31k |
| ast_errors | stringlengths | 0 | 1.46k |
| token_counts | int64 | 6 | 3.32k |
| file_name | stringlengths | 5 | 56 |
| language | stringclasses | 1 value | — |
| path | stringlengths | 7 | 134 |
| commit_id | stringlengths | 40 | 40 |
| repo | stringlengths | 3 | 28 |
| complexity | int64 | 1 | 153 |
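The rows below are individual function-level records following this schema. As a rough illustration of how such a dataset can be inspected programmatically, here is a minimal sketch using the Hugging Face `datasets` library; the dataset identifier `"namespace/dataset-name"` is a placeholder rather than the real repository id, and only the column names come from the schema above.

```python
# Minimal sketch, assuming the data is published as a Hugging Face dataset.
# "namespace/dataset-name" is a placeholder identifier, not the actual repo id.
from datasets import load_dataset

ds = load_dataset("namespace/dataset-name", split="train")

# Every record carries the columns listed in the schema above.
example = ds[0]
print(example["repo"], example["fun_name"], example["complexity"])

# Simple filter: keep only low-complexity functions that parsed without AST errors.
simple = ds.filter(lambda r: r["complexity"] <= 5 and r["n_ast_errors"] == 0)
print(len(simple), "of", len(ds), "records")
```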
47,513
97
13
28
347
40
0
136
402
test_timeout_triggers
Replace usage of `DummyOperator` with `EmptyOperator` (#22974) * Replace usage of `DummyOperator` with `EmptyOperator`
https://github.com/apache/airflow.git
def test_timeout_triggers(self, dag_maker):
    session = settings.Session()

    # Create the test DAG and task
    with dag_maker(
        dag_id='test_timeout_triggers',
        start_date=DEFAULT_DATE,
        schedule_interval='@once',
        max_active_runs=1,
        session=session,
    ):
        EmptyOperator(task_id='dummy1')

    # Create a Task Instance for the task that is allegedly deferred
    # but past its timeout, and one that is still good.
    # We don't actually need a linked trigger here; the code doesn't check.
    dr1 = dag_maker.create_dagrun()
    dr2 = dag_maker.create_dagrun(
        run_id="test2", execution_date=DEFAULT_DATE + datetime.timedelta(seconds=1)
    )
    ti1 = dr1.get_task_instance('dummy1', session)
    ti2 = dr2.get_task_instance('dummy1', session)
    ti1.state = State.DEFERRED
    ti1.trigger_timeout = timezone.utcnow() - datetime.timedelta(seconds=60)
    ti2.state = State.DEFERRED
    ti2.trigger_timeout = timezone.utcnow() + datetime.timedelta(seconds=60)
    session.flush()

    # Boot up the scheduler and make it check timeouts
    self.scheduler_job = SchedulerJob(subdir=os.devnull)
    self.scheduler_job.check_trigger_timeouts(session=session)

    # Make sure that TI1 is now scheduled to fail, and 2 wasn't touched
    session.refresh(ti1)
    session.refresh(ti2)
    assert ti1.state == State.SCHEDULED
    assert ti1.next_method == "__fail__"
    assert ti2.state == State.DEFERRED
207
test_scheduler_job.py
Python
tests/jobs/test_scheduler_job.py
49e336ae0302b386a2f47269a6d13988382d975f
airflow
1
275,522
23
10
6
69
7
0
27
59
_var_key
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def _var_key(var):
    # pylint: disable=protected-access
    # Get the distributed variable if it exists.
    if hasattr(var, "_distributed_container"):
        var = var._distributed_container()
    if getattr(var, "_in_graph_mode", False):
        return var._shared_name
    return var._unique_id
39
optimizer_v2.py
Python
keras/optimizers/optimizer_v2/optimizer_v2.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
3
153,567
19
11
7
75
9
0
20
86
diff
DOCS-#3099: Fix `BasePandasDataSet` docstrings warnings (#4333) Co-authored-by: Yaroslav Igoshev <[email protected]> Signed-off-by: Alexander Myskov <[email protected]>
https://github.com/modin-project/modin.git
def diff(self, periods=1, axis=0):  # noqa: PR01, RT01, D200
    axis = self._get_axis_number(axis)
    return self.__constructor__(
        query_compiler=self._query_compiler.diff(
            fold_axis=axis, axis=axis, periods=periods
        )
    )
48
base.py
Python
modin/pandas/base.py
605efa618e7994681f57b11d04d417f353ef8d50
modin
1
243,968
66
13
21
325
38
0
91
218
draw_masks
[Feature] Support visualization for Panoptic Segmentation (#7041) * First commit of v2 * split the functions * Support to show panoptic result * temp * Support to show gt * support show gt * fix lint * Support to browse datasets * Fix unit tests * Fix findContours * fix comments * Fix pre-commit * fix lint * Add the type of an argument
https://github.com/open-mmlab/mmdetection.git
def draw_masks(ax, img, masks, color=None, with_edge=True, alpha=0.8):
    taken_colors = set([0, 0, 0])
    if color is None:
        random_colors = np.random.randint(0, 255, (masks.size(0), 3))
        color = [tuple(c) for c in random_colors]
    color = np.array(color, dtype=np.uint8)

    polygons = []
    for i, mask in enumerate(masks):
        if with_edge:
            contours, _ = bitmap_to_polygon(mask)
            polygons += [Polygon(c) for c in contours]

        color_mask = color[i]
        while tuple(color_mask) in taken_colors:
            color_mask = _get_bias_color(color_mask)
        taken_colors.add(tuple(color_mask))

        mask = mask.astype(bool)
        img[mask] = img[mask] * (1 - alpha) + color_mask * alpha

    p = PatchCollection(
        polygons, facecolor='none', edgecolors='w', linewidths=1, alpha=0.8)
    ax.add_collection(p)

    return ax, img
217
image.py
Python
mmdet/core/visualization/image.py
301d4a2d4cfe1cdb62608e2892924be3e67e3098
mmdetection
7
277,143
3
6
3
18
3
0
3
6
inject_argument_info_in_traceback
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def inject_argument_info_in_traceback(fn, object_name=None):
25
traceback_utils.py
Python
keras/utils/traceback_utils.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
1
215,557
140
17
50
563
48
0
211
790
fire_master
Use salt.channel.client instead of salt.transport.client
https://github.com/saltstack/salt.git
def fire_master(data, tag, preload=None):
    if (
        __opts__.get("local", None) or __opts__.get("file_client", None) == "local"
    ) and not __opts__.get("use_master_when_local", False):
        # We can't send an event if we're in masterless mode
        log.warning("Local mode detected. Event with tag %s will NOT be sent.", tag)
        return False

    if preload or __opts__.get("__cli") == "salt-call":
        # If preload is specified, we must send a raw event (this is
        # slower because it has to independently authenticate)
        if "master_uri" not in __opts__:
            __opts__["master_uri"] = "tcp://{ip}:{port}".format(
                ip=salt.utils.zeromq.ip_bracket(__opts__["interface"]),
                port=__opts__.get("ret_port", "4506"),  # TODO, no fallback
            )
        masters = list()
        ret = True
        if "master_uri_list" in __opts__:
            for master_uri in __opts__["master_uri_list"]:
                masters.append(master_uri)
        else:
            masters.append(__opts__["master_uri"])

        auth = salt.crypt.SAuth(__opts__)
        load = {
            "id": __opts__["id"],
            "tag": tag,
            "data": data,
            "tok": auth.gen_token(b"salt"),
            "cmd": "_minion_event",
        }

        if isinstance(preload, dict):
            load.update(preload)

        for master in masters:
            with salt.channel.client.ReqChannel.factory(
                __opts__, master_uri=master
            ) as channel:
                try:
                    channel.send(load)
                    # channel.send was successful.
                    # Ensure ret is True.
                    ret = True
                except Exception:  # pylint: disable=broad-except
                    ret = False
        return ret
    else:
        # Usually, we can send the event via the minion, which is faster
        # because it is already authenticated
        try:
            return salt.utils.event.MinionEvent(__opts__, listen=False).fire_event(
                {"data": data, "tag": tag, "events": None, "pretag": None},
                "fire_master",
            )
        except Exception:  # pylint: disable=broad-except
            exc_type, exc_value, exc_traceback = sys.exc_info()
            lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
            log.debug(lines)
            return False
326
event.py
Python
salt/modules/event.py
70972c8016ff5d6fbdd7f83776077b0936f60dea
salt
13
257,683
23
13
6
115
16
0
26
72
test__create_document_index_wrong_mapping_raises
Use opensearch-py in OpenSearchDocumentStore (#2691) * add Opensearch extras * let OpenSearchDocumentStore use opensearch-py * Update Documentation & Code Style * fix a bug found after adding tests Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: Sara Zan <[email protected]>
https://github.com/deepset-ai/haystack.git
def test__create_document_index_wrong_mapping_raises(self, mocked_document_store, index):
    mocked_document_store.search_fields = ["age"]
    mocked_document_store.client.indices.exists.return_value = True
    mocked_document_store.client.indices.get.return_value = {self.index_name: index}
    with pytest.raises(Exception, match=f"The search_field 'age' of index '{self.index_name}' with type 'integer'"):
        mocked_document_store._create_document_index(self.index_name)
66
test_opensearch.py
Python
test/document_stores/test_opensearch.py
e7627c3f8b241654b61f8523479c81f855102f0a
haystack
1
119,982
12
11
5
64
10
0
12
99
bcoo_broadcast_in_dim
[sparse] Update docstrings for bcoo primitives. PiperOrigin-RevId: 438685829
https://github.com/google/jax.git
def bcoo_broadcast_in_dim(mat, *, shape, broadcast_dimensions):
    return BCOO(_bcoo_broadcast_in_dim(mat.data, mat.indices, spinfo=mat._info,
                                       shape=shape,
                                       broadcast_dimensions=broadcast_dimensions),
                shape=shape)
44
bcoo.py
Python
jax/experimental/sparse/bcoo.py
3184dd65a222354bffa2466d9a375162f5649132
jax
1
70,190
30
13
7
67
10
0
34
106
connect_docker
First version. Container list and CPU ok, need others stats
https://github.com/nicolargo/glances.git
def connect_docker(self):
    try:
        # Do not use the timeout option (see issue #1878)
        ret = docker.from_env()
    except Exception as e:
        logger.error("docker plugin - Can not connect to Docker ({})".format(e))
        ret = None

    return ret
36
glances_docker.py
Python
glances/plugins/glances_docker.py
0e098407e15bc1dc341c571cec8fd34ecb4b0943
glances
2
309,563
37
15
26
185
21
0
41
267
favorites_folder_payload
Allow browsing favorites in Sonos media browser (#64082) * Allow browsing favorites in Sonos media browser * Group favorites by type, add thumbnails * Update homeassistant/components/sonos/media_player.py * Apply suggestions from code review Co-authored-by: Martin Hjelmare <[email protected]> * Keep favorite groups ordering consistent * Skip root folder if only one child available Co-authored-by: Jason Lawrence <[email protected]> Co-authored-by: Martin Hjelmare <[email protected]>
https://github.com/home-assistant/core.git
def favorites_folder_payload(favorites, media_content_id):
    children = []
    content_type = SONOS_TYPES_MAPPING[media_content_id]

    for favorite in favorites:
        if favorite.reference.item_class != media_content_id:
            continue
        children.append(
            BrowseMedia(
                title=favorite.title,
                media_class=SONOS_TO_MEDIA_CLASSES[favorite.reference.item_class],
                media_content_id=favorite.item_id,
                media_content_type="favorite_item_id",
                can_play=True,
                can_expand=False,
                thumbnail=getattr(favorite, "album_art_uri", None),
            )
        )

    return BrowseMedia(
        title=content_type.title(),
        media_class=MEDIA_CLASS_DIRECTORY,
        media_content_id="",
        media_content_type="favorites",
        can_play=False,
        can_expand=True,
        children=children,
    )
123
media_browser.py
Python
homeassistant/components/sonos/media_browser.py
2f18058fe739cabc0495a14e00df3ced60009ba2
core
3
82,431
38
17
21
308
30
0
56
279
test_sitemap_unpublished_titles
ci: Added codespell (#7355) Co-authored-by: Christian Clauss <[email protected]> * ci: codespell config taken from #7292
https://github.com/django-cms/django-cms.git
def test_sitemap_unpublished_titles(self):
    sitemap = CMSSitemap()
    locations = []
    urlset = sitemap.get_urls()
    unpublished_titles = set()
    for item in urlset:
        locations.append(item['location'])
    for page in Page.objects.drafts():
        if page.get_public_object():
            set1 = set(page.get_public_object().title_set.values_list('path', flat=True))
            set2 = set(page.title_set.values_list('path', flat=True))
            unpublished_titles.update(set2.difference(set1))
        else:
            unpublished_titles.update(page.title_set.values_list('path', flat=True))

    for path in unpublished_titles:
        title = Title.objects.get(path=path)
        if title.path:
            url = f'http://example.com/{title.language}/{title.path}/'
        else:
            url = f'http://example.com/{title.language}/{title.path}'
        self.assertFalse(url in locations)
167
test_sitemap.py
Python
cms/tests/test_sitemap.py
c1290c9ff89cb00caa5469129fd527e9d82cd820
django-cms
6
104,073
16
13
7
111
13
0
20
53
xsplitext
Add SVHN dataset (#3535) * Add SVHN * Add support for streaming * README fix * Don't specify labels twice
https://github.com/huggingface/datasets.git
def xsplitext(a):
    a, *b = a.split("::")
    if is_local_path(a):
        return os.path.splitext(Path(a).as_posix())
    else:
        a, ext = posixpath.splitext(a)
        return "::".join([a] + b), ext
64
streaming_download_manager.py
Python
src/datasets/utils/streaming_download_manager.py
a6ff8d6fb5ba2770737d2d922976c50f480d23a2
datasets
2
249,435
26
12
17
133
13
0
27
162
test_400_missing_param_without_id_access_token
Drop support for calling `/_matrix/client/v3/rooms/{roomId}/invite` without an `id_access_token` (#13241) Fixes #13206 Signed-off-by: Jacek Kusnierz [email protected]
https://github.com/matrix-org/synapse.git
def test_400_missing_param_without_id_access_token(self) -> None:
    channel = self.make_request(
        method="POST",
        path="/rooms/" + self.room_id + "/invite",
        content={
            "id_server": "example.com",
            "medium": "email",
            "address": "[email protected]",
        },
        access_token=self.tok,
    )
    self.assertEqual(channel.code, 400)
    self.assertEqual(channel.json_body["errcode"], "M_MISSING_PARAM")
75
test_rooms.py
Python
tests/rest/client/test_rooms.py
84ddcd7bbfe4100101741a408a91f283a8f742c7
synapse
1
14,004
8
9
27
39
6
0
9
38
__anext__
refactor: avoid run in executor creating threads (#5518)
https://github.com/jina-ai/jina.git
async def __anext__(self):
    if isinstance(self.iterator, Iterator):
        if not self._iterate_sync_in_thread:
108
helper.py
Python
jina/serve/stream/helper.py
5a0830cfb6bfa33dcffb38681f86efe5f6f0f97c
jina
7
153,106
20
10
4
44
6
0
21
64
_check_index_name
FIX-#3197: do not pass lambdas to the backend in GroupBy (#3373) Signed-off-by: Dmitry Chigarev <[email protected]>
https://github.com/modin-project/modin.git
def _check_index_name(self, result):
    if self._by is not None:
        # pandas does not name the index for this case
        result._query_compiler.set_index_name(None)
    return result
26
groupby.py
Python
modin/pandas/groupby.py
1e65a4afd191cf61ba05b80545d23f9b88962f41
modin
2
64,242
120
19
48
708
45
0
214
162
update_billed_amount_based_on_so
refactor: use frappe.qb instead of sql (cherry picked from commit 0a9ec9f591f8b4d0e630a3c902b69c9996f080dd)
https://github.com/frappe/erpnext.git
def update_billed_amount_based_on_so(so_detail, update_modified=True):
    from frappe.query_builder.functions import Sum

    # Billed against Sales Order directly
    si = frappe.qb.DocType("Sales Invoice").as_("si")
    si_item = frappe.qb.DocType("Sales Invoice Item").as_("si_item")
    sum_amount = Sum(si_item.amount).as_("amount")

    billed_against_so = frappe.qb.from_(si).from_(si_item).select(sum_amount).where(
        (si_item.parent == si.name)
        & (si_item.so_detail == so_detail)
        & ((si_item.dn_detail.isnull()) | (si_item.dn_detail == ''))
        & (si_item.docstatus == 1)
        & (si.update_stock == 0)
    ).run()
    billed_against_so = billed_against_so and billed_against_so[0][0] or 0

    # Get all Delivery Note Item rows against the Sales Order Item row
    dn = frappe.qb.DocType("Delivery Note").as_("dn")
    dn_item = frappe.qb.DocType("Delivery Note Item").as_("dn_item")

    dn_details = frappe.qb.from_(dn).from_(dn_item).select(
        dn_item.name, dn_item.amount, dn_item.si_detail, dn_item.parent,
        dn_item.stock_qty, dn_item.returned_qty
    ).where(
        (dn.name == dn_item.parent)
        & (dn_item.so_detail == so_detail)
        & (dn.docstatus == 1)
        & (dn.is_return == 0)
    ).orderby(
        dn.posting_date, dn.posting_time, dn.name
    ).run(as_dict=True)

    updated_dn = []

    for dnd in dn_details:
        billed_amt_agianst_dn = 0

        # If delivered against Sales Invoice
        if dnd.si_detail:
            billed_amt_agianst_dn = flt(dnd.amount)
            billed_against_so -= billed_amt_agianst_dn
        else:
            # Get billed amount directly against Delivery Note
            billed_amt_agianst_dn = frappe.db.sql(, dnd.name)
            billed_amt_agianst_dn = billed_amt_agianst_dn and billed_amt_agianst_dn[0][0] or 0

        # Distribute billed amount directly against SO between DNs based on FIFO
        if billed_against_so and billed_amt_agianst_dn < dnd.amount:
            if dnd.returned_qty:
                pending_to_bill = flt(dnd.amount) * (dnd.stock_qty - dnd.returned_qty) / dnd.stock_qty
            else:
                pending_to_bill = flt(dnd.amount)
            pending_to_bill -= billed_amt_agianst_dn
            if pending_to_bill <= billed_against_so:
                billed_amt_agianst_dn += pending_to_bill
                billed_against_so -= pending_to_bill
            else:
                billed_amt_agianst_dn += billed_against_so
                billed_against_so = 0

        frappe.db.set_value("Delivery Note Item", dnd.name, "billed_amt",
            billed_amt_agianst_dn, update_modified=update_modified)

        updated_dn.append(dnd.parent)

    return updated_dn
440
delivery_note.py
Python
erpnext/stock/doctype/delivery_note/delivery_note.py
ce0b84f54d495fc78a6792a9b05d0eb1dc799ed2
erpnext
11
257,136
68
11
13
206
30
1
82
148
mock_json_schema
Change YAML version exception into a warning (#2385) * Change exception into warning, add strict_version param, and remove compatibility between schemas * Simplify update_json_schema * Rename unstable into master * Prevent validate_config from changing the config to validate * Fix version validation and add tests * Rename master into ignore * Complete parameter rename Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
https://github.com/deepset-ai/haystack.git
def mock_json_schema(request, monkeypatch, tmp_path):
    # Do not patch integration tests
    if "integration" in request.keywords:
        return

    # Mock the subclasses list to make it very small, containing only mock nodes
    monkeypatch.setattr(
        haystack.nodes._json_schema,
        "find_subclasses_in_modules",
        lambda *a, **k: [(conftest, MockDocumentStore), (conftest, MockReader), (conftest, MockRetriever)],
    )
    # Point the JSON schema path to tmp_path
    monkeypatch.setattr(haystack.pipelines.config, "JSON_SCHEMAS_PATH", tmp_path)

    # Generate mock schema in tmp_path
    filename = f"haystack-pipeline-master.schema.json"
    test_schema = _json_schema.get_json_schema(filename=filename, version="ignore")

    with open(tmp_path / filename, "w") as schema_file:
        json.dump(test_schema, schema_file, indent=4)


#
# Integration
#


@pytest.mark.integration
@pytest.mark.elasticsearch
@pytest.mark.integration
@pytest.mark.elasticsearch
114
test_pipeline_yaml.py
Python
test/test_pipeline_yaml.py
4eec2dc45ee60e8b8780aa4f956aea8ad3624da3
haystack
2
176,591
6
7
2
25
4
0
6
12
node_connected_component
Added examples in connected and strongly connected functions (#5559) * added examples * Update networkx/algorithms/components/connected.py Co-authored-by: Ross Barnowski <[email protected]> Co-authored-by: Ross Barnowski <[email protected]>
https://github.com/networkx/networkx.git
def node_connected_component(G, n):
    return _plain_bfs(G, n)
15
connected.py
Python
networkx/algorithms/components/connected.py
7cad29b3542ad867f1eb5b7b6a9087495f252749
networkx
1
199,702
33
13
9
132
13
1
42
84
dup_hermite
Run orthopolys and appellseqs through a common interface Including unifying the two Chebyshev generators into one function. There are also two kinds of Hermite polynomials, and they too share the same recurrence, but the second type He_n(x) (aka the probabilist, reduced or small polynomials) will not be added here.
https://github.com/sympy/sympy.git
def dup_hermite(n, K):
    if n < 1:
        return [1]

    m2, m1 = [1], [2, 0]
    for i in range(2, n+1):
        a = dup_lshift(m1, 1, K)
        b = dup_mul_ground(m2, i-1, K)
        m2, m1 = m1, dup_mul_ground(dup_sub(a, b, K), 2, K)
    return m1


@public
@public
87
orthopolys.py
Python
sympy/polys/orthopolys.py
d1d46df73ebaad94089847558d00a8b7269f554d
sympy
3
155,341
84
22
43
473
23
0
173
786
_build_repr_df
REFACTOR-#5310: Remove some hasattr('columns') checks. (#5311) Signed-off-by: mvashishtha <[email protected]>
https://github.com/modin-project/modin.git
def _build_repr_df(self, num_rows, num_cols):
    # Fast track for empty dataframe.
    if len(self.index) == 0 or (self._is_dataframe and len(self.columns) == 0):
        return pandas.DataFrame(
            index=self.index,
            columns=self.columns if self._is_dataframe else None,
        )
    if len(self.index) <= num_rows:
        row_indexer = slice(None)
    else:
        # Add one here so that pandas automatically adds the dots
        # It turns out to be faster to extract 2 extra rows and columns than to
        # build the dots ourselves.
        num_rows_for_head = num_rows // 2 + 1
        num_rows_for_tail = (
            num_rows_for_head
            if len(self.index) > num_rows
            else len(self.index) - num_rows_for_head
            if len(self.index) - num_rows_for_head >= 0
            else None
        )
        row_indexer = list(range(len(self.index))[:num_rows_for_head]) + (
            list(range(len(self.index))[-num_rows_for_tail:])
            if num_rows_for_tail is not None
            else []
        )
    if self._is_dataframe:
        if len(self.columns) <= num_cols:
            col_indexer = slice(None)
        else:
            num_cols_for_front = num_cols // 2 + 1
            num_cols_for_back = (
                num_cols_for_front
                if len(self.columns) > num_cols
                else len(self.columns) - num_cols_for_front
                if len(self.columns) - num_cols_for_front >= 0
                else None
            )
            col_indexer = list(range(len(self.columns))[:num_cols_for_front]) + (
                list(range(len(self.columns))[-num_cols_for_back:])
                if num_cols_for_back is not None
                else []
            )
        indexer = row_indexer, col_indexer
    else:
        indexer = row_indexer
    return self.iloc[indexer]._query_compiler.to_pandas()
295
base.py
Python
modin/pandas/base.py
2ebc9cf51bfc773e3d4c898f5a33c0f60ad7ebc5
modin
14
131,824
65
13
19
335
24
1
117
269
test_get_conda_env_dir
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
https://github.com/ray-project/ray.git
def test_get_conda_env_dir(tmp_path):
    # Simulate starting in an env named tf1.
    d = tmp_path / "envs" / "tf1"
    Path.mkdir(d, parents=True)
    with mock.patch.dict(
        os.environ, {"CONDA_PREFIX": str(d), "CONDA_DEFAULT_ENV": "tf1"}
    ):
        with pytest.raises(ValueError):
            # Env tf2 should not exist.
            env_dir = get_conda_env_dir("tf2")
        tf2_dir = tmp_path / "envs" / "tf2"
        Path.mkdir(tf2_dir, parents=True)
        env_dir = get_conda_env_dir("tf2")
        assert env_dir == str(tmp_path / "envs" / "tf2")

    # Simulate starting in (base) conda env.
    with mock.patch.dict(
        os.environ, {"CONDA_PREFIX": str(tmp_path), "CONDA_DEFAULT_ENV": "base"}
    ):
        with pytest.raises(ValueError):
            # Env tf3 should not exist.
            env_dir = get_conda_env_dir("tf3")
        # Env tf2 still should exist.
        env_dir = get_conda_env_dir("tf2")
        assert env_dir == str(tmp_path / "envs" / "tf2")


@pytest.mark.skipif(
    os.environ.get("CI") and sys.platform != "linux",
    reason="This test is only run on linux CI machines.",
)
@pytest.mark.skipif(
    os.environ.get("CI") and sys.platform != "linux",
    reason="This test is only run on linux CI machines.",
)
152
test_runtime_env_complicated.py
Python
python/ray/tests/test_runtime_env_complicated.py
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
ray
1
166,973
14
9
2
45
3
0
14
35
reso
DOC: Added docstrings to fixtures defined in array module (#47211)
https://github.com/pandas-dev/pandas.git
def reso(self, unit):
    # TODO: avoid hard-coding
    return {"s": 7, "ms": 8, "us": 9}[unit]
25
test_datetimes.py
Python
pandas/tests/arrays/test_datetimes.py
89be1f053b695c4ce1c0569f737caf3f03c12128
pandas
1
267,468
18
10
4
48
7
0
18
50
set_queue
Forked display via queue (#77056) * Forked Display via queue * Docs and simple code cleanup * Only proxy Display.display * Remove unused import * comment * Update deadlock comment, remove py3 check * Don't flush display, and don't lock from forks * clog frag * ci_complete ci_coverage * Add units for queue proxying * Cleanup flush * ci_complete * Only lock the write, switch to RLock * Remove unused import
https://github.com/ansible/ansible.git
def set_queue(self, queue):
    if multiprocessing_context.parent_process() is None:
        raise RuntimeError('queue cannot be set in parent process')
    self._final_q = queue
27
display.py
Python
lib/ansible/utils/display.py
5e369604e1930b1a2e071fecd7ec5276ebd12cb1
ansible
2
124,651
32
13
32
220
24
0
44
421
_get_tune_run_arguments
[AIR] Fix `ResourceChangingScheduler` not working with AIR (#26307) This PR ensures that the new trial resources set by `ResourceChangingScheduler` are respected by the train loop logic by modifying the scaling config to match. Previously, even though trials had their resources updated, the scaling config was not modified which lead to eg. new workers not being spawned in the `DataParallelTrainer` even though resources were available. In order to accomplish this, `ScalingConfigDataClass` is updated to allow equality comparisons with other `ScalingConfigDataClass`es (using the underlying PGF) and to create a `ScalingConfigDataClass` from a PGF. Please note that this is an internal only change intended to actually make `ResourceChangingScheduler` work. In the future, `ResourceChangingScheduler` should be updated to operate on `ScalingConfigDataClass`es instead of PGFs as it is now. That will require a deprecation cycle.
https://github.com/ray-project/ray.git
def _get_tune_run_arguments(self) -> Dict[str, Any]:
    return dict(
        mode=self._tune_config.mode,
        metric=self._tune_config.metric,
        callbacks=self._run_config.callbacks,
        sync_config=self._run_config.sync_config,
        stop=self._run_config.stop,
        max_failures=(
            self._run_config.failure_config.max_failures
            if self._run_config.failure_config
            else 0
        ),
        keep_checkpoints_num=(
            self._run_config.checkpoint_config.num_to_keep
            if self._run_config.checkpoint_config
            else None
        ),
        checkpoint_score_attr=(
            self._run_config.checkpoint_config._tune_legacy_checkpoint_score_attr
            if self._run_config.checkpoint_config
            else None
        ),
        _experiment_checkpoint_dir=self._experiment_checkpoint_dir,
        raise_on_failed_trial=False,
        fail_fast=(
            self._run_config.failure_config.fail_fast
            if self._run_config.failure_config
            else False
        ),
        verbose=self._run_config.verbose,
    )
155
tuner_internal.py
Python
python/ray/tune/impl/tuner_internal.py
b3878e26d765e28dd7c69abadbd856181037db97
ray
5
274,552
31
10
13
134
15
0
43
110
get
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def get(identifier):
    if identifier is None:
        return None
    if isinstance(identifier, str):
        identifier = str(identifier)
        return deserialize(identifier)
    if isinstance(identifier, dict):
        return deserialize(identifier)
    if callable(identifier):
        return identifier
    raise ValueError(
        f"Could not interpret loss function identifier: {identifier}"
    )


LABEL_DTYPES_FOR_LOSSES = {
    tf.compat.v1.losses.sparse_softmax_cross_entropy: "int32",
    sparse_categorical_crossentropy: "int32",
}
59
losses.py
Python
keras/losses.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
5
288,074
33
9
12
126
15
1
37
84
test_discover_device_tracker
Move MQTT discovery hass.data globals to dataclass (#78706) * Add MQTT discovery hass.data globals to dataclass * isort * Additional rework * Add hass.data["mqtt_tags"] to dataclass * Follow-up comment * Corrections
https://github.com/home-assistant/core.git
async def test_discover_device_tracker(hass, mqtt_mock_entry_no_yaml_config, caplog):
    await mqtt_mock_entry_no_yaml_config()
    async_fire_mqtt_message(
        hass,
        "homeassistant/device_tracker/bla/config",
        '{ "name": "test", "state_topic": "test_topic" }',
    )
    await hass.async_block_till_done()
    state = hass.states.get("device_tracker.test")

    assert state is not None
    assert state.name == "test"
    assert ("device_tracker", "bla") in hass.data["mqtt"].discovery_already_discovered


@pytest.mark.no_fail_on_log_exception
@pytest.mark.no_fail_on_log_exception
65
test_device_tracker_discovery.py
Python
tests/components/mqtt/test_device_tracker_discovery.py
84b2c74746b694d217fe6d448a8dfff4bc2d7a9e
core
1
263,853
106
13
19
216
21
0
165
283
_get_module_collection_mode
building: more module collection modes, consolidate noarchive codepath Map the module collection mode strings into (combinations of) integer flags that control try basic collection modes: - collect a pyc into PYZ archive - collect a pyc as a data file - collect a py as a data file Consolidate the `noarchive=True` codepath into module collection mode, where "collect a pyc into PYZ archive" flag is swapped for a "collect a pyc as a data file". The new collection mode also implicitly fixes couple of minor annoyances of the `noarchive=True` mode: - the user-writable paths containing python source code are not littered with pyc/pyo files anymore; all pycs are now gathered in build directory - the name of pycs in local build directory are not mangled anymore (was previously the case for pycs that could not be written to their original locations due to lack of permissions) - the pycs have code paths stripped from them, same as in noarchive=False mode
https://github.com/pyinstaller/pyinstaller.git
def _get_module_collection_mode(mode_dict, name, noarchive=False):
    # Default mode: collect into PYZ, unless noarchive is enabled. In that case, collect as pyc.
    mode_flags = _ModuleCollectionMode.PYC if noarchive else _ModuleCollectionMode.PYZ

    # If we have no collection mode settings, end here and now.
    if not mode_dict:
        return mode_flags

    # Search the parent modules/packages in top-down fashion, and take the last given setting. This ensures that
    # a setting given for the top-level package is recursively propagated to all its subpackages and submodules,
    # but also allows individual sub-modules to override the setting again.
    mode = 'pyz'

    name_parts = name.split('.')
    for i in range(len(name_parts)):
        modlevel = ".".join(name_parts[:i + 1])
        modlevel_mode = mode_dict.get(modlevel, None)
        if modlevel_mode is not None:
            mode = modlevel_mode

    # Convert mode string to _ModuleCollectionMode flags
    try:
        mode_flags = _MODULE_COLLECTION_MODES[mode]
    except KeyError:
        raise ValueError(f"Unknown module collection mode for {name!r}: {mode!r}!")

    # noarchive flag being set means that we need to change _ModuleCollectionMode.PYZ into _ModuleCollectionMode.PYC
    if noarchive and _ModuleCollectionMode.PYZ in mode_flags:
        mode_flags ^= _ModuleCollectionMode.PYZ
        mode_flags |= _ModuleCollectionMode.PYC

    return mode_flags
122
build_main.py
Python
PyInstaller/building/build_main.py
6e1bfa2de254d8ae302f54dcea0cfefae4dd3585
pyinstaller
8
160,904
39
11
7
143
18
0
50
145
test_error_message_unsigned
TST: Add a failing test case to demonstrate the bug gh2176
https://github.com/numpy/numpy.git
def test_error_message_unsigned(self):
    # Ensure to test for potential overflow in the case of:
    #     x - y
    # and
    #     y - x
    x = np.asarray([0, 1, 8], dtype='uint8')
    y = np.asarray([4, 4, 4], dtype='uint8')
    with pytest.raises(AssertionError) as exc_info:
        assert_allclose(x, y, atol=3)
    msgs = str(exc_info.value).split('\n')
    assert_equal(msgs[4], 'Max absolute difference: 4')
84
test_utils.py
Python
numpy/testing/tests/test_utils.py
57d04d883e874c611091933c4c36e1cd43ea0e04
numpy
1
211,439
48
12
26
272
33
0
73
343
forward
Update box head (#6804) * add flag loss_normalize_pos * add faster vitdet * update faster vitdet
https://github.com/PaddlePaddle/PaddleDetection.git
def forward(self, body_feats=None, rois=None, rois_num=None, inputs=None):
    if self.training:
        rois, rois_num, targets = self.bbox_assigner(rois, rois_num, inputs)
        self.assigned_rois = (rois, rois_num)
        self.assigned_targets = targets

    rois_feat = self.roi_extractor(body_feats, rois, rois_num)
    bbox_feat = self.head(rois_feat)
    if self.with_pool:
        feat = F.adaptive_avg_pool2d(bbox_feat, output_size=1)
        feat = paddle.squeeze(feat, axis=[2, 3])
    else:
        feat = bbox_feat
    scores = self.bbox_score(feat)
    deltas = self.bbox_delta(feat)

    if self.training:
        loss = self.get_loss(
            scores,
            deltas,
            targets,
            rois,
            self.bbox_weight,
            loss_normalize_pos=self.loss_normalize_pos)
        return loss, bbox_feat
    else:
        pred = self.get_prediction(scores, deltas)
        return pred, self.head
182
bbox_head.py
Python
ppdet/modeling/heads/bbox_head.py
0b8165cf0029495d9b51007863d73c7db414313f
PaddleDetection
4
107,161
14
11
8
129
17
0
18
42
test_constrained_layout22
ENH: implement and use base layout_engine for more flexible layout.
https://github.com/matplotlib/matplotlib.git
def test_constrained_layout22():
    fig, ax = plt.subplots(layout="constrained")

    fig.draw_without_rendering()
    extents0 = np.copy(ax.get_position().extents)

    fig.suptitle("Suptitle", y=0.5)
    fig.draw_without_rendering()
    extents1 = np.copy(ax.get_position().extents)

    np.testing.assert_allclose(extents0, extents1)
77
test_constrainedlayout.py
Python
lib/matplotlib/tests/test_constrainedlayout.py
ec4dfbc3c83866f487ff0bc9c87b0d43a1c02b22
matplotlib
1
107,536
70
17
31
380
26
0
98
376
encode_multipart_formdata
Properly capitalize "Unicode". See e.g. https://en.wikipedia.org/wiki/Unicode, https://docs.python.org/3/howto/unicode.html. Also associated minor doc cleanups.
https://github.com/matplotlib/matplotlib.git
def encode_multipart_formdata(fields, boundary=None):
    # copy requests imports in here:
    from io import BytesIO
    from requests.packages.urllib3.filepost import (
        choose_boundary, writer, b, get_content_type
    )
    body = BytesIO()
    if boundary is None:
        boundary = choose_boundary()

    for fieldname, value in iter_fields(fields):
        body.write(b('--%s\r\n' % (boundary)))

        if isinstance(value, tuple):
            filename, data = value
            writer(body).write('Content-Disposition: form-data; name="%s"; '
                               'filename="%s"\r\n' % (fieldname, filename))
            body.write(b('Content-Type: %s\r\n\r\n' %
                         (get_content_type(filename))))
        else:
            data = value
            writer(body).write('Content-Disposition: form-data; name="%s"\r\n'
                               % (fieldname))
            body.write(b'Content-Type: text/plain\r\n\r\n')

        if isinstance(data, int):
            data = str(data)  # Backwards compatibility
        if isinstance(data, str):
            writer(body).write(data)
        else:
            body.write(data)

        body.write(b'\r\n')

    body.write(b('--%s--\r\n' % (boundary)))

    content_type = b('multipart/form-data; boundary=%s' % boundary)

    return body.getvalue(), content_type
215
gh_api.py
Python
tools/gh_api.py
88cb4c9d0aa1e790fc4689ca7e68725bf851bf63
matplotlib
6
155,520
181
15
66
824
71
0
299
798
take
DOC: normalize whitespace in doctests in slicing.py (#8512)
https://github.com/dask/dask.git
def take(outname, inname, chunks, index, itemsize, axis=0):
    from .core import PerformanceWarning

    plan = slicing_plan(chunks[axis], index)
    if len(plan) >= len(chunks[axis]) * 10:
        factor = math.ceil(len(plan) / len(chunks[axis]))

        warnings.warn(
            "Slicing with an out-of-order index is generating %d "
            "times more chunks" % factor,
            PerformanceWarning,
            stacklevel=6,
        )
    if not is_arraylike(index):
        index = np.asarray(index)

    # Check for chunks from the plan that would violate the user's
    # configured chunk size.
    nbytes = utils.parse_bytes(config.get("array.chunk-size"))
    other_chunks = [chunks[i] for i in range(len(chunks)) if i != axis]
    other_numel = np.prod([sum(x) for x in other_chunks])

    if math.isnan(other_numel):
        warnsize = maxsize = math.inf
    else:
        maxsize = math.ceil(nbytes / (other_numel * itemsize))
        warnsize = maxsize * 5

    split = config.get("array.slicing.split-large-chunks", None)

    # Warn only when the default is not specified.
    warned = split is not None

    for _, index_list in plan:
        if not warned and len(index_list) > warnsize:
            msg = (
                "Slicing is producing a large chunk. To accept the large\n"
                "chunk and silence this warning, set the option\n"
                " >>> with dask.config.set(**{'array.slicing.split_large_chunks': False}):\n"
                " ... array[indexer]\n\n"
                "To avoid creating the large chunks, set the option\n"
                " >>> with dask.config.set(**{'array.slicing.split_large_chunks': True}):\n"
                " ... array[indexer]"
            )
            warnings.warn(msg, PerformanceWarning, stacklevel=6)
            warned = True

    where_index = []
    index_lists = []
    for where_idx, index_list in plan:
        index_length = len(index_list)
        if split and index_length > maxsize:
            index_sublist = np.array_split(
                index_list, math.ceil(index_length / maxsize)
            )
            index_lists.extend(index_sublist)
            where_index.extend([where_idx] * len(index_sublist))
        else:
            if not is_arraylike(index_list):
                index_list = np.array(index_list)
            index_lists.append(index_list)
            where_index.append(where_idx)

    dims = [range(len(bd)) for bd in chunks]

    indims = list(dims)
    indims[axis] = list(range(len(where_index)))
    keys = list(product([outname], *indims))

    outdims = list(dims)
    outdims[axis] = where_index
    slices = [[colon] * len(bd) for bd in chunks]
    slices[axis] = index_lists
    slices = list(product(*slices))
    inkeys = list(product([inname], *outdims))
    values = [(getitem, inkey, slc) for inkey, slc in zip(inkeys, slices)]

    chunks2 = list(chunks)
    chunks2[axis] = tuple(map(len, index_lists))
    dsk = dict(zip(keys, values))

    return tuple(chunks2), dsk
509
slicing.py
Python
dask/array/slicing.py
fa8dfede71677a2301d4cd602cf4b27af41cbc4f
dask
17
292,711
33
14
12
211
20
0
49
157
_fetch_status
Correctly handle missing mpd albumart (#66771) Co-authored-by: Martin Hjelmare <[email protected]>
https://github.com/home-assistant/core.git
async def _fetch_status(self):
    self._status = await self._client.status()
    self._currentsong = await self._client.currentsong()
    await self._async_update_media_image_hash()

    if (position := self._status.get("elapsed")) is None:
        position = self._status.get("time")

        if isinstance(position, str) and ":" in position:
            position = position.split(":")[0]

    if position is not None and self._media_position != position:
        self._media_position_updated_at = dt_util.utcnow()
        self._media_position = int(float(position))

    await self._update_playlists()
123
media_player.py
Python
homeassistant/components/mpd/media_player.py
facf22c2ddccbf9b205a2d8b26da457330b53ba6
core
6
9,833
26
10
17
105
9
0
39
126
mixin_head_parser
feat: star routing (#3900) * feat(proto): adjust proto for star routing (#3844) * feat(proto): adjust proto for star routing * feat(proto): generate proto files * feat(grpc): refactor grpclet interface (#3846) * feat: refactor connection pool for star routing (#3872) * feat(k8s): add more labels to k8s deployments * feat(network): refactor connection pool * feat(network): refactor k8s pool * feat: star routing graph gateway (#3877) * feat: star routing - refactor grpc data runtime (#3887) * feat(runtimes): refactor grpc dataruntime * fix(tests): adapt worker runtime tests * fix(import): fix import * feat(proto): enable sending multiple lists (#3891) * feat: star routing gateway (#3893) * feat: star routing gateway all protocols (#3897) * test: add streaming and prefetch tests (#3901) * feat(head): new head runtime for star routing (#3899) * feat(head): new head runtime * feat(head): new head runtime * style: fix overload and cli autocomplete * feat(network): improve proto comments Co-authored-by: Jina Dev Bot <[email protected]> * feat(worker): merge docs in worker runtime (#3905) * feat(worker): merge docs in worker runtime * feat(tests): assert after clean up * feat(tests): star routing runtime integration tests (#3908) * fix(tests): fix integration tests * test: test runtimes fast slow request (#3910) * feat(zmq): purge zmq, zed, routing_table (#3915) * feat(zmq): purge zmq, zed, routing_table * style: fix overload and cli autocomplete * feat(zmq): adapt comment in dependency list * style: fix overload and cli autocomplete * fix(tests): fix type tests Co-authored-by: Jina Dev Bot <[email protected]> * test: add test gateway to worker connection (#3921) * feat(pea): adapt peas for star routing (#3918) * feat(pea): adapt peas for star routing * style: fix overload and cli autocomplete * feat(pea): add tests * feat(tests): add failing head pea test Co-authored-by: Jina Dev Bot <[email protected]> * feat(tests): integration tests for peas (#3923) * feat(tests): integration tests for peas * feat(pea): remove _inner_pea function * feat: star routing container pea (#3922) * test: rescue tests (#3942) * fix: fix streaming tests (#3945) * refactor: move docker run to run (#3948) * feat: star routing pods (#3940) * feat(pod): adapt pods for star routing * feat(pods): adapt basepod to star routing * feat(pod): merge pod and compound pod * feat(tests): fix tests * style: fix overload and cli autocomplete * feat(test): add container pea int test * feat(ci): remove more unnecessary tests * fix(tests): remove jinad runtime * feat(ci): remove latency tracking * fix(ci): fix ci def * fix(runtime): enable runtime to be exited * fix(tests): wrap runtime test in process * fix(runtimes): remove unused runtimes * feat(runtimes): improve cancel wait * fix(ci): build test pip again in ci * fix(tests): fix a test * fix(test): run async in its own process * feat(pod): include shard in activate msg * fix(pea): dont join * feat(pod): more debug out * feat(grpc): manage channels properly * feat(pods): remove exitfifo * feat(network): add simple send retry mechanism * fix(network): await pool close * fix(test): always close grpc server in worker * fix(tests): remove container pea from tests * fix(tests): reorder tests * fix(ci): split tests * fix(ci): allow alias setting * fix(test): skip a test * feat(pods): address comments Co-authored-by: Jina Dev Bot <[email protected]> * test: unblock skipped test (#3957) * feat: jinad pea (#3949) * feat: jinad pea * feat: jinad pea * test: remote peas * test: toplogy tests 
with jinad * ci: parallel jobs * feat(tests): add pod integration tests (#3958) * feat(tests): add pod integration tests * fix(tests): make tests less flaky * fix(test): fix test * test(pea): remote pea topologies (#3961) * test(pea): remote pea simple topology * test: remote pea topologies * refactor: refactor streamer result handling (#3960) * feat(k8s): adapt K8s Pod for StarRouting (#3964) * test: optimize k8s test * test: increase timeout and use different namespace * test: optimize k8s test * test: build and load image when needed * test: refactor k8s test * test: fix image name error * test: fix k8s image load * test: fix typoe port expose * test: update tests in connection pool and handling * test: remove unused fixture * test: parameterize docker images * test: parameterize docker images * test: parameterize docker images * feat(k8s): adapt k8s pod for star routing * fix(k8s): dont overwrite add/remove function in pool * fix(k8s): some fixes * fix(k8s): some more fixes * fix(k8s): linting * fix(tests): fix tests * fix(tests): fix k8s unit tests * feat(k8s): complete k8s integration test * feat(k8s): finish k8s tests * feat(k8s): fix test * fix(tests): fix test with no name * feat(k8s): unify create/replace interface * feat(k8s): extract k8s port constants * fix(tests): fix tests * fix(tests): wait for runtime being ready in tests * feat(k8s): address comments Co-authored-by: bwanglzu <[email protected]> * feat(flow): adapt Flow for StarRouting (#3986) * feat(flow): add routes * feat(flow): adapt flow to star routing * style: fix overload and cli autocomplete * feat(flow): handle empty topologies * feat(k8s): allow k8s pool disabling * style: fix overload and cli autocomplete * fix(test): fix test with mock * fix(tests): fix more tests * feat(flow): clean up tests * style: fix overload and cli autocomplete * fix(tests): fix more tests * feat: add plot function (#3994) * fix(tests): avoid hanging tests * feat(flow): add type hinting * fix(test): fix duplicate exec name in test * fix(tests): fix more tests * fix(tests): enable jinad test again * fix(tests): random port fixture * fix(style): replace quotes Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Joan Fontanals <[email protected]> * feat(ci): bring back ci (#3997) * feat(ci): enable ci again * style: fix overload and cli autocomplete * feat(ci): add latency tracking * feat(ci): bring back some tests * fix(tests): remove invalid port test * feat(ci): disable daemon and distributed tests * fix(tests): fix entrypoint in hub test * fix(tests): wait for gateway to be ready * fix(test): fix more tests * feat(flow): do rolling update and scale sequentially * fix(tests): fix more tests * style: fix overload and cli autocomplete * feat: star routing hanging pods (#4011) * fix: try to handle hanging pods better * test: hanging pods test work * fix: fix topology graph problem * test: add unit test to graph * fix(tests): fix k8s tests * fix(test): fix k8s test * fix(test): fix k8s pool test * fix(test): fix k8s test * fix(test): fix k8s connection pool setting * fix(tests): make runtime test more reliable * fix(test): fix routes test * fix(tests): make rolling update test less flaky * feat(network): gurantee unique ports * feat(network): do round robin for shards * fix(ci): increase pytest timeout to 10 min Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Joan Fontanals <[email protected]> * fix(ci): fix ci file * feat(daemon): jinad pod for star routing * Revert "feat(daemon): jinad pod for star routing" 
This reverts commit ed9b37ac862af2e2e8d52df1ee51c0c331d76f92. * feat(daemon): remote jinad pod support (#4042) * feat(daemon): add pod tests for star routing * feat(daemon): add remote pod test * test(daemon): add remote pod arguments test * test(daemon): add async scale test * test(daemon): add rolling update test * test(daemon): fix host * feat(proto): remove message proto (#4051) * feat(proto): remove message proto * fix(tests): fix tests * fix(tests): fix some more tests * fix(tests): fix more tests * fix(tests): fix more tests * fix(tests): fix more tests * fix(tests): fix more tests * feat(proto): put docs back in data * fix(proto): clean up * feat(proto): clean up * fix(tests): skip latency tracking * fix(test): fix hub test * fix(tests): fix k8s test * fix(test): some test clean up * fix(style): clean up style issues * feat(proto): adjust for rebase * fix(tests): bring back latency tracking * fix(tests): fix merge accident * feat(proto): skip request serialization (#4074) * feat: add reduce to star routing (#4070) * feat: add reduce on shards to head runtime * test: add reduce integration tests with fixed order * feat: add reduce on needs * chore: get_docs_matrix_from_request becomes public * style: fix overload and cli autocomplete * docs: remove undeterministic results warning * fix: fix uses_after * test: assert correct num docs after reducing in test_external_pod * test: correct asserts after reduce in test_rolling_update * fix: no reduce if uses_after_address is set * fix: get_docs_from_request only if needed * fix: fix tests after merge * refactor: move reduce from data_request_handler to head * style: fix overload and cli autocomplete * chore: apply suggestions * fix: fix asserts * chore: minor test fix * chore: apply suggestions * test: remove flow tests with external executor (pea) * fix: fix test_expected_messages_routing * fix: fix test_func_joiner * test: adapt k8s test Co-authored-by: Jina Dev Bot <[email protected]> * fix(k8s): fix static pool config * fix: use custom protoc doc generator image (#4088) * fix: use custom protoc doc generator image * fix(docs): minor doc improvement * fix(docs): use custom image * fix(docs): copy docarray * fix: doc building local only * fix: timeout doc building * fix: use updated args when building ContainerPea * test: add container PeaFactory test * fix: force pea close on windows (#4098) * fix: dont reduce if uses exist (#4099) * fix: dont use reduce if uses exist * fix: adjust reduce tests * fix: adjust more reduce tests * fix: fix more tests * fix: adjust more tests * fix: ignore non jina resources (#4101) * feat(executor): enable async executors (#4102) * feat(daemon): daemon flow on star routing (#4096) * test(daemon): add remote flow test * feat(daemon): call scale in daemon * feat(daemon): remove tail args and identity * test(daemon): rename scalable executor * test(daemon): add a small delay in async test * feat(daemon): scale partial flow only * feat(daemon): call scale directly in partial flow store * test(daemon): use asyncio sleep * feat(daemon): enable flow level distributed tests * test(daemon): fix jinad env workspace config * test(daemon): fix pod test use new port rolling update * feat(daemon): enable distribuetd tests * test(daemon): remove duplicate tests and zed runtime test * test(daemon): fix stores unit test * feat(daemon): enable part of distributed tests * feat(daemon): enable part of distributed tests * test: correct test paths * test(daemon): add client test for remote flows * test(daemon): send a request 
with jina client * test(daemon): assert async generator * test(daemon): small interval between tests * test(daemon): add flow test for container runtime * test(daemon): add flow test for container runtime * test(daemon): fix executor name * test(daemon): fix executor name * test(daemon): use async client fetch result * test(daemon): finish container flow test * test(daemon): enable distributed in ci * test(daemon): enable distributed in ci * test(daemon): decare flows and pods * test(daemon): debug ci if else * test(daemon): debug ci if else * test(daemon): decare flows and pods * test(daemon): correct test paths * test(daemon): add small delay for async tests * fix: star routing fixes (#4100) * docs: update docs * fix: fix Request.__repr__ * docs: update flow remarks * docs: fix typo * test: add non_empty_fields test * chore: remove non_empty_fields test * feat: polling per endpoint (#4111) * feat(polling): polling per endpoint configurable * fix: adjust tests * feat(polling): extend documentation * style: fix overload and cli autocomplete * fix: clean up * fix: adjust more tests * fix: remove repeat from flaky test * fix: k8s test * feat(polling): address pr feedback * feat: improve docs Co-authored-by: Jina Dev Bot <[email protected]> * feat(grpc): support connect grpc server via ssl tunnel (#4092) * feat(grpc): support ssl grpc connect if port is 443 * fix(grpc): use https option instead of detect port automatically * chore: fix typo * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * test(networking): add test for peapods networking * fix: address comments Co-authored-by: Joan Fontanals <[email protected]> * feat(polling): unify polling args (#4113) * fix: several issues for jinad pods (#4119) * fix: activate for jinad pods * fix: dont expose worker pod in partial daemon * fix: workspace setting * fix: containerized flows * fix: hub test * feat(daemon): remote peas on star routing (#4112) * test(daemon): fix request in peas * test(daemon): fix request in peas * test(daemon): fix sync async client test * test(daemon): enable remote peas test * test(daemon): replace send message to send request * test(daemon): declare pea tests in ci * test(daemon): use pea args fixture * test(daemon): head pea use default host * test(daemon): fix peas topologies * test(daemon): fix pseudo naming * test(daemon): use default host as host * test(daemon): fix executor path * test(daemon): add remote worker back * test(daemon): skip local remote remote topology * fix: jinad pea test setup * fix: jinad pea tests * fix: remove invalid assertion Co-authored-by: jacobowitz <[email protected]> * feat: enable daemon tests again (#4132) * feat: enable daemon tests again * fix: remove bogy empty script file * fix: more jinad test fixes * style: fix overload and cli autocomplete * fix: scale and ru in jinad * fix: fix more jinad tests Co-authored-by: Jina Dev Bot <[email protected]> * fix: fix flow test * fix: improve pea tests reliability (#4136) Co-authored-by: Joan Fontanals <[email protected]> Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Deepankar Mahapatro <[email protected]> Co-authored-by: bwanglzu <[email protected]> Co-authored-by: AlaeddineAbdessalem <[email protected]> Co-authored-by: Zhaofeng Miao <[email protected]>
https://github.com/jina-ai/jina.git
def mixin_head_parser(parser):
    gp = add_arg_group(parser, title='Head')

    gp.add_argument(
        '--uses-before-address',
        type=str,
        help='The address of the uses-before runtime',
    )

    gp.add_argument(
        '--uses-after-address',
        type=str,
        help='The address of the uses-before runtime',
    )

    gp.add_argument(
        '--connection-list',
        type=str,
        help='dictionary JSON with a list of connections to configure',
    )
61
head.py
Python
jina/parsers/peapods/runtimes/head.py
933415bfa1f9eb89f935037014dfed816eb9815d
jina
1
266,160
22
11
7
85
7
0
28
122
_get_color
Closes #10904: Added Colors to SVG for Front and Rear Ports (#10905) * Added Colors to SVG for Front and Reaer Ports Fix for feature request 10904 thanks to @TheZackCodec * Simplify termination color resolution Co-authored-by: jeremystretch <[email protected]>
https://github.com/netbox-community/netbox.git
def _get_color(cls, instance):
    if hasattr(instance, 'parent_object'):
        # Termination
        return getattr(instance, 'color', 'f0f0f0') or 'f0f0f0'
    if hasattr(instance, 'device_role'):
        # Device
        return instance.device_role.color
    else:
        # Other parent object
        return 'e0e0e0'
45
cables.py
Python
netbox/dcim/svg/cables.py
a2007a4728faafeee5c296afabc116707db42344
netbox
4
181,559
14
9
10
61
7
0
17
33
mask_color
format(black): formatted files to latest version (#1735)
https://github.com/Zulko/moviepy.git
def mask_color(clip, color=None, threshold=0, stiffness=1):
    if color is None:
        color = [0, 0, 0]
    color = np.array(color)
67
mask_color.py
Python
moviepy/video/fx/mask_color.py
0b63f472ee0b67bf000412179bfb8ee30c52ce27
moviepy
2
269,377
70
17
34
380
17
0
119
390
_preprocess_symbolic_input
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def _preprocess_symbolic_input(x, data_format, mode):
    if mode == "tf":
        x /= 127.5
        x -= 1.0
        return x
    elif mode == "torch":
        x /= 255.0
        mean = [0.485, 0.456, 0.406]
        std = [0.229, 0.224, 0.225]
    else:
        if data_format == "channels_first":
            # 'RGB'->'BGR'
            if backend.ndim(x) == 3:
                x = x[::-1, ...]
            else:
                x = x[:, ::-1, ...]
        else:
            # 'RGB'->'BGR'
            x = x[..., ::-1]
        mean = [103.939, 116.779, 123.68]
        std = None

    mean_tensor = backend.constant(-np.array(mean))

    # Zero-center by mean pixel
    if backend.dtype(x) != backend.dtype(mean_tensor):
        x = backend.bias_add(
            x,
            backend.cast(mean_tensor, backend.dtype(x)),
            data_format=data_format,
        )
    else:
        x = backend.bias_add(x, mean_tensor, data_format)
    if std is not None:
        std_tensor = backend.constant(np.array(std), dtype=backend.dtype(x))
        if data_format == "channels_first":
            std_tensor = backend.reshape(std_tensor, (-1, 1, 1))
        x /= std_tensor
    return x
266
imagenet_utils.py
Python
keras/applications/imagenet_utils.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
8
261,290
42
11
14
164
26
0
51
109
test_lassolarsic_noise_variance
MAINT Clean deprecation for 1.2: normalize in linear models (#24391)
https://github.com/scikit-learn/scikit-learn.git
def test_lassolarsic_noise_variance(fit_intercept):
    rng = np.random.RandomState(0)
    X, y = datasets.make_regression(
        n_samples=10, n_features=11 - fit_intercept, random_state=rng
    )

    model = make_pipeline(StandardScaler(), LassoLarsIC(fit_intercept=fit_intercept))

    err_msg = (
        "You are using LassoLarsIC in the case where the number of samples is smaller"
        " than the number of features"
    )
    with pytest.raises(ValueError, match=err_msg):
        model.fit(X, y)

    model.set_params(lassolarsic__noise_variance=1.0)
    model.fit(X, y).predict(X)
102
test_least_angle.py
Python
sklearn/linear_model/tests/test_least_angle.py
e41753ebd57c44ae91b389f190c43ddc0b384a75
scikit-learn
1
249,737
75
13
34
406
13
0
101
523
test_tokenize_query
Unified search query syntax using the full-text search capabilities of the underlying DB. (#11635) Support a unified search query syntax which leverages more of the full-text search of each database supported by Synapse. Supports, with the same syntax across Postgresql 11+ and Sqlite: - quoted "search terms" - `AND`, `OR`, `-` (negation) operators - Matching words based on their stem, e.g. searches for "dog" matches documents containing "dogs". This is achieved by - If on postgresql 11+, pass the user input to `websearch_to_tsquery` - If on sqlite, manually parse the query and transform it into the sqlite-specific query syntax. Note that postgresql 10, which is close to end-of-life, falls back to using `phraseto_tsquery`, which only supports a subset of the features. Multiple terms separated by a space are implicitly ANDed. Note that: 1. There is no escaping of full-text syntax that might be supported by the database; e.g. `NOT`, `NEAR`, `*` in sqlite. This runs the risk that people might discover this as accidental functionality and depend on something we don't guarantee. 2. English text is assumed for stemming. To support other languages, either the target language needs to be known at the time of indexing the message (via room metadata, or otherwise), or a separate index for each language supported could be created. Sqlite docs: https://www.sqlite.org/fts3.html#full_text_index_queries Postgres docs: https://www.postgresql.org/docs/11/textsearch-controls.html
https://github.com/matrix-org/synapse.git
def test_tokenize_query(self) -> None:
    cases = (
        ("brown", ["brown"]),
        ("quick brown", ["quick", SearchToken.And, "brown"]),
        ("quick \t brown", ["quick", SearchToken.And, "brown"]),
        ('"brown quick"', [Phrase(["brown", "quick"])]),
        ("furphy OR fox", ["furphy", SearchToken.Or, "fox"]),
        ("fox -brown", ["fox", SearchToken.Not, "brown"]),
        ("- fox", [SearchToken.Not, "fox"]),
        ('"fox" quick', [Phrase(["fox"]), SearchToken.And, "quick"]),
        # No trailing double quoe.
        ('"fox quick', ["fox", SearchToken.And, "quick"]),
        ('"-fox quick', [SearchToken.Not, "fox", SearchToken.And, "quick"]),
        ('" quick "', [Phrase(["quick"])]),
        (
            'q"uick brow"n',
            [
                "q",
                SearchToken.And,
                Phrase(["uick", "brow"]),
                SearchToken.And,
                "n",
            ],
        ),
        (
            '-"quick brown"',
            [SearchToken.Not, Phrase(["quick", "brown"])],
        ),
    )

    for query, expected in cases:
        tokenized = _tokenize_query(query)
        self.assertEqual(
            tokenized, expected, f"{tokenized} != {expected} for {query}"
        )
240
test_room_search.py
Python
tests/storage/test_room_search.py
d902181de98399d90c46c4e4e2cf631064757941
synapse
2
43,076
25
11
6
119
15
0
26
72
test_test_with_existing_dag_run
Mask secrets in stdout for 'airflow tasks test' (#24362) A stdout redirector is implemented to mask all values to stdout and redact any secrets in it with the secrets masker. This redirector is applied to the 'airflow.task' logger. Co-authored-by: Alex Kennedy <[email protected]>
https://github.com/apache/airflow.git
def test_test_with_existing_dag_run(self, caplog):
    task_id = 'print_the_context'
    args = self.parser.parse_args(["tasks", "test", self.dag_id, task_id, DEFAULT_DATE.isoformat()])
    with caplog.at_level("INFO", logger="airflow.task"):
        task_command.task_test(args)
    assert f"Marking task as SUCCESS. dag_id={self.dag_id}, task_id={task_id}" in caplog.text
62
test_task_command.py
Python
tests/cli/commands/test_task_command.py
3007159c2468f8e74476cc17573e03655ab168fa
airflow
1
40,644
69
15
16
175
12
0
91
364
_determine_axis_sharing
Refactor figure setup and subplot metadata tracking into Subplots class Squashed commit of the following: commit e6f99078d46947eab678b9dd0303657a3129f9fc Author: Michael Waskom <[email protected]> Date: Sun Aug 1 17:56:49 2021 -0400 Address a couple TODOs commit c48ba3af8095973b7dca9554934a695751f58726 Author: Michael Waskom <[email protected]> Date: Mon Jul 26 06:42:29 2021 -0400 Add docstrings in Subplots commit 97e6465b0f998f541b445b189682fbf134869391 Author: Michael Waskom <[email protected]> Date: Sun Jul 25 17:53:22 2021 -0400 Fix unshared label visibility test commit e2d93a28313c2cb9170e56b2e4b373987993be7c Author: Michael Waskom <[email protected]> Date: Sun Jul 25 17:16:41 2021 -0400 Add more label visibility tests commit 698ee72b5d5f9f3939c50cde9e2baacdf5487807 Author: Michael Waskom <[email protected]> Date: Sat Jul 24 11:08:32 2021 -0400 Begin adding label visibility tests commit 97167b4701532eeccadaa899520d57e38c26dd43 Author: Michael Waskom <[email protected]> Date: Mon Jul 19 06:55:48 2021 -0400 Fix interior tick labels with unshared axes commit 9331d5d91a7861aebfe03fa86ee122902c0d1d8a Author: Michael Waskom <[email protected]> Date: Sat Jul 17 17:03:48 2021 -0400 Fix interior labels for wrapped plots commit 38f2efa7e732958430c006f24827c6ac69640ef3 Author: Michael Waskom <[email protected]> Date: Sat Jul 17 16:03:34 2021 -0400 Fix non-cartesian interior labels commit 3c07f981110890d38aee19b38c43080863132122 Author: Michael Waskom <[email protected]> Date: Sat Jul 17 15:44:48 2021 -0400 Integrate Subplots into Plot commit 841a3c998eae8f8cc85fd65af7ea8e6f32fc5510 Author: Michael Waskom <[email protected]> Date: Sat Jul 17 13:00:09 2021 -0400 Complete subplots tests commit 8ceb7e6c35ea0cbcd014067035d7ea219204f464 Author: Michael Waskom <[email protected]> Date: Fri Jul 16 19:45:29 2021 -0400 Continue building out subplot tests commit b0ce0e7a9e3534fdad04ef9e287e4c6bb19fe684 Author: Michael Waskom <[email protected]> Date: Thu Jul 15 21:35:21 2021 -0400 Continue building out subplots tests commit 5f4b67d4d90cde7d0d899527b1fd8607348a5f5b Author: Michael Waskom <[email protected]> Date: Wed Jul 14 20:57:35 2021 -0400 Add some tests for Subplots functionality commit 58fbf8e3f349174f4d1d29f71fa867ad4b49d264 Author: Michael Waskom <[email protected]> Date: Sun Jul 11 20:49:29 2021 -0400 Begin refactoring figure setup into Subplots class commit 6bb853e20ad3b42b2728d212a51ed8de2ff47bde Author: Michael Waskom <[email protected]> Date: Sun Jul 11 16:02:26 2021 -0400 Fix overlooked lint and test
https://github.com/mwaskom/seaborn.git
def _determine_axis_sharing(self) -> None:
    axis_to_dim = {"x": "col", "y": "row"}
    key: str
    val: str | bool
    for axis in "xy":
        key = f"share{axis}"
        # Always use user-specified value, if present
        if key not in self.subplot_spec:
            if axis in self.pair_spec:
                # Paired axes are shared along one dimension by default
                if self.wrap in [None, 1] and self.pair_spec.get("cartesian", True):
                    val = axis_to_dim[axis]
                else:
                    val = False
            else:
                # This will pick up faceted plots, as well as single subplot
                # figures, where the value doesn't really matter
                val = True
            self.subplot_spec[key] = val
97
subplots.py
Python
seaborn/_core/subplots.py
c16180493bd44fd76092fdd9ea0060bac91e47fe
seaborn
6
69,330
72
20
35
544
39
0
118
83
get_item_map
refactor: rewrite `Stock Projected Qty Report` queries in `QB`
https://github.com/frappe/erpnext.git
def get_item_map(item_code, include_uom):
    bin = frappe.qb.DocType("Bin")
    item = frappe.qb.DocType("Item")

    query = (
        frappe.qb.from_(item)
        .select(item.name, item.item_name, item.description, item.item_group, item.brand, item.stock_uom)
        .where(
            (item.is_stock_item == 1)
            & (item.disabled == 0)
            & (
                (item.end_of_life > today())
                | (item.end_of_life.isnull())
                | (item.end_of_life == "0000-00-00")
            )
            & (ExistsCriterion(frappe.qb.from_(bin).select(bin.name).where(bin.item_code == item.name)))
        )
    )

    if item_code:
        query = query.where(item.item_code == item_code)

    if include_uom:
        ucd = frappe.qb.DocType("UOM Conversion Detail")
        query = query.left_join(ucd).on((ucd.parent == item.name) & (ucd.uom == include_uom))

    items = query.run(as_dict=True)

    ir = frappe.qb.DocType("Item Reorder")
    query = frappe.qb.from_(ir).select("*")

    if item_code:
        query = query.where(ir.parent == item_code)

    reorder_levels = frappe._dict()
    for d in query.run(as_dict=True):
        if d.parent not in reorder_levels:
            reorder_levels[d.parent] = []
        reorder_levels[d.parent].append(d)

    item_map = frappe._dict()
    for item in items:
        item["reorder_levels"] = reorder_levels.get(item.name) or []
        item_map[item.name] = item

    return item_map
340
stock_projected_qty.py
Python
erpnext/stock/report/stock_projected_qty/stock_projected_qty.py
c18f13a45ba6498245ba0429e61205a0df2434a6
erpnext
8
259,435
110
15
32
446
43
1
159
438
test_loss_of_perfect_prediction
ENH migrate GLMs / TweedieRegressor to linear loss (#22548) Co-authored-by: Olivier Grisel <[email protected]> Co-authored-by: Thomas J. Fan <[email protected]>
https://github.com/scikit-learn/scikit-learn.git
def test_loss_of_perfect_prediction(loss, sample_weight):
    if not loss.is_multiclass:
        # Use small values such that exp(value) is not nan.
        raw_prediction = np.array([-10, -0.1, 0, 0.1, 3, 10])
        # If link is identity, we must respect the interval of y_pred:
        if isinstance(loss.link, IdentityLink):
            eps = 1e-10
            low = loss.interval_y_pred.low
            if not loss.interval_y_pred.low_inclusive:
                low = low + eps
            high = loss.interval_y_pred.high
            if not loss.interval_y_pred.high_inclusive:
                high = high - eps
            raw_prediction = np.clip(raw_prediction, low, high)
        y_true = loss.link.inverse(raw_prediction)
    else:
        # HalfMultinomialLoss
        y_true = np.arange(loss.n_classes).astype(float)
        # raw_prediction with entries -exp(10), but +exp(10) on the diagonal
        # this is close enough to np.inf which would produce nan
        raw_prediction = np.full(
            shape=(loss.n_classes, loss.n_classes),
            fill_value=-np.exp(10),
            dtype=float,
        )
        raw_prediction.flat[:: loss.n_classes + 1] = np.exp(10)

    if sample_weight == "range":
        sample_weight = np.linspace(1, y_true.shape[0], num=y_true.shape[0])

    loss_value = loss.loss(
        y_true=y_true,
        raw_prediction=raw_prediction,
        sample_weight=sample_weight,
    )
    constant_term = loss.constant_to_optimal_zero(
        y_true=y_true, sample_weight=sample_weight
    )
    # Comparing loss_value + constant_term to zero would result in large
    # round-off errors.
    assert_allclose(loss_value, -constant_term, atol=1e-14, rtol=1e-15)


@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name)
@pytest.mark.parametrize("sample_weight", [None, "range"])
@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name) @pytest.mark.parametrize("sample_weight", [None, "range"])
266
test_loss.py
Python
sklearn/_loss/tests/test_loss.py
75a94f518f7bd7d0bf581ffb67d9f961e3c4efbc
scikit-learn
6
259,424
22
12
8
110
16
0
24
88
transform
ENH Preserving dtypes for ICA (#22806) Co-authored-by: Olivier Grisel <[email protected]> Co-authored-by: Jérémie du Boisberranger <[email protected]> Co-authored-by: Julien Jerphanion <[email protected]>
https://github.com/scikit-learn/scikit-learn.git
def transform(self, X, copy=True):
    check_is_fitted(self)

    X = self._validate_data(
        X, copy=(copy and self._whiten), dtype=[np.float64, np.float32], reset=False
    )
    if self._whiten:
        X -= self.mean_

    return np.dot(X, self.components_.T)
73
_fastica.py
Python
sklearn/decomposition/_fastica.py
d14fd82cf423c21ab6d01f7d0430083f9d7026be
scikit-learn
3
120,507
103
17
81
439
27
0
181
334
polar
Change implementation of jax.scipy.linalg.polar() and jax._src.scipy.eigh to use the QDWH decomposition from jax._src.lax.qdwh. Remove jax._src.lax.polar. PiperOrigin-RevId: 448241206
https://github.com/google/jax.git
def polar(a, side='right', *, method='qdwh', eps=None, max_iterations=None):
    r
    a = jnp.asarray(a)
    if a.ndim != 2:
        raise ValueError("The input `a` must be a 2-D array.")

    if side not in ["right", "left"]:
        raise ValueError("The argument `side` must be either 'right' or 'left'.")

    m, n = a.shape
    if method == "qdwh":
        # TODO(phawkins): return info also if the user opts in?
        if m >= n and side == "right":
            unitary, posdef, _, _ = qdwh.qdwh(a, is_hermitian=False, eps=eps)
        elif m < n and side == "left":
            a = a.T.conj()
            unitary, posdef, _, _ = qdwh.qdwh(a, is_hermitian=False, eps=eps)
            posdef = posdef.T.conj()
            unitary = unitary.T.conj()
        else:
            raise NotImplementedError("method='qdwh' only supports mxn matrices "
                                      "where m < n where side='right' and m >= n "
                                      f"side='left', got {a.shape} with side={side}")
    elif method == "svd":
        u_svd, s_svd, vh_svd = lax_linalg.svd(a, full_matrices=False)
        unitary = u_svd @ vh_svd
        if side == "right":
            # a = u * p
            posdef = (vh_svd.T.conj() * s_svd[None, :]) @ vh_svd
        else:
            # a = p * u
            posdef = (u_svd * s_svd[None, :]) @ (u_svd.T.conj())
    else:
        raise ValueError(f"Unknown polar decomposition method {method}.")

    return unitary, posdef
258
linalg.py
Python
jax/_src/scipy/linalg.py
7ba36fc1784a7a286aa13ab7c098f84ff64336f1
jax
10
167,666
9
9
4
55
6
1
9
24
data_missing_for_sorting
ENH/TST: Add BaseMethodsTests tests for ArrowExtensionArray (#47552) * ENH/TST: Add BaseMethodsTests tests for ArrowExtensionArray * Passing test now * add xfails for arraymanager * Fix typo * Trigger CI * Add xfails for min version and datamanger * Adjust more tests
https://github.com/pandas-dev/pandas.git
def data_missing_for_sorting(data_for_grouping):
    return type(data_for_grouping)._from_sequence(
        [data_for_grouping[0], data_for_grouping[2], data_for_grouping[4]]
    )


@pytest.fixture
@pytest.fixture
31
test_arrow.py
Python
pandas/tests/extension/test_arrow.py
700ef33b4dd5d5e76220ee50d1f07775dff15428
pandas
1
190,658
4
6
11
16
2
0
4
11
test_class_or_function_idval
Refactor idmaker functions into class IdMaker This commit only refactors, it does not change or add functionality yet. Public API is retained. Reason or refactoring: User provided parameter IDs (e.g. Metafunc.parametrize(ids=...)) had so far only been used to calculate a unique test ID for each test invocation. That test ID was a joined string where each parameter contributed some partial ID. We're soon going to reuse functionality to generate parameter keys for reorder_items and FixtureDef cache. We will be interested in the partial IDs, and only if they originate from explicit user information. Refactoring makes logic and data accessible for reuse, and increases cohesion in general.
https://github.com/pytest-dev/pytest.git
def test_class_or_function_idval(self) -> None:
66
metafunc.py
Python
testing/python/metafunc.py
b21b008118fc8cf65b4bcd9b059f1cd704e05c68
pytest
2
257,715
56
16
10
152
17
0
85
214
load_search_paths
Add enhanced pydoc-markdown pre-hook (#2979) * add pydoc-markdown pre-hook * add more comments, remove debug prints
https://github.com/deepset-ai/haystack.git
def load_search_paths():
    paths = dict()
    for fname in glob.glob("docs/_src/api/pydoc/*.yml"):
        with open(fname) as f:
            config = yaml.safe_load(f.read())
            # we always have only one loader in Haystack
            loader = config["loaders"][0]
            # `search_path` is a list but we always have only one item in Haystack
            search_path = loader["search_path"][0]
            # we only need the relative path from the root, let's call `resolve` to
            # get rid of the `../../` prefix
            search_path = str(pathlib.Path(search_path).resolve())
            # `resolve` will prepend a `/` to the path, remove it
            paths[search_path[1:]] = fname
    return paths
85
pydoc-markdown.py
Python
.github/utils/pydoc-markdown.py
0e8efdafa9e1b3e1a7096308b13760e5163f1439
haystack
2
195,007
41
11
10
155
14
0
49
125
contains_offensive_language
Fix `detect_offensive_language` script (#4054) * add fix * add test * make utils/safety robust to empty strings
https://github.com/facebookresearch/ParlAI.git
def contains_offensive_language(self, text):
    if not text:
        return False, 1.0
    act = {'text': text, 'episode_done': True}
    self.model.observe(act)
    response = self.model.act()['text']
    pred_class, prob = [x.split(': ')[-1] for x in response.split('\n')]
    pred_not_ok = self.classes[pred_class]  # check whether classified as NOT OK
    prob = float(prob)  # cast string to float
    return pred_not_ok, prob
92
safety.py
Python
parlai/utils/safety.py
e98b8603d83ab538ec6715f913a5ca980421ad6a
ParlAI
3
211,971
76
12
35
273
34
0
110
327
recompute
Redesign serialization protocol (#11960) * Redesign serialization in bokeh * Redesign deserialization in bokehjs * Resolve type issues and test failures * Make 'bytes' serialization work in bokeh * Partially update bokeh's serialization tests * Resolve issues with cyclic references * Don't limit StaticGraphProvider to tuples * Make FixedTicker.ticks' type more flexible * Use np.array instead of np.ndarray * Remove references to BokehJSONEncoder * Resolve sphinx warnings related to JSON * Implement hybrid serialization for map/dict * Use === or !== with unset symbol * Finalize deserialization of refs * Remove 'old' attribute from ModelChangedEvent * Make ButtonClick.__init__ less restrictive * Use Map<number, ...> in StaticLayoutProvider.graph_layout * Start using Map<K, V> for non-string keys * Fix plotting/file/line_on_off example * Don't denormalize specs in bokehjs * Hack around issues with resolving figure model * Remove special cases from defaults' tests * Temporarily update unit/bokeh/test_objects * Promote streaming/patching events and remove hints * Allow to stream/patch any property in bokehjs * Drop unneeded Property.serializable_value() * Set callback_invoker on hinted events * Simplify unit/bokeh/test_objects.py * Always preserve ndarrays even for dtype="object" * Refine and normalize naming conventions * Remove unused functions * Move Model.to_json() to sphinxext.bokeh_model * Include references in serialized values * Actually encode data when streaming/patching * Robustify differential serialization * Allow bokehjs to send binary buffers * Add dtype=object code path to ColorSpec * Simplify definitions of data specs * Remove meaningless code comments * Introduce Bytes and replace Base64String * Add support for serialization of slices * Remove obsolete comment from property/dataspec.py * Add a comment regarding ndarray.tobytes() * Try serializing pandas' types last * Standardize error reporting * Resturucture bokehjs serialization code * Redesign default model resolution * Refactor 'kind' in document events * Don't depend on Document in Deserializer * Make Deserializer.encode() re-entrant * Move *Buffer to serialization/buffer * Finalize differential serialization * Serialize vectorized values as structures * Rename Event.{decode_json->from_serializable} * Don't use has_ref() in Model.to_serializable() * Handle circular object references in bokehjs * Reorganize serialization unit tests * Redesign model registry and qualified names * Remove the need for StaticSerializer * Make 'attributes' optional in type reps * Allow to serialize typed arrays as binary * Finalize handling of binary buffers * Use memoryview to further defer encoding * Test dict serialization and ordering * Downcast ndarrays {u}int{64->32} if possible * Add preliminary release/migration notes * Robustify encoding of objects and object refs * Remove support for serialization of relativedelta * Import pandas only if really necessary * Statically type bokeh.core.serialization * Add preliminary serialization's documentation * Add Deserializer.deserialize() for symmetric APIs * Handle streaming/patching/data events in io.notebook * Update handling of buffers in io.notebook * Properly serialize MessageSent event * Add a regression test for issue #11694 * Preserve order of inherited properties * Add support for serialization of symbols * Update defaults' tests to use type="object" * Move DocJson.version to the first entry * Add a preliminary regression test for #11930 * Fix integration/glyphs/rect_log_axis.py * Fix 
value detection in dataspecs involving String * Remove an unnecessary type assertion
https://github.com/bokeh/bokeh.git
def recompute(self) -> None:
    document = self._document()
    if document is None:
        return

    new_models: Set[Model] = set()

    for mr in document.roots:
        new_models |= mr.references()

    old_models = set(self._models.values())

    to_detach = old_models - new_models
    to_attach = new_models - old_models

    recomputed: Dict[ID, Model] = {}
    recomputed_by_name: MultiValuedDict[str, Model] = MultiValuedDict()

    for mn in new_models:
        recomputed[mn.id] = mn
        if mn.name is not None:
            recomputed_by_name.add_value(mn.name, mn)

    for md in to_detach:
        self._seen_model_ids.add(md.id)
        md._detach_document()

    for ma in to_attach:
        ma._attach_document(document)
        self._new_models.add(ma)

    self._models = recomputed
    self._models_by_name = recomputed_by_name

    # XXX (bev) In theory, this is a potential issue for long-running apps that
    # update the model graph continuously, since this set of "seen" model ids can
    # grow without bound.
170
models.py
Python
bokeh/document/models.py
fca16442ae90afcd2ac61f4e554e538776730831
bokeh
7
84,404
14
9
26
75
10
0
16
51
test_receive_stream_email_forwarded_success
email_error: Improve tests for inbound email edge case. Tests to assist with clarifying #22585.
https://github.com/zulip/zulip.git
def test_receive_stream_email_forwarded_success(self) -> None:
    msgtext =
    user_profile = self.example_user("hamlet")
    self.login_user(user_profile)
    self.subscribe(user_profile, "Denmark")
    stream = get_stream("Denmark", user_profile.realm)
73
test_email_mirror.py
Python
zerver/tests/test_email_mirror.py
c6931434e994aee5fa15696be4d8db2c99c6c891
zulip
1
30,315
9
8
6
33
4
0
9
39
clean_ansi_sequence
remove ansi sequences from output in tests fixing tests Update test_entry_point.py
https://github.com/spotDL/spotify-downloader.git
def clean_ansi_sequence(text): return re.sub( r'(?:\x1B[@-Z\\-_]|[\x80-\x9A\x9C-\x9F]|(?:\x1B\[|\x9B)[0-?]*[ -/]*[@-~])', '', text, )
19
conftest.py
Python
tests/conftest.py
e2155b300889d657a29f4db0471643b25a8af982
spotify-downloader
1
101,386
7
11
3
42
6
0
7
21
has_predicted_mask
Bugfix: convert - Gif Writer - Fix non-launch error on Gif Writer - convert plugins - linting - convert/fs_media/preview/queue_manager - typing - Change convert items from dict to Dataclass
https://github.com/deepfakes/faceswap.git
def has_predicted_mask(self) -> bool: return bool(self._model.config.get("learn_mask", False))
24
convert.py
Python
scripts/convert.py
1022651eb8a7741014f5d2ec7cbfe882120dfa5f
faceswap
1
12,806
13
12
6
54
7
0
14
68
collect_all_results
fix: fix specific params problem with branches (#5038)
https://github.com/jina-ai/jina.git
def collect_all_results(self):
    res = {}
    for node in self.all_nodes:
        if node.result_in_params_returned:
            res.update(node.result_in_params_returned)
    return res
32
topology_graph.py
Python
jina/serve/runtimes/gateway/graph/topology_graph.py
032bd5e662dd16d4cfb394857074779970ae444a
jina
3
19,465
21
11
11
64
8
0
23
53
_looks_like_bpo_44860
Vendor in pip 21.2.4 release (from pip 21.2.2 prior). (#5009) * Vendor in pip 21.2.4 release (from pip 21.2.2 prior). * Add news fragment for pip 21.2.4 vendor update. * Add potentially missing LICENSE files
https://github.com/pypa/pipenv.git
def _looks_like_bpo_44860() -> bool:
    from distutils.command.install import INSTALL_SCHEMES  # type: ignore

    try:
        unix_user_platlib = INSTALL_SCHEMES["unix_user"]["platlib"]
    except KeyError:
        return False
    return unix_user_platlib == "$usersite"
35
__init__.py
Python
pipenv/patched/notpip/_internal/locations/__init__.py
7e33fcae4384563b4c927fd44318c29dd524a097
pipenv
2
247,938
9
8
14
32
5
0
10
31
test_lots_of_queued_things
Convert `Linearizer` tests from `inlineCallbacks` to async (#12353) Signed-off-by: Sean Quah <[email protected]>
https://github.com/matrix-org/synapse.git
def test_lots_of_queued_things(self) -> None:
    linearizer = Linearizer()
    key = ""
72
test_linearizer.py
Python
tests/util/test_linearizer.py
41b5f72677ea9763f3cf920d4f6df507653222f2
synapse
2
243,932
116
19
42
642
63
0
178
828
forward
Fix None grad problem during training TOOD by adding SigmoidGeometricMean (#7090)
https://github.com/open-mmlab/mmdetection.git
def forward(self, feats):
    cls_scores = []
    bbox_preds = []
    for idx, (x, scale, stride) in enumerate(
            zip(feats, self.scales, self.prior_generator.strides)):
        b, c, h, w = x.shape
        anchor = self.prior_generator.single_level_grid_priors(
            (h, w), idx, device=x.device)
        anchor = torch.cat([anchor for _ in range(b)])
        # extract task interactive features
        inter_feats = []
        for inter_conv in self.inter_convs:
            x = inter_conv(x)
            inter_feats.append(x)
        feat = torch.cat(inter_feats, 1)

        # task decomposition
        avg_feat = F.adaptive_avg_pool2d(feat, (1, 1))
        cls_feat = self.cls_decomp(feat, avg_feat)
        reg_feat = self.reg_decomp(feat, avg_feat)

        # cls prediction and alignment
        cls_logits = self.tood_cls(cls_feat)
        cls_prob = self.cls_prob_module(feat)
        cls_score = sigmoid_geometric_mean(cls_logits, cls_prob)

        # reg prediction and alignment
        if self.anchor_type == 'anchor_free':
            reg_dist = scale(self.tood_reg(reg_feat).exp()).float()
            reg_dist = reg_dist.permute(0, 2, 3, 1).reshape(-1, 4)
            reg_bbox = distance2bbox(
                self.anchor_center(anchor) / stride[0],
                reg_dist).reshape(b, h, w, 4).permute(0, 3, 1, 2)  # (b, c, h, w)
        elif self.anchor_type == 'anchor_based':
            reg_dist = scale(self.tood_reg(reg_feat)).float()
            reg_dist = reg_dist.permute(0, 2, 3, 1).reshape(-1, 4)
            reg_bbox = self.bbox_coder.decode(anchor, reg_dist).reshape(
                b, h, w, 4).permute(0, 3, 1, 2) / stride[0]
        else:
            raise NotImplementedError(
                f'Unknown anchor type: {self.anchor_type}.'
                f'Please use `anchor_free` or `anchor_based`.')
        reg_offset = self.reg_offset_module(feat)
        bbox_pred = self.deform_sampling(reg_bbox.contiguous(),
                                         reg_offset.contiguous())
        cls_scores.append(cls_score)
        bbox_preds.append(bbox_pred)
    return tuple(cls_scores), tuple(bbox_preds)
415
tood_head.py
Python
mmdet/models/dense_heads/tood_head.py
08bc3d7a8b08d9452d0734e9ff29c2e9b98890e5
mmdetection
6
68,836
50
18
71
304
22
0
80
49
get_mode_of_payment_details
refactor: DB independent quoting and truthy/falsy values (#31358) * refactor: DB independent quoting and truthy/falsy values * style: reformat to black spec * fix: ifnull -> coalesce * fix: coalesce -> Coalesce * fix: revert pypika comparison * refactor: convert queries to QB * fix: incorrect value types for query `=` query makes no sense with list of values * fix: remove warehouse docstatus condition * fix: keep using base rate as rate Co-authored-by: Ankush Menat <[email protected]>
https://github.com/frappe/erpnext.git
def get_mode_of_payment_details(filters):
    mode_of_payment_details = {}
    invoice_list = get_invoices(filters)
    invoice_list_names = ",".join("'" + invoice["name"] + "'" for invoice in invoice_list)
    if invoice_list:
        inv_mop_detail = frappe.db.sql(
            .format(
                invoice_list_names=invoice_list_names
            ),
            as_dict=1,
        )

        inv_change_amount = frappe.db.sql(
            .format(
                invoice_list_names=invoice_list_names
            ),
            as_dict=1,
        )

        for d in inv_change_amount:
            for det in inv_mop_detail:
                if (
                    det["owner"] == d["owner"]
                    and det["posting_date"] == d["posting_date"]
                    and det["mode_of_payment"] == d["mode_of_payment"]
                ):
                    paid_amount = det["paid_amount"] - d["change_amount"]
                    det["paid_amount"] = paid_amount

        for d in inv_mop_detail:
            mode_of_payment_details.setdefault(d["owner"] + cstr(d["posting_date"]), []).append(
                (d.mode_of_payment, d.paid_amount)
            )

    return mode_of_payment_details
181
sales_payment_summary.py
Python
erpnext/accounts/report/sales_payment_summary/sales_payment_summary.py
74a782d81d8f8c4a4d9214a9c06377e5e6e464dd
erpnext
9
278,956
6
7
22
22
4
0
6
12
_replace_child_layer_functions
Remove pylint comments. PiperOrigin-RevId: 452353044
https://github.com/keras-team/keras.git
def _replace_child_layer_functions(layer, serialization_cache):
    original_fns = {}
106
save_impl.py
Python
keras/saving/saved_model/save_impl.py
3613c3defc39c236fb1592c4f7ba1a9cc887343a
keras
6
130,340
64
14
24
267
18
0
92
296
fillout_resources_kubernetes
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
https://github.com/ray-project/ray.git
def fillout_resources_kubernetes(config):
    if "available_node_types" not in config:
        return config
    node_types = copy.deepcopy(config["available_node_types"])
    head_node_type = config["head_node_type"]
    for node_type in node_types:

        node_config = node_types[node_type]["node_config"]
        # The next line is for compatibility with configs like
        # kubernetes/example-ingress.yaml,
        # cf. KubernetesNodeProvider.create_node().
        pod = node_config.get("pod", node_config)
        container_data = pod["spec"]["containers"][0]

        autodetected_resources = get_autodetected_resources(container_data)
        if node_types == head_node_type:
            # we only autodetect worker type node memory resource
            autodetected_resources.pop("memory")
        if "resources" not in config["available_node_types"][node_type]:
            config["available_node_types"][node_type]["resources"] = {}
        autodetected_resources.update(
            config["available_node_types"][node_type]["resources"]
        )
        config["available_node_types"][node_type]["resources"] = autodetected_resources
        logger.debug(
            "Updating the resources of node type {} to include {}.".format(
                node_type, autodetected_resources
            )
        )
    return config
151
config.py
Python
python/ray/autoscaler/_private/_kubernetes/config.py
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
ray
5
132,909
20
9
3
47
10
0
20
48
nodes
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
https://github.com/ray-project/ray.git
def nodes(self):
    # This should be imported here, otherwise, it will error doc build.
    import ray.core.generated.ray_client_pb2 as ray_client_pb2

    return self.worker.get_cluster_info(ray_client_pb2.ClusterInfoType.NODES)
29
api.py
Python
python/ray/util/client/api.py
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
ray
1
260,539
20
10
13
124
16
0
21
128
fit
MAINT parameter validation for CountVectorizer & TfidfVectorizer (#23853) Co-authored-by: Meekail Zain <[email protected]> Co-authored-by: jeremiedbb <[email protected]>
https://github.com/scikit-learn/scikit-learn.git
def fit(self, raw_documents, y=None):
    self._validate_params()
    self._check_params()
    self._warn_for_unused_params()
    self._tfidf = TfidfTransformer(
        norm=self.norm,
        use_idf=self.use_idf,
        smooth_idf=self.smooth_idf,
        sublinear_tf=self.sublinear_tf,
    )
    X = super().fit_transform(raw_documents)
    self._tfidf.fit(X)
    return self
78
text.py
Python
sklearn/feature_extraction/text.py
c300a8f2178fcae847f82ad548fe9452f2ba8bbb
scikit-learn
1
296,854
30
9
10
123
7
0
48
126
pretty_duration
Refactor history_stats to minimize database access (part 2) (#70255)
https://github.com/home-assistant/core.git
def pretty_duration(hours):
    seconds = int(3600 * hours)
    days, seconds = divmod(seconds, 86400)
    hours, seconds = divmod(seconds, 3600)
    minutes, seconds = divmod(seconds, 60)
    if days > 0:
        return "%dd %dh %dm" % (days, hours, minutes)
    if hours > 0:
        return "%dh %dm" % (hours, minutes)
    return "%dm" % minutes
76
helpers.py
Python
homeassistant/components/history_stats/helpers.py
73a368c24246b081cdb98923ca3180937d436c3b
core
3
249,151
36
10
36
177
13
0
47
219
test_search_term
Use literals in place of `HTTPStatus` constants in tests (#13469)
https://github.com/matrix-org/synapse.git
def test_search_term(self) -> None:
    # Create two test rooms
    room_id_1 = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok)
    room_id_2 = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok)

    room_name_1 = "something"
    room_name_2 = "LoremIpsum"

    # Set the name for each room
    self.helper.send_state(
        room_id_1,
        "m.room.name",
        {"name": room_name_1},
        tok=self.admin_user_tok,
    )
    self.helper.send_state(
        room_id_2,
        "m.room.name",
        {"name": room_name_2},
        tok=self.admin_user_tok,
    )

    self._set_canonical_alias(room_id_1, "#Room_Alias1:test", self.admin_user_tok)
221
test_room.py
Python
tests/rest/admin/test_room.py
c97042f7eef3748e17c90e48a4122389a89c4735
synapse
1
273,184
70
12
19
248
25
0
99
294
get_vocabulary
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def get_vocabulary(self, include_special_tokens=True):
    # The lookup table data will not be sorted, so we will create a inverted
    # lookup here, and use that to lookup a range of indices [0, vocab_size).
    if self.lookup_table.size() == 0:
        vocab, indices = [], []
    else:
        keys, values = self.lookup_table.export()
        vocab, indices = (values, keys) if self.invert else (keys, values)
        vocab, indices = (
            self._tensor_vocab_to_numpy(vocab),
            indices.numpy(),
        )
    lookup = collections.defaultdict(
        lambda: self.oov_token, zip(indices, vocab)
    )
    vocab = [lookup[x] for x in range(self.vocabulary_size())]
    if self.mask_token is not None and self.output_mode == INT:
        vocab[0] = self.mask_token
    if not include_special_tokens:
        vocab = vocab[self._token_start_index() :]
    return vocab
158
index_lookup.py
Python
keras/layers/preprocessing/index_lookup.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
7
26,497
9
8
3
31
5
0
11
16
test_validate_invalid_subscription_and_query
Add Webhook payload via graphql subscriptions (#9394) * Add PoC of webhook subscriptions * add async webhooks subscription payloads feature * remove unneeded file * add translations subscription handling, fixes after review * remove todo * add descriptions * add descriptions, move subsrciption_payloads.py * refactor * fix imports, add changelog * check_document_is_single_subscription refactor Co-authored-by: Maciej Korycinski <[email protected]> Co-authored-by: Marcin Gębala <[email protected]>
https://github.com/saleor/saleor.git
def test_validate_invalid_subscription_and_query():
    result = validate_subscription_query(TEST_INVALID_MULTIPLE_SUBSCRIPTION_AND_QUERY)
    assert result is False


TEST_INVALID_MULTIPLE_SUBSCRIPTION =
14
test_create_deliveries_for_subscription.py
Python
saleor/plugins/webhook/tests/subscription_webhooks/test_create_deliveries_for_subscription.py
aca6418d6c36956bc1ab530e6ef7e146ec9df90c
saleor
1
176,825
37
15
23
209
19
0
62
319
analyze_symmetry
Update ISMAGS.analyze_symmetry docstring. (#5696) * Update ISMAGS.analyze_symmetry docstring. Co-authored-by: Peter C Kroon <[email protected]> * Fix formatting. Co-authored-by: Peter C Kroon <[email protected]>
https://github.com/networkx/networkx.git
def analyze_symmetry(self, graph, node_partitions, edge_colors):
    if self._symmetry_cache is not None:
        key = hash(
            (
                tuple(graph.nodes),
                tuple(graph.edges),
                tuple(map(tuple, node_partitions)),
                tuple(edge_colors.items()),
            )
        )
        if key in self._symmetry_cache:
            return self._symmetry_cache[key]
    node_partitions = list(
        self._refine_node_partitions(graph, node_partitions, edge_colors)
    )
    assert len(node_partitions) == 1
    node_partitions = node_partitions[0]

    permutations, cosets = self._process_ordered_pair_partitions(
        graph, node_partitions, node_partitions, edge_colors
    )
    if self._symmetry_cache is not None:
        self._symmetry_cache[key] = permutations, cosets
    return permutations, cosets
140
ismags.py
Python
networkx/algorithms/isomorphism/ismags.py
52fe06608904ded6d3a0d49a64129e043351b308
networkx
4
261,313
31
11
9
95
8
0
34
125
set_output
ENH Introduces set_output API for pandas output (#23734) * Introduces set_output API for all transformers * TransformerMixin inherits from _SetOutputMixin * Adds tests * Adds whatsnew * Adds example on using set_output API * Adds developer docs for set_output
https://github.com/scikit-learn/scikit-learn.git
def set_output(self, *, transform=None):
    if hasattr(super(), "set_output"):
        return super().set_output(transform=transform)

    if transform == "pandas" and self.feature_names_out is None:
        warnings.warn(
            'With transform="pandas", `func` should return a DataFrame to follow'
            " the set_output API."
        )

    return self
53
_function_transformer.py
Python
sklearn/preprocessing/_function_transformer.py
2a6703d9e8d1e54d22dd07f2bfff3c92adecd758
scikit-learn
4
167,798
40
11
21
138
11
0
45
116
add_flex_arithmetic_methods
TYP: more return annotations in core/ (#47618) * TYP: more return annotations in core/ * from __future__ import annotations * more __future__
https://github.com/pandas-dev/pandas.git
def add_flex_arithmetic_methods(cls) -> None:
    flex_arith_method, flex_comp_method = _get_method_wrappers(cls)
    new_methods = _create_methods(cls, flex_arith_method, flex_comp_method)
    new_methods.update(
        {
            "multiply": new_methods["mul"],
            "subtract": new_methods["sub"],
            "divide": new_methods["div"],
        }
    )
    # opt out of bool flex methods for now
    assert not any(kname in new_methods for kname in ("ror_", "rxor", "rand_"))

    _add_methods(cls, new_methods=new_methods)
80
methods.py
Python
pandas/core/ops/methods.py
f65417656ba8c59438d832b6e2a431f78d40c21c
pandas
2
19,668
68
15
30
349
35
0
88
425
get_paths
Issue 4993 Add standard pre commit hooks and apply linting. (#4994) * Add .pre-commit-config.yaml to the project and exclude tests (for now). This does not include the MyPy linting that pip does but does include everything else.
https://github.com/pypa/pipenv.git
def get_paths(self):
    # type: () -> Optional[Dict[str, str]]
    tmpfile = vistir.path.create_tracked_tempfile(suffix=".json")
    tmpfile.close()
    tmpfile_path = make_posix(tmpfile.name)
    py_command = self.build_command(
        python_lib=True, python_inc=True, scripts=True, py_version=True
    )
    command = [self.python, "-c", py_command.format(tmpfile_path)]
    c = subprocess_run(command)
    if c.returncode == 0:
        paths = {}
        with open(tmpfile_path, "r", encoding="utf-8") as fh:
            paths = json.load(fh)
        if "purelib" in paths:
            paths["libdir"] = paths["purelib"] = make_posix(paths["purelib"])
        for key in (
            "platlib",
            "scripts",
            "platstdlib",
            "stdlib",
            "include",
            "platinclude",
        ):
            if key in paths:
                paths[key] = make_posix(paths[key])
        return paths
    else:
        vistir.misc.echo(f"Failed to load paths: {c.stderr}", fg="yellow")
        vistir.misc.echo(f"Output: {c.stdout}", fg="yellow")
    return None
197
environment.py
Python
pipenv/environment.py
9a3b3ce70621af6f9adaa9eeac9cf83fa149319c
pipenv
5
153,789
8
9
43
38
6
0
8
22
test_postprocessing_with_all_metadata
FEAT-#4412: Add Batch Pipeline API to Modin (#4452) Co-authored-by: Yaroslav Igoshev <[email protected]> Co-authored-by: Mahesh Vashishtha <[email protected]> Signed-off-by: Rehan Durrani <[email protected]>
https://github.com/modin-project/modin.git
def test_postprocessing_with_all_metadata(self):
    arr = np.random.randint(0, 1000, (1000, 1000))
335
test_pipeline.py
Python
modin/experimental/batch/test/test_pipeline.py
3d4404e9d9a9b2a3327f8aee664a8e71ac1f18b8
modin
9
19,989
6
8
10
30
4
0
6
12
captured_stdout
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
https://github.com/pypa/pipenv.git
def captured_stdout() -> ContextManager[StreamWrapper]: return captured_output("stdout")
15
misc.py
Python
pipenv/patched/notpip/_internal/utils/misc.py
f3166e673fe8d40277b804d35d77dcdb760fc3b3
pipenv
1
215,964
104
19
40
451
33
0
135
470
list_sites
Update to latest ``pyupgrade`` hook. Stop skipping it on CI. Signed-off-by: Pedro Algarvio <[email protected]>
https://github.com/saltstack/salt.git
def list_sites():
    ret = dict()
    ps_cmd = [
        "Get-ChildItem",
        "-Path",
        r"'IIS:\Sites'",
        "|",
        "Select-Object applicationPool, Bindings, ID, Name, PhysicalPath, State",
    ]
    keep_keys = ("certificateHash", "certificateStoreName", "protocol", "sslFlags")

    cmd_ret = _srvmgr(cmd=ps_cmd, return_json=True)

    try:
        items = salt.utils.json.loads(cmd_ret["stdout"], strict=False)
    except ValueError:
        raise CommandExecutionError("Unable to parse return data as Json.")

    for item in items:
        bindings = dict()
        for binding in item["bindings"]["Collection"]:

            # Ignore bindings which do not have host names
            if binding["protocol"] not in ["http", "https"]:
                continue

            filtered_binding = dict()

            for key in binding:
                if key in keep_keys:
                    filtered_binding.update({key.lower(): binding[key]})

            binding_info = binding["bindingInformation"].split(":", 2)
            ipaddress, port, hostheader = (element.strip() for element in binding_info)
            filtered_binding.update(
                {"hostheader": hostheader, "ipaddress": ipaddress, "port": port}
            )
            bindings[binding["bindingInformation"]] = filtered_binding

        ret[item["name"]] = {
            "apppool": item["applicationPool"],
            "bindings": bindings,
            "id": item["id"],
            "state": item["state"],
            "sourcepath": item["physicalPath"],
        }

    if not ret:
        log.warning("No sites found in output: %s", cmd_ret["stdout"])

    return ret
260
win_iis.py
Python
salt/modules/win_iis.py
f2a783643de61cac1ff3288b40241e5ce6e1ddc8
salt
9
200,378
33
14
8
82
10
0
41
133
_construct
Fix various typos Found via `codespell -q 3 -L aboves,aline,ans,aother,arithmetics,assum,atleast,braket,clen,declar,declars,dorder,dum,enew,fo,fro,inout,iself,ist,ket,lamda,lightyear,lightyears,nd,numer,numers,orderd,ot,pring,rcall,rever,ro,ser,siz,splitted,sring,supercedes,te,tht,unequality,upto,vas,versin,whet`
https://github.com/sympy/sympy.git
def _construct(cls, attr, arg):
    # arg may be ``NoneToken()``, so comparison is done using == instead of ``is`` operator
    if arg == None:
        return cls.defaults.get(attr, none)
    else:
        if isinstance(arg, Dummy):  # SymPy's replace uses Dummy instances
            return arg
        else:
            return cls._get_constructor(attr)(arg)
50
ast.py
Python
sympy/codegen/ast.py
24f1e7730119fe958cc8e28411f790c9a5ec04eb
sympy
3
101,232
10
10
4
66
9
0
10
43
_kwarg_requirements
lib.align updates: - alignments.py - Add typed dicts for imported alignments - Explicitly check for presence of thumb value in alignments dict - linting - detected_face.py - Typing - Linting - Legacy support for pre-aligned face - Update dependencies to new property names
https://github.com/deepfakes/faceswap.git
def _kwarg_requirements(self) -> Dict[Literal["gaussian", "normalized"], List[str]]: return dict(gaussian=["ksize", "sigmaX"], normalized=["ksize"])
38
detected_face.py
Python
lib/align/detected_face.py
5e73437be47f2410439a3c6716de96354e6a0c94
faceswap
1
259,041
56
14
18
207
15
0
74
194
_set_order
MNT Drops Python 3.7 in CI, wheel building, and docs (#22617) * MNT Drops Python 3.7 * MNT Bump NumPy and SciPy * FIX Fix build * FIX Bump versions improved * DOC Fixes numpy version [pypy] * BLD [pypy] [icc-build] * Update docs * MAINT use scipy.optimize.LinearConstraint in test * MAINT scipy 1.1.0 related code clean-up * scipy>=1.3.2 in pyproject.toml's build deps * [cd build] * DOC Adds comment about pypy * MAINT remove _astype_copy_false * FIX Update check for python version in setup.py Co-authored-by: Olivier Grisel <[email protected]>
https://github.com/scikit-learn/scikit-learn.git
def _set_order(X, y, order="C"): if order not in [None, "C", "F"]: raise ValueError( "Unknown value for order. Got {} instead of None, 'C' or 'F'.".format(order) ) sparse_X = sparse.issparse(X) sparse_y = sparse.issparse(y) if order is not None: sparse_format = "csc" if order == "F" else "csr" if sparse_X: X = X.asformat(sparse_format, copy=False) else: X = np.asarray(X, order=order) if sparse_y: y = y.asformat(sparse_format) else: y = np.asarray(y, order=order) return X, y ############################################################################### # Paths functions
123
_coordinate_descent.py
Python
sklearn/linear_model/_coordinate_descent.py
f1d3417b086550be670cbfbb5b3c1760ac99203f
scikit-learn
6
47,487
48
13
22
314
38
0
68
238
test_find_executable_task_instances_order_execution_date
Replace usage of `DummyOperator` with `EmptyOperator` (#22974) * Replace usage of `DummyOperator` with `EmptyOperator`
https://github.com/apache/airflow.git
def test_find_executable_task_instances_order_execution_date(self, dag_maker):
    dag_id_1 = 'SchedulerJobTest.test_find_executable_task_instances_order_execution_date-a'
    dag_id_2 = 'SchedulerJobTest.test_find_executable_task_instances_order_execution_date-b'
    task_id = 'task-a'
    session = settings.Session()
    with dag_maker(dag_id=dag_id_1, max_active_tasks=16, session=session):
        EmptyOperator(task_id=task_id)
    dr1 = dag_maker.create_dagrun(execution_date=DEFAULT_DATE + timedelta(hours=1))

    with dag_maker(dag_id=dag_id_2, max_active_tasks=16, session=session):
        EmptyOperator(task_id=task_id)
    dr2 = dag_maker.create_dagrun()

    dr1 = session.merge(dr1, load=False)

    self.scheduler_job = SchedulerJob(subdir=os.devnull)

    tis = dr1.task_instances + dr2.task_instances
    for ti in tis:
        ti.state = State.SCHEDULED
        session.merge(ti)
    session.flush()

    res = self.scheduler_job._executable_task_instances_to_queued(max_tis=1, session=session)
    session.flush()
    assert [ti.key for ti in res] == [tis[1].key]
    session.rollback()
193
test_scheduler_job.py
Python
tests/jobs/test_scheduler_job.py
49e336ae0302b386a2f47269a6d13988382d975f
airflow
3
166,202
6
10
5
36
6
0
6
20
null_count
ENH: Implement DataFrame interchange protocol (#46141)
https://github.com/pandas-dev/pandas.git
def null_count(self) -> int: return self._col.isna().sum()
20
column.py
Python
pandas/core/exchange/column.py
90140f055892a46f473bd26affab88a7f171e394
pandas
1
305,039
5
6
2
20
3
1
5
11
async_remove
Add a callback for data flow handler removal (#77394) * Add a callback for when data flows are removed * Call `async_remove` at the very end * Handle and log exceptions caught during flow removal * Log the error as an exception, with a traceback * Adjust test's expected logging output to match updated format specifier
https://github.com/home-assistant/core.git
def async_remove(self) -> None: @callback
@callback
8
data_entry_flow.py
Python
homeassistant/data_entry_flow.py
2224d0f43a048052cfc4572df95c7afcccdf3a57
core
1
185,711
28
12
18
122
12
0
36
167
_binding_chain
Don't include self in DOMNode.ancestors any more As well as dropping `self` from the list that DOMNode.ancestors provides, this commit also adds DOMNode.ancestors_with_self, which maintains the previous behaviour of DOMNode.ancestors.
https://github.com/Textualize/textual.git
def _binding_chain(self) -> list[tuple[DOMNode, Bindings]]:
    focused = self.focused
    namespace_bindings: list[tuple[DOMNode, Bindings]]

    if focused is None:
        namespace_bindings = [
            (self.screen, self.screen._bindings),
            (self, self._bindings),
        ]
    else:
        namespace_bindings = [
            (node, node._bindings) for node in focused.ancestors_with_self
        ]

    return namespace_bindings
82
app.py
Python
src/textual/app.py
e3130f95c69648916f121e779a325b6f6f87e6ba
textual
3
177,497
12
9
3
55
8
0
14
23
multi_source_dijkstra_path
Hide edges with a weight of None in A*. (#5945) * Hide edges with a weight of None in A*. This matches the Dijkstra's weight interface. * Update Dijkstra's and A* docs for weights of None. * Add tests for A* with weight of None. * Add another test for A* with a weight function. * Document that None indicates a hidden edge.
https://github.com/networkx/networkx.git
def multi_source_dijkstra_path(G, sources, cutoff=None, weight="weight"):
    length, path = multi_source_dijkstra(G, sources, cutoff=cutoff, weight=weight)
    return path
36
weighted.py
Python
networkx/algorithms/shortest_paths/weighted.py
d82815dba6c8ddce19cd49f700298dc82a58f066
networkx
1
224,010
29
12
7
69
11
1
29
108
load_file
Remove spaces at the ends of docstrings, normalize quotes
https://github.com/mkdocs/mkdocs.git
def load_file(self, config_file):
    try:
        return self.load_dict(utils.yaml_load(config_file))
    except YAMLError as e:
        # MkDocs knows and understands ConfigurationErrors
        raise exceptions.ConfigurationError(
            f"MkDocs encountered an error parsing the configuration file: {e}"
        )


@contextmanager
@contextmanager
35
base.py
Python
mkdocs/config/base.py
e7f07cc82ab2be920ab426ba07456d8b2592714d
mkdocs
2
101,270
64
19
29
480
34
0
135
430
reload_images
lib.align updates: - alignments.py - Add typed dicts for imported alignments - Explicitly check for presence of thumb value in alignments dict - linting - detected_face.py - Typing - Linting - Legacy support for pre-aligned face - Update dependencies to new property names
https://github.com/deepfakes/faceswap.git
def reload_images(self, group_method, img_list):
    logger.info("Preparing to group...")
    if group_method == 'group_blur':
        filename_list, image_list = self._get_images()
        blurs = [self.estimate_blur(img) for img in image_list]
        temp_list = list(zip(filename_list, blurs))
    elif group_method == 'group_blur_fft':
        filename_list, image_list = self._get_images()
        fft_blurs = [self.estimate_blur_fft(img) for img in image_list]
        temp_list = list(zip(filename_list, fft_blurs))
    elif group_method == 'group_face_cnn':
        filename_list, image_list, landmarks = self._get_landmarks()
        temp_list = list(zip(filename_list, landmarks))
    elif group_method == 'group_face_yaw':
        filename_list, image_list, landmarks = self._get_landmarks()
        yaws = [self.calc_landmarks_face_yaw(mark) for mark in landmarks]
        temp_list = list(zip(filename_list, yaws))
    elif group_method == 'group_hist':
        filename_list, image_list = self._get_images()
        histograms = [cv2.calcHist([img], [0], None, [256], [0, 256]) for img in image_list]
        temp_list = list(zip(filename_list, histograms))
    elif group_method == 'group_black_pixels':
        filename_list, image_list = self._get_images()
        black_pixels = [np.ndarray.all(img == [0, 0, 0], axis=2).sum()/img.size*100*3
                        for img in image_list]
        temp_list = list(zip(filename_list, black_pixels))
    else:
        raise ValueError(f"{group_method} group_method not found.")

    return self.splice_lists(img_list, temp_list)
301
sort.py
Python
tools/sort/sort.py
5e73437be47f2410439a3c6716de96354e6a0c94
faceswap
12
149,760
21
10
8
153
13
0
26
82
append_predictions
add freqao backend machinery, user interface, documentation
https://github.com/freqtrade/freqtrade.git
def append_predictions(self, predictions, do_predict, len_dataframe):
    ones = np.ones(len_dataframe)
    s_mean, s_std = ones*self.data['s_mean'], ones*self.data['s_std']

    self.predictions = np.append(self.predictions, predictions)
    self.do_predict = np.append(self.do_predict, do_predict)
    self.target_mean = np.append(self.target_mean, s_mean)
    self.target_std = np.append(self.target_std, s_std)

    return
98
data_handler.py
Python
freqtrade/freqai/data_handler.py
fc837c4daa27a18ff0e86128f4d52089b88fa5fb
freqtrade
1
154,264
50
12
23
230
29
0
64
265
build_query_compiler
FEAT-#4733: Support fastparquet as engine for `read_parquet` (#4807) Signed-off-by: Karthik Velayutham <[email protected]>
https://github.com/modin-project/modin.git
def build_query_compiler(cls, dataset, columns, index_columns, **kwargs):
    storage_options = kwargs.pop("storage_options", {}) or {}
    col_partitions, column_widths = cls.build_columns(columns)
    partition_ids = cls.call_deploy(
        dataset, col_partitions, storage_options, **kwargs
    )
    index, sync_index = cls.build_index(dataset, partition_ids, index_columns)
    remote_parts = cls.build_partition(partition_ids, column_widths)
    if len(partition_ids) > 0:
        row_lengths = [part.length() for part in remote_parts.T[0]]
    else:
        row_lengths = None
    frame = cls.frame_cls(
        remote_parts,
        index,
        columns,
        row_lengths=row_lengths,
        column_widths=column_widths,
        dtypes=None,
    )
    if sync_index:
        frame.synchronize_labels(axis=0)
    return cls.query_compiler_cls(frame)
152
parquet_dispatcher.py
Python
modin/core/io/column_stores/parquet_dispatcher.py
b240370bf83c88589d293b76b4a2409294e06f90
modin
5
83,239
64
12
34
414
13
0
121
389
test_collapse_event
docs: Fix many spelling mistakes. Signed-off-by: Anders Kaseorg <[email protected]>
https://github.com/zulip/zulip.git
def test_collapse_event(self) -> None:
    client = self.get_client_descriptor()
    queue = client.event_queue
    queue.push({"type": "restart", "server_generation": 1, "timestamp": "1"})
    # Verify the server_generation event is stored as a virtual event
    self.assertEqual(
        queue.virtual_events,
        {"restart": {"id": 0, "type": "restart", "server_generation": 1, "timestamp": "1"}},
    )
    # And we can reconstruct newest_pruned_id etc.
    self.verify_to_dict_end_to_end(client)

    queue.push({"type": "unknown", "timestamp": "1"})
    self.assertEqual(list(queue.queue), [{"id": 1, "type": "unknown", "timestamp": "1"}])
    self.assertEqual(
        queue.virtual_events,
        {"restart": {"id": 0, "type": "restart", "server_generation": 1, "timestamp": "1"}},
    )
    # And we can still reconstruct newest_pruned_id etc. correctly
    self.verify_to_dict_end_to_end(client)

    # Verify virtual events are converted to real events by .contents()
    self.assertEqual(
        queue.contents(),
        [
            {"id": 0, "type": "restart", "server_generation": 1, "timestamp": "1"},
            {"id": 1, "type": "unknown", "timestamp": "1"},
        ],
    )

    # And now verify to_dict after pruning
    queue.prune(0)
    self.verify_to_dict_end_to_end(client)

    queue.prune(1)
    self.verify_to_dict_end_to_end(client)
223
test_event_queue.py
Python
zerver/tests/test_event_queue.py
b0ce4f1bce8031881addecb1e86073483517f392
zulip
1
100,362
89
17
26
366
33
0
109
595
_download_model
Update code to support Tensorflow versions up to 2.8 (#1213) * Update maximum tf version in setup + requirements * - bump max version of tf version in launcher - standardise tf version check * update keras get_custom_objects for tf>2.6 * bugfix: force black text in GUI file dialogs (linux) * dssim loss - Move to stock tf.ssim function * Update optimizer imports for compatibility * fix logging for tf2.8 * Fix GUI graphing for TF2.8 * update tests * bump requirements.txt versions * Remove limit on nvidia-ml-py * Graphing bugfixes - Prevent live graph from displaying if data not yet available * bugfix: Live graph. Collect loss labels correctly * fix: live graph - swallow inconsistent loss errors * Bugfix: Prevent live graph from clearing during training * Fix graphing for AMD
https://github.com/deepfakes/faceswap.git
def _download_model(self):
    self.logger.info("Downloading model: '%s' from: %s", self._model_name, self._url_download)
    for attempt in range(self._retries):
        try:
            downloaded_size = self._url_partial_size
            req = urllib.request.Request(self._url_download)
            if downloaded_size != 0:
                req.add_header("Range", f"bytes={downloaded_size}-")
            with urllib.request.urlopen(req, timeout=10) as response:
                self.logger.debug("header info: {%s}", response.info())
                self.logger.debug("Return Code: %s", response.getcode())
                self._write_zipfile(response, downloaded_size)
            break
        except (socket_error, socket_timeout,
                urllib.error.HTTPError, urllib.error.URLError) as err:
            if attempt + 1 < self._retries:
                self.logger.warning("Error downloading model (%s). Retrying %s of %s...",
                                    str(err), attempt + 2, self._retries)
            else:
                self.logger.error("Failed to download model. Exiting. (Error: '%s', URL: "
                                  "'%s')", str(err), self._url_download)
                self.logger.info("You can try running again to resume the download.")
                self.logger.info("Alternatively, you can manually download the model from: %s "
                                 "and unzip the contents to: %s",
                                 self._url_download, self._cache_dir)
                sys.exit(1)
220
utils.py
Python
lib/utils.py
c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf
faceswap
5
275,791
5
9
2
31
5
0
5
19
texts_to_sequences
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def texts_to_sequences(self, texts): return list(self.texts_to_sequences_generator(texts))
18
text.py
Python
keras/preprocessing/text.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
1
76,882
6
9
2
39
7
1
6
11
notification_static
Rename BASE_URL to WAGTAILADMIN_BASE_URL & add docs - add documentation for WAGTAILADMIN_BASE_URL - ensure that WAGTAILADMIN_BASE_URL is used via a util so that it can fallback from request - resolves #3248 - remove settings URL from pagination template - not required for query string URLs
https://github.com/wagtail/wagtail.git
def notification_static(path):
    return urljoin(base_url_setting(), static(path))


@register.simple_tag
@register.simple_tag
18
wagtailadmin_tags.py
Python
wagtail/admin/templatetags/wagtailadmin_tags.py
cb7fc4cb1403436c9e7803c341278c20d3b06158
wagtail
1
270,330
3
6
22
15
3
0
3
6
_make_eager_execution_function
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def _make_eager_execution_function(model, mode):
119
distributed_training_utils_v1.py
Python
keras/distribute/distributed_training_utils_v1.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
1
270,738
141
10
8
118
15
0
212
380
add_variable
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def add_variable(self, *args, **kwargs):
    warnings.warn(
        "`layer.add_variable` is deprecated and "
        "will be removed in a future version. "
        "Please use the `layer.add_weight()` method instead.",
        stacklevel=2,
    )
    return self.add_weight(*args, **kwargs)


##############################################################################
# Methods & attributes below are all private and only used by the framework. #
##############################################################################

# See tf.Module for the usage of this property.
# The key for _obj_reference_counts_dict is a Trackable, which could be a
# variable or layer etc. tf.Module._flatten will fail to flatten the key
# since it is trying to convert Trackable to a string. This attribute can be
# ignored even after the fix of nest lib, since the trackable object should
# already been available as individual attributes. _obj_reference_counts_dict
# just contains a copy of them.
_TF_MODULE_IGNORED_PROPERTIES = frozenset(
    itertools.chain(
        ("_obj_reference_counts_dict",),
        tf.Module._TF_MODULE_IGNORED_PROPERTIES,
    )
)

# When loading from a SavedModel, Layers typically can be revived into a
# generic Layer wrapper. Sometimes, however, layers may implement methods
# that go beyond this wrapper, as in the case of PreprocessingLayers'
# `adapt` method. When this is the case, layer implementers can override
# must_restore_from_config to return True; layers with this property must
# be restored into their actual objects (and will fail if the object is
# not available to the restoration code).
_must_restore_from_config = False
36
base_layer.py
Python
keras/engine/base_layer.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
1
22,670
4
6
2
19
3
0
4
18
width
refactor: clean code Signed-off-by: slowy07 <[email protected]>
https://github.com/geekcomputers/Python.git
def width(self): return self.__width
10
lib.py
Python
linear-algebra-python/src/lib.py
f0af0c43340763724f139fa68aa1e5a9ffe458b4
Python
1
52,364
33
13
14
163
16
0
47
129
get_in_turn_repetition
update plato2_en_base (#2113) * update plato2_en_base * update README
https://github.com/PaddlePaddle/PaddleHub.git
def get_in_turn_repetition(pred, is_cn=False):
    if len(pred) == 0:
        return 1.0
    if isinstance(pred[0], str):
        pred = [tok.lower() for tok in pred]
        if is_cn:
            pred = "".join(pred)
    tri_grams = set()
    for i in range(len(pred) - 2):
        tri_gram = tuple(pred[i:i + 3])
        if tri_gram in tri_grams:
            return 1.0
        tri_grams.add(tri_gram)
    return 0.0
107
model.py
Python
modules/text/text_generation/plato2_en_base/model.py
36ce4789c748040238a87598b5d6b2a5c817a73e
PaddleHub
7
247,331
7
9
5
45
7
0
7
21
test_legacy_on_create_room
Add type hints to `tests/rest/client` (#12108) * Add type hints to `tests/rest/client` * newsfile * fix imports * add `test_account.py` * Remove one type hint in `test_report_event.py` * change `on_create_room` to `async` * update new functions in `test_third_party_rules.py` * Add `test_filter.py` * add `test_rooms.py` * change to `assertEquals` to `assertEqual` * lint
https://github.com/matrix-org/synapse.git
def test_legacy_on_create_room(self) -> None: self.helper.create_room_as(self.user_id, tok=self.tok, expect_code=403)
28
test_third_party_rules.py
Python
tests/rest/client/test_third_party_rules.py
2ffaf30803f93273a4d8a65c9e6c3110c8433488
synapse
1
47,589
42
13
18
281
35
0
61
203
test_dag_removed_if_serialized_dag_is_removed
Replace usage of `DummyOperator` with `EmptyOperator` (#22974) * Replace usage of `DummyOperator` with `EmptyOperator`
https://github.com/apache/airflow.git
def test_dag_removed_if_serialized_dag_is_removed(self, dag_maker):
    from airflow.operators.empty import EmptyOperator

    with dag_maker(
        dag_id="test_dag_removed_if_serialized_dag_is_removed",
        schedule_interval=None,
        start_date=tz.datetime(2021, 10, 12),
    ) as dag:
        EmptyOperator(task_id="task_1")

    dag_maker.create_dagrun()

    dagbag = DagBag(dag_folder=self.empty_dir, include_examples=False, read_dags_from_db=True)
    dagbag.dags = {dag.dag_id: SerializedDAG.from_dict(SerializedDAG.to_dict(dag))}
    dagbag.dags_last_fetched = {dag.dag_id: (tz.utcnow() - timedelta(minutes=2))}
    dagbag.dags_hash = {dag.dag_id: mock.ANY}

    assert SerializedDagModel.has_dag(dag.dag_id) is False

    assert dagbag.get_dag(dag.dag_id) is None
    assert dag.dag_id not in dagbag.dags
    assert dag.dag_id not in dagbag.dags_last_fetched
    assert dag.dag_id not in dagbag.dags_hash
180
test_dagbag.py
Python
tests/models/test_dagbag.py
49e336ae0302b386a2f47269a6d13988382d975f
airflow
1
256,315
45
10
20
191
14
0
71
171
reduce_annotations
Apply black formatting (#2115) * Testing black on ui/ * Applying black on docstores * Add latest docstring and tutorial changes * Create a single GH action for Black and docs to reduce commit noise to the minimum, slightly refactor the OpenAPI action too * Remove comments * Relax constraints on pydoc-markdown * Split temporary black from the docs. Pydoc-markdown was obsolete and needs a separate PR to upgrade * Fix a couple of bugs * Add a type: ignore that was missing somehow * Give path to black * Apply Black * Apply Black * Relocate a couple of type: ignore * Update documentation * Make Linux CI run after applying Black * Triggering Black * Apply Black * Remove dependency, does not work well * Remove manually double trailing commas * Update documentation Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
https://github.com/deepset-ai/haystack.git
def reduce_annotations(anno_types, answers):
    for at in set(anno_types):
        assert at in ("no_answer", "short_answer")
    if anno_types.count("short_answer") >= anno_types.count("no_answer"):
        majority = "short_answer"
        is_impossible = False
    else:
        majority = "no_answer"
        is_impossible = True
    answers = [a for at, a in zip(anno_types, answers) if at == majority]
    reduction = len(anno_types) - len(answers)
    assert reduction < 3
    if not is_impossible:
        global n_no_ans
        n_no_ans += reduction
    else:
        global n_short
        n_short += reduction
        answers = []
    return answers, is_impossible
112
nq_to_squad.py
Python
test/benchmarks/nq_to_squad.py
a59bca366174d9c692fa19750c24d65f47660ef7
haystack
6
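`reduce_annotations` in the record above resolves conflicting Natural Questions annotations by majority vote and updates module-level counters. A short worked example, assuming the function and the `n_no_ans` / `n_short` globals live in the same module (as in `nq_to_squad.py`) and using invented annotation values:

```python
# Hypothetical inputs; reduce_annotations is assumed to be defined in this module.
n_no_ans = 0
n_short = 0

anno_types = ["short_answer", "short_answer", "no_answer"]
answers = [{"text": "42"}, {"text": "forty-two"}, {}]

reduced, is_impossible = reduce_annotations(anno_types, answers)

# "short_answer" wins 2 to 1, so the question stays answerable, the lone
# "no_answer" annotation is dropped, and n_no_ans is incremented by 1.
assert is_impossible is False
assert reduced == [{"text": "42"}, {"text": "forty-two"}]
assert n_no_ans == 1
```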
681
8
8
4
27
2
0
9
18
reference_scalar_manager
Reverting changes to REPT and moving new code to NDEPT - Added tests and methods to lazyrepeatarray - Started fleshing out NDEPT - Added serde to NDEPT - Added test for NDEPT serde
https://github.com/OpenMined/PySyft.git
def reference_scalar_manager() -> VirtualMachinePrivateScalarManager:
    reference_scalar_manager = VirtualMachinePrivateScalarManager()
    return reference_scalar_manager
14
ndim_entity_phi_test.py
Python
packages/syft/tests/syft/core/tensor/adp/ndim_entity_phi_test.py
d8bf644d529b155467539a707cf5f8538ae41283
PySyft
1
247,933
59
8
21
221
23
0
88
264
test_cancellation
Convert `Linearizer` tests from `inlineCallbacks` to async (#12353) Signed-off-by: Sean Quah <[email protected]>
https://github.com/matrix-org/synapse.git
def test_cancellation(self) -> None:
    linearizer = Linearizer()
    key = object()

    d1, acquired_d1, unblock1 = self._start_task(linearizer, key)
    self.assertTrue(acquired_d1.called)

    # Create a second task, waiting for the first task.
    d2, acquired_d2, _ = self._start_task(linearizer, key)
    self.assertFalse(acquired_d2.called)

    # Create a third task, waiting for the second task.
    d3, acquired_d3, unblock3 = self._start_task(linearizer, key)
    self.assertFalse(acquired_d3.called)

    # Cancel the waiting second task.
    d2.cancel()

    unblock1()
    self.successResultOf(d1)

    self.assertTrue(d2.called)
    self.failureResultOf(d2, CancelledError)

    # The third task should continue running.
    self.assertTrue(
        acquired_d3.called,
        "Third task did not get the lock after the second task was cancelled",
    )
    unblock3()
    self.successResultOf(d3)
134
test_linearizer.py
Python
tests/util/test_linearizer.py
41b5f72677ea9763f3cf920d4f6df507653222f2
synapse
1
264,000
85
13
11
170
14
0
130
216
add_suffix_to_extension
building: move filename processing of EXTENSION entries to analysis stage Move filename processing of EXTENSION TOC entries (i.e., converting the module name to file path and adding the suffix) from the build stage (i.e., `assemble` in `PKG`, `COLLECT`, and `BUNDLE`) into analysis stage. This ensures that during the build stage, the EXTENSION entries in the TOC are already full filenames, same as other file-based entries (DATA, BINARY, etc.). This in turn means that the `add_suffix_to_extension` helper does not need to worry about DEPENDENCY entries anymore, and can process only EXTENSION ones, as implied by its name. Early conversion of EXTENSION module names to file names also prevents duplication when the same file is collected as both an EXTENSION and some other type, for example DATA: ``` ('torch._C', '...\\site-packages\\torch\\_C.cp39-win_amd64.pyd', 'EXTENSION'), ('torch\\_C.cp39-win_amd64.pyd', '...\\site-pakages\\torch\\_C.cp39-win_amd64.pyd', 'DATA'), ``` Prior to this commit, the entries were considered different from the `TOC` perspective, but led to duplication in onefile build's PKG once extension's name was changed to the file name (whereas in onedir build, the first entry was overwritten by the second).
https://github.com/pyinstaller/pyinstaller.git
def add_suffix_to_extension(inm, fnm, typ):
    # No-op for non-extension
    if typ != 'EXTENSION':
        return inm, fnm, typ

    # If inm completely fits into end of the fnm, it has already been processed.
    if fnm.endswith(inm):
        return inm, fnm, typ

    # Change the dotted name into a relative path. This places C extensions in the Python-standard location.
    inm = inm.replace('.', os.sep)

    # In some rare cases extension might already contain a suffix. Skip it in this case.
    if os.path.splitext(inm)[1] not in EXTENSION_SUFFIXES:
        # Determine the base name of the file.
        base_name = os.path.basename(inm)
        assert '.' not in base_name
        # Use this file's existing extension. For extensions such as ``libzmq.cp36-win_amd64.pyd``, we cannot use
        # ``os.path.splitext``, which would give only the ```.pyd`` part of the extension.
        inm = inm + os.path.basename(fnm)[len(base_name):]

    return inm, fnm, typ
103
utils.py
Python
PyInstaller/building/utils.py
04984a040c2396127f234518f783cbed088408bb
pyinstaller
4
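The commit message in the record above turns on how `add_suffix_to_extension` converts a dotted module name into a real file name. A worked example with an illustrative path (not one taken from a real build), assuming the function from the record plus its `os` / `EXTENSION_SUFFIXES` dependencies are importable:

```python
# Illustrative call; the source path is invented, and EXTENSION_SUFFIXES inside
# add_suffix_to_extension is assumed to match importlib.machinery.EXTENSION_SUFFIXES.
import os

inm, fnm, typ = add_suffix_to_extension(
    'torch._C',
    os.path.join('site-packages', 'torch', '_C.cp39-win_amd64.pyd'),
    'EXTENSION',
)

# The dotted name becomes a relative path and inherits the file's full suffix,
# e.g. 'torch/_C.cp39-win_amd64.pyd' on POSIX (with os.sep as the separator).
assert inm == os.path.join('torch', '_C.cp39-win_amd64.pyd')
assert typ == 'EXTENSION'
```

This mirrors the duplication example quoted in the commit message: once the EXTENSION entry carries the full file name, it matches the DATA entry for the same file, which is what allows the TOC to de-duplicate them.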
86,877
9
9
5
34
3
0
10
33
clear_region_to_control_producer
chore(hybrid-cloud): AuditLogEntry is a control silo model now (#39890) In the control silo, creating an audit log entry writes to the db directly, whilst in region silo mode creating an audit log entry will instead push to a new kafka producer that consumes into the control silo asynchronously.
https://github.com/getsentry/sentry.git
def clear_region_to_control_producer():
    global _publisher
    if _publisher:
        _publisher.close()
        _publisher = None
18
producer.py
Python
src/sentry/region_to_control/producer.py
941184cd24186324fd9f7f304b7f713041834726
sentry
2
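`clear_region_to_control_producer` in the record above tears down a lazily created module-level publisher, which only makes sense alongside a matching getter. The sketch below is a generic reconstruction of that pattern for the region-silo path described in the commit message; `KafkaPublisher` and `get_region_to_control_producer` are illustrative stand-ins, not names confirmed from the sentry codebase:

```python
# Minimal lazy-singleton producer pattern implied by the cleanup helper above.
# KafkaPublisher and get_region_to_control_producer are hypothetical names.
from typing import Optional


class KafkaPublisher:
    """Stand-in for a real Kafka client wrapper."""

    def publish(self, topic: str, payload: bytes) -> None:
        # A real implementation would hand the payload to a Kafka producer here.
        print(f"publish {len(payload)} bytes to {topic}")

    def close(self) -> None:
        # A real implementation would flush buffers and release connections.
        print("publisher closed")


_publisher: Optional[KafkaPublisher] = None


def get_region_to_control_producer() -> KafkaPublisher:
    """Create the publisher on first use and reuse it afterwards."""
    global _publisher
    if _publisher is None:
        _publisher = KafkaPublisher()
    return _publisher


def clear_region_to_control_producer() -> None:
    """Close and drop the cached publisher, mirroring the record above."""
    global _publisher
    if _publisher:
        _publisher.close()
        _publisher = None
```

In the split the commit message describes, region silos would publish audit-log entries through a producer like this, while the control silo keeps writing directly to the database.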