Dataset columns (type, with value or string-length range):
ast_errors: stringlengths, 0 to 3.2k
d_id: int64, 44 to 121k
id: int64, 70 to 338k
n_whitespaces: int64, 3 to 14k
path: stringlengths, 8 to 134
n_words: int64, 4 to 4.82k
n_identifiers: int64, 1 to 131
random_cut: stringlengths, 16 to 15.8k
commit_message: stringlengths, 2 to 15.3k
fun_name: stringlengths, 1 to 84
commit_id: stringlengths, 40 to 40
repo: stringlengths, 3 to 28
file_name: stringlengths, 5 to 79
ast_levels: int64, 6 to 31
nloc: int64, 1 to 548
url: stringlengths, 31 to 59
complexity: int64, 1 to 66
token_counts: int64, 6 to 2.13k
n_ast_errors: int64, 0 to 28
vocab_size: int64, 4 to 1.11k
n_ast_nodes: int64, 15 to 19.2k
language: stringclasses, 1 value
documentation: dict
code: stringlengths, 101 to 62.2k
ast_errors: @router.get("/version")
d_id: 11,931 | id: 59,709 | n_whitespaces: 13
path: src/prefect/orion/api/admin.py | n_words: 8 | n_identifiers: 8
async def read_settings() -> prefect.settings.Settings: r
Add secret flag to settings and obfuscate by default when displayed (#7465)
fun_name: read_settings | commit_id: 902dfa4bd3b6e330e4374eb1e04de064148a2f32
repo: prefect | file_name: admin.py | ast_levels: 10 | nloc: 7
url: https://github.com/PrefectHQ/prefect.git
complexity: 1 | token_counts: 23 | n_ast_errors: 1 | vocab_size: 8 | n_ast_nodes: 56 | language: Python
{ "docstring": "\n Get the current Orion settings.\n\n Secret setting values will be obfuscated.\n ", "language": "en", "n_whitespaces": 21, "n_words": 11, "vocab_size": 11 }
async def read_settings() -> prefect.settings.Settings: return prefect.settings.get_current_settings().with_obfuscated_secrets() @router.get("/version")
d_id: 36,505 | id: 156,012 | n_whitespaces: 59
path: dask/array/core.py | n_words: 20 | n_identifiers: 10
def map_overlap(self, func, depth, boundary=None, trim=True, **kwargs): from dask.array.overlap import map_overlap return map_overlap(
absolufy-imports - No relative - PEP8 (#8796) Conversation in https://github.com/dask/distributed/issues/5889
fun_name: map_overlap | commit_id: cccb9d8d8e33a891396b1275c2448c352ef40c27
repo: dask | file_name: core.py | ast_levels: 8 | nloc: 5
url: https://github.com/dask/dask.git
complexity: 1 | token_counts: 51 | n_ast_errors: 0 | vocab_size: 19 | n_ast_nodes: 71 | language: Python
{ "docstring": "Map a function over blocks of the array with some overlap\n\n We share neighboring zones between blocks of the array, then map a\n function, then trim away the neighboring strips.\n\n Note that this function will attempt to automatically determine the output\n array type before computing it, please refer to the ``meta`` keyword argument\n in :func:`map_blocks <dask.array.core.map_blocks>` if you expect that the function will not succeed when\n operating on 0-d arrays.\n\n Parameters\n ----------\n func: function\n The function to apply to each extended block\n depth: int, tuple, or dict\n The number of elements that each block should share with its neighbors\n If a tuple or dict then this can be different per axis\n boundary: str, tuple, dict\n How to handle the boundaries.\n Values include 'reflect', 'periodic', 'nearest', 'none',\n or any constant value like 0 or np.nan\n trim: bool\n Whether or not to trim ``depth`` elements from each block after\n calling the map function.\n Set this to False if your mapping function already does this for you\n **kwargs:\n Other keyword arguments valid in :func:`map_blocks <dask.array.core.map_blocks>`.\n\n Examples\n --------\n >>> import dask.array as da\n >>> x = np.array([1, 1, 2, 3, 3, 3, 2, 1, 1])\n >>> x = da.from_array(x, chunks=5)\n >>> def derivative(x):\n ... return x - np.roll(x, 1)\n\n >>> y = x.map_overlap(derivative, depth=1, boundary=0)\n >>> y.compute()\n array([ 1, 0, 1, 1, 0, 0, -1, -1, 0])\n\n >>> import dask.array as da\n >>> x = np.arange(16).reshape((4, 4))\n >>> d = da.from_array(x, chunks=(2, 2))\n >>> y = d.map_overlap(lambda x: x + x.size, depth=1, boundary='reflect')\n >>> y.compute()\n array([[16, 17, 18, 19],\n [20, 21, 22, 23],\n [24, 25, 26, 27],\n [28, 29, 30, 31]])\n\n >>> func = lambda x: x + x.size\n >>> depth = {0: 1, 1: 1}\n >>> boundary = {0: 'reflect', 1: 'none'}\n >>> d.map_overlap(func, depth, boundary).compute() # doctest: +NORMALIZE_WHITESPACE\n array([[12, 13, 14, 15],\n [16, 17, 18, 19],\n [20, 21, 22, 23],\n [24, 25, 26, 27]])\n\n >>> x = np.arange(16).reshape((4, 4))\n >>> d = da.from_array(x, chunks=(2, 2))\n >>> y = d.map_overlap(lambda x: x + x[2], depth=1, boundary='reflect', meta=np.array(()))\n >>> y\n dask.array<_trim, shape=(4, 4), dtype=float64, chunksize=(2, 2), chunktype=numpy.ndarray>\n >>> y.compute()\n array([[ 4, 6, 8, 10],\n [ 8, 10, 12, 14],\n [20, 22, 24, 26],\n [24, 26, 28, 30]])\n\n >>> import cupy # doctest: +SKIP\n >>> x = cupy.arange(16).reshape((4, 4)) # doctest: +SKIP\n >>> d = da.from_array(x, chunks=(2, 2)) # doctest: +SKIP\n >>> y = d.map_overlap(lambda x: x + x[2], depth=1, boundary='reflect', meta=cupy.array(())) # doctest: +SKIP\n >>> y # doctest: +SKIP\n dask.array<_trim, shape=(4, 4), dtype=float64, chunksize=(2, 2), chunktype=cupy.ndarray>\n >>> y.compute() # doctest: +SKIP\n array([[ 4, 6, 8, 10],\n [ 8, 10, 12, 14],\n [20, 22, 24, 26],\n [24, 26, 28, 30]])\n ", "language": "en", "n_whitespaces": 1096, "n_words": 435, "vocab_size": 223 }
def map_overlap(self, func, depth, boundary=None, trim=True, **kwargs): from dask.array.overlap import map_overlap return map_overlap( func, self, depth=depth, boundary=boundary, trim=trim, **kwargs )
d_id: 50,614 | id: 204,011 | n_whitespaces: 98
path: django/contrib/gis/gdal/raster/source.py | n_words: 33 | n_identifiers: 7
def _flush(self): # Raise an Exception if the value is being changed in read mode. if not self._write: raise GDALException( "Raster needs to be opened in write mode to change values." ) capi
Refs #33476 -- Reformatted code with Black.
fun_name: _flush | commit_id: 9c19aff7c7561e3a82978a272ecdaad40dda5c00
repo: django | file_name: source.py | ast_levels: 10 | nloc: 6
url: https://github.com/django/django.git
complexity: 2 | token_counts: 25 | n_ast_errors: 0 | vocab_size: 30 | n_ast_nodes: 47 | language: Python
{ "docstring": "\n Flush all data from memory into the source file if it exists.\n The data that needs flushing are geotransforms, coordinate systems,\n nodata_values and pixel values. This function will be called\n automatically wherever it is needed.\n ", "language": "en", "n_whitespaces": 71, "n_words": 35, "vocab_size": 33 }
def _flush(self): # Raise an Exception if the value is being changed in read mode. if not self._write: raise GDALException( "Raster needs to be opened in write mode to change values." ) capi.flush_ds(self._ptr)
d_id: 55,798 | id: 219,783 | n_whitespaces: 185
path: python3.10.4/Lib/_pydecimal.py | n_words: 82 | n_identifiers: 10
def _round(self, places, rounding): if places <= 0: raise ValueError("argument should be at least 1 in _round") if self._is_special or not self: return Decimal(self) ans = self._rescale(self.adjusted()+1-places, rounding) # it ca
add python 3.10.4 for windows
fun_name: _round | commit_id: 8198943edd73a363c266633e1aa5b2a9e9c9f526
repo: XX-Net | file_name: _pydecimal.py | ast_levels: 14 | nloc: 9
url: https://github.com/XX-net/XX-Net.git
complexity: 5 | token_counts: 84 | n_ast_errors: 0 | vocab_size: 66 | n_ast_nodes: 141 | language: Python
{ "docstring": "Round a nonzero, nonspecial Decimal to a fixed number of\n significant figures, using the given rounding mode.\n\n Infinities, NaNs and zeros are returned unaltered.\n\n This operation is quiet: it raises no flags, and uses no\n information from the context.\n\n ", "language": "en", "n_whitespaces": 74, "n_words": 39, "vocab_size": 35 }
def _round(self, places, rounding): if places <= 0: raise ValueError("argument should be at least 1 in _round") if self._is_special or not self: return Decimal(self) ans = self._rescale(self.adjusted()+1-places, rounding) # it can happen that the rescale alters the adjusted exponent; # for example when rounding 99.97 to 3 significant figures. # When this happens we end up with an extra 0 at the end of # the number; a second rescale fixes this. if ans.adjusted() != self.adjusted(): ans = ans._rescale(ans.adjusted()+1-places, rounding) return ans
d_id: 4,181 | id: 22,104 | n_whitespaces: 24
path: pipenv/patched/pip/_vendor/requests/models.py | n_words: 10 | n_identifiers: 5
def is_redirect(self): r
Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.
fun_name: is_redirect | commit_id: cd5a9683be69c86c8f3adcd13385a9bc5db198ec
repo: pipenv | file_name: models.py | ast_levels: 8 | nloc: 2
url: https://github.com/pypa/pipenv.git
complexity: 2 | token_counts: 18 | n_ast_errors: 0 | vocab_size: 9 | n_ast_nodes: 33 | language: Python
{ "docstring": "True if this Response is a well-formed HTTP redirect that could have\n been processed automatically (by :meth:`Session.resolve_redirects`).\n ", "language": "en", "n_whitespaces": 31, "n_words": 17, "vocab_size": 17 }
def is_redirect(self): return "location" in self.headers and self.status_code in REDIRECT_STATI
d_id: 39,293 | id: 162,760 | n_whitespaces: 65
path: research/neo_peq/legacy_frequency_response.py | n_words: 19 | n_identifiers: 13
def write_eqapo_graphic_eq(self, file_path, normalize=True): file_path = os.path.abspath(file_path) s = self.eqapo_graphic_eq(normalize=normalize) with open(file_path, 'w', encoding='utf-8') as f: f.write(s) return s
Added PEQ configs to CLI and function interfaces. Improved default value handling for PEQ parameters and added more predefined configs. Removed legacy PEQ optimization. Fixed readme write. Improved shelf filter initialization. Added plot method to PEQ. Notebook for comparing old and new optimizers. Bug fixes.
fun_name: write_eqapo_graphic_eq | commit_id: 9120cdffe618c6c2ff16fe6a311b6a1367efdbc8
repo: AutoEq | file_name: legacy_frequency_response.py | ast_levels: 12 | nloc: 6
url: https://github.com/jaakkopasanen/AutoEq.git
complexity: 1 | token_counts: 54 | n_ast_errors: 0 | vocab_size: 17 | n_ast_nodes: 92 | language: Python
{ "docstring": "Writes equalization graph to a file as Equalizer APO config.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
def write_eqapo_graphic_eq(self, file_path, normalize=True): file_path = os.path.abspath(file_path) s = self.eqapo_graphic_eq(normalize=normalize) with open(file_path, 'w', encoding='utf-8') as f: f.write(s) return s
d_id: 43,393 | id: 181,605 | n_whitespaces: 142
path: tests/export_tests.py | n_words: 29 | n_identifiers: 14
def test_export_pipeline_6(): pipeline_string = ( 'DecisionTreeClassifier(SelectPercentile(input_matrix, SelectPercentile__percentile=20),' 'DecisionTreeClassifier__criterion=gini, DecisionTreeClassifier__max_depth=8,' 'DecisionTreeClassifier__min_samples_leaf=5, DecisionTreeClassifier__min_samples_split=5)' ) pipeline = creator.Individual.from_string(pipeline_string, tpot_obj._pset) expected_code = exported_code = export_pipeline(pipeline, tpot_obj.operators, tpot_obj._pset, random_state=42, data_file_path='test_path') assert expected_code
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
fun_name: test_export_pipeline_6 | commit_id: 388616b6247ca4ea8de4e2f340d6206aee523541
repo: tpot | file_name: export_tests.py | ast_levels: 10 | nloc: 35
url: https://github.com/EpistasisLab/tpot.git
complexity: 1 | token_counts: 55 | n_ast_errors: 0 | vocab_size: 24 | n_ast_nodes: 96 | language: Python
{ "docstring": "Assert that exported_pipeline() generated a compile source file with random_state and data_file_path.import numpy as np\nimport pandas as pd\nfrom sklearn.feature_selection import SelectPercentile, f_classif\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.tree import DecisionTreeClassifier\nfrom tpot.export_utils import set_param_recursive\n\n# NOTE: Make sure that the outcome column is labeled 'target' in the data file\ntpot_data = pd.read_csv('test_path', sep='COLUMN_SEPARATOR', dtype=np.float64)\nfeatures = tpot_data.drop('target', axis=1)\ntraining_features, testing_features, training_target, testing_target = \\\\\n train_test_split(features, tpot_data['target'], random_state=42)\n\nexported_pipeline = make_pipeline(\n SelectPercentile(score_func=f_classif, percentile=20),\n DecisionTreeClassifier(criterion=\"gini\", max_depth=8, min_samples_leaf=5, min_samples_split=5)\n)\n# Fix random state for all the steps in exported pipeline\nset_param_recursive(exported_pipeline.steps, 'random_state', 42)\n\nexported_pipeline.fit(training_features, training_target)\nresults = exported_pipeline.predict(testing_features)\n", "language": "en", "n_whitespaces": 102, "n_words": 102, "vocab_size": 82 }
def test_export_pipeline_6(): pipeline_string = ( 'DecisionTreeClassifier(SelectPercentile(input_matrix, SelectPercentile__percentile=20),' 'DecisionTreeClassifier__criterion=gini, DecisionTreeClassifier__max_depth=8,' 'DecisionTreeClassifier__min_samples_leaf=5, DecisionTreeClassifier__min_samples_split=5)' ) pipeline = creator.Individual.from_string(pipeline_string, tpot_obj._pset) expected_code = exported_code = export_pipeline(pipeline, tpot_obj.operators, tpot_obj._pset, random_state=42, data_file_path='test_path') assert expected_code == exported_code
d_id: 42,027 | id: 176,659 | n_whitespaces: 669
path: networkx/algorithms/connectivity/tests/test_edge_kcomponents.py | n_words: 275 | n_identifiers: 32
def _check_edge_connectivity(G): # Construct the auxiliary graph that can be used to make each k-cc or k-sub aux_graph = EdgeComponentAuxGraph.construct(G) # memoize the local connectivity in this graph memo = {} for k in it.count(1): # Test "local" k-edge-components and k-edge-subgraphs ccs_local = fset(aux_graph.k_edge_components(k)) ccs_subgraph = fset(aux_graph.k_edge_subgraphs(k)) # Check connectivity properties that should be guaranteed by the # algorithms. _assert_local_cc_edge_connectivity(G, ccs_local, k, memo) _assert_subgraph_edge_connectivity(G, ccs_subgraph, k) if k == 1 or k == 2 and not G.is_directed(): assert ( ccs_local == ccs_subgraph ), "Subgraphs and components should be the same when k == 1 or (k == 2 and not G.directed())" if G.is_directed(): # Test special case methods are the same as the aux graph if k == 1: alt_sccs = fset(nx.strongly_connected_components(G)) assert alt_sccs == ccs_local, "k=1 failed alt" assert alt_sccs == ccs_subgraph, "k=1 failed alt" else: # Test special case methods are the same as the aux gra
doc: fix typos in docstring and comment (#5647)
fun_name: _check_edge_connectivity | commit_id: 26b7de005ac562786f72b24a73af5a59bbab6953
repo: networkx | file_name: test_edge_kcomponents.py | ast_levels: 18 | nloc: 32
url: https://github.com/networkx/networkx.git
complexity: 13 | token_counts: 235 | n_ast_errors: 0 | vocab_size: 128 | n_ast_nodes: 393 | language: Python
{ "docstring": "\n Helper - generates all k-edge-components using the aux graph. Checks the\n both local and subgraph edge connectivity of each cc. Also checks that\n alternate methods of computing the k-edge-ccs generate the same result.\n ", "language": "en", "n_whitespaces": 47, "n_words": 33, "vocab_size": 29 }
def _check_edge_connectivity(G): # Construct the auxiliary graph that can be used to make each k-cc or k-sub aux_graph = EdgeComponentAuxGraph.construct(G) # memoize the local connectivity in this graph memo = {} for k in it.count(1): # Test "local" k-edge-components and k-edge-subgraphs ccs_local = fset(aux_graph.k_edge_components(k)) ccs_subgraph = fset(aux_graph.k_edge_subgraphs(k)) # Check connectivity properties that should be guaranteed by the # algorithms. _assert_local_cc_edge_connectivity(G, ccs_local, k, memo) _assert_subgraph_edge_connectivity(G, ccs_subgraph, k) if k == 1 or k == 2 and not G.is_directed(): assert ( ccs_local == ccs_subgraph ), "Subgraphs and components should be the same when k == 1 or (k == 2 and not G.directed())" if G.is_directed(): # Test special case methods are the same as the aux graph if k == 1: alt_sccs = fset(nx.strongly_connected_components(G)) assert alt_sccs == ccs_local, "k=1 failed alt" assert alt_sccs == ccs_subgraph, "k=1 failed alt" else: # Test special case methods are the same as the aux graph if k == 1: alt_ccs = fset(nx.connected_components(G)) assert alt_ccs == ccs_local, "k=1 failed alt" assert alt_ccs == ccs_subgraph, "k=1 failed alt" elif k == 2: alt_bridge_ccs = fset(bridge_components(G)) assert alt_bridge_ccs == ccs_local, "k=2 failed alt" assert alt_bridge_ccs == ccs_subgraph, "k=2 failed alt" # if new methods for k == 3 or k == 4 are implemented add them here # Check the general subgraph method works by itself alt_subgraph_ccs = fset( [set(C.nodes()) for C in general_k_edge_subgraphs(G, k=k)] ) assert alt_subgraph_ccs == ccs_subgraph, "alt subgraph method failed" # Stop once k is larger than all special case methods # and we cannot break down ccs any further. if k > 2 and all(len(cc) == 1 for cc in ccs_local): break # ---------------- # Misc tests # ----------------
d_id: 50,000 | id: 201,816 | n_whitespaces: 163
path: tests/backends/tests.py | n_words: 53 | n_identifiers: 17
def test_sequence_name_length_limits_flush(self): # A full flush is expensive to the full test, so we dig into the # internals to generate the likely offending SQL and run it manually # Some convenience aliases VLM = VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ VLM_m2m = ( VLM.m2m_
Refs #33476 -- Reformatted code with Black.
fun_name: test_sequence_name_length_limits_flush | commit_id: 9c19aff7c7561e3a82978a272ecdaad40dda5c00
repo: django | file_name: tests.py | ast_levels: 10 | nloc: 11
url: https://github.com/django/django.git
complexity: 1 | token_counts: 60 | n_ast_errors: 0 | vocab_size: 44 | n_ast_nodes: 98 | language: Python
{ "docstring": "\n Sequence resetting as part of a flush with model with long name and\n long pk name doesn't error (#8901).\n ", "language": "en", "n_whitespaces": 41, "n_words": 19, "vocab_size": 16 }
def test_sequence_name_length_limits_flush(self): # A full flush is expensive to the full test, so we dig into the # internals to generate the likely offending SQL and run it manually # Some convenience aliases VLM = VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ VLM_m2m = ( VLM.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.through ) tables = [ VLM._meta.db_table, VLM_m2m._meta.db_table, ] sql_list = connection.ops.sql_flush(no_style(), tables, reset_sequences=True) connection.ops.execute_sql_flush(sql_list)
d_id: 72,616 | id: 249,109 | n_whitespaces: 762
path: tests/rest/admin/test_media.py | n_words: 159 | n_identifiers: 38
def test_delete_media(self) -> None: download_resource = self.media_repo.children[b"download"] upload_resource = self.media_repo.children[b"upload"] # Upload some media into the room response = self.helper.upload_media( upload_resource, SMALL_PNG, tok=self.admin_user_tok, expect_code=200, ) # Extract media ID from the response server_and_media_id = r
Use literals in place of `HTTPStatus` constants in tests (#13469)
fun_name: test_delete_media | commit_id: c97042f7eef3748e17c90e48a4122389a89c4735
repo: synapse | file_name: test_media.py | ast_levels: 11 | nloc: 61
url: https://github.com/matrix-org/synapse.git
complexity: 1 | token_counts: 297 | n_ast_errors: 0 | vocab_size: 91 | n_ast_nodes: 464 | language: Python
{ "docstring": "\n Tests that delete a media is successfully\n ", "language": "en", "n_whitespaces": 22, "n_words": 7, "vocab_size": 7 }
def test_delete_media(self) -> None: download_resource = self.media_repo.children[b"download"] upload_resource = self.media_repo.children[b"upload"] # Upload some media into the room response = self.helper.upload_media( upload_resource, SMALL_PNG, tok=self.admin_user_tok, expect_code=200, ) # Extract media ID from the response server_and_media_id = response["content_uri"][6:] # Cut off 'mxc://' server_name, media_id = server_and_media_id.split("/") self.assertEqual(server_name, self.server_name) # Attempt to access media channel = make_request( self.reactor, FakeSite(download_resource, self.reactor), "GET", server_and_media_id, shorthand=False, access_token=self.admin_user_tok, ) # Should be successful self.assertEqual( 200, channel.code, msg=( "Expected to receive a 200 on accessing media: %s" % server_and_media_id ), ) # Test if the file exists local_path = self.filepaths.local_media_filepath(media_id) self.assertTrue(os.path.exists(local_path)) url = "/_synapse/admin/v1/media/%s/%s" % (self.server_name, media_id) # Delete media channel = self.make_request( "DELETE", url, access_token=self.admin_user_tok, ) self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(1, channel.json_body["total"]) self.assertEqual( media_id, channel.json_body["deleted_media"][0], ) # Attempt to access media channel = make_request( self.reactor, FakeSite(download_resource, self.reactor), "GET", server_and_media_id, shorthand=False, access_token=self.admin_user_tok, ) self.assertEqual( HTTPStatus.NOT_FOUND, channel.code, msg=( "Expected to receive a HTTPStatus.NOT_FOUND on accessing deleted media: %s" % server_and_media_id ), ) # Test if the file is deleted self.assertFalse(os.path.exists(local_path))
d_id: 1,233 | id: 7,623 | n_whitespaces: 114
path: ludwig/visualize.py | n_words: 40 | n_identifiers: 20
def load_data_for_viz(load_type, model_file_statistics, **kwargs): supported_load_types = dict( load_json=load_json, load_from_file=partial( load_from_file, dtype=kwargs.get("dtype", int), ground_truth_split=kwar
Encoder refactor V2 (#2370) * Added base files and some initial code * More files created, fleshing out binary feature and corresponding encoders * Added more schema infra * Registered all feature encoders * Separated feature utils infra * Added all preprocessing classes * Filled out rest of schema configs * Fixed preproc dataclass * Fixed small errors blocking import * Tests should be passing * Deleted unnecesssary files and removed commented out code * fixed flake8 * Fixed most tests * fixed pattern validation * Fixed missing val strategies and solved custom encoder update issue * Removed preprocessing from features due to schema SSOT * fix flake 8 * Started encoder schema work * Parallel CNN Encoder * StackedCNN Encoder * Added image encoders * Finished sequence encoders * Partway through text encoders * Added text encoders * Bag Encoders * Binary and Date Encoders * category, date, h3, and set encoders * Wired up encoder schemas * Switched input feature encoder schema definitions * Fixed handful of issues * Fix schema issues * Refactored a bunch of test configs * Small changes * Removed default param from register_encoder * Schema working now, working on refactoring * Finished decoder schemas * Removed default param from register_decoder * Added some default params to output features and more decoder work * Refactored all input feature encoder/decoder referencing * Refactored pretty much all the tests * Added back constants * Solved gbm issue * Fixed save_load test * various fixes * Fixed import issue * Flake 8 and various fixes * Solved more failed tests * Refactored missed tests * Removed commented lines * Added init file for decoders schema * Fixed failing tests * Fixed hyperopt shared params test * Added backwards compatability logic and test * Flake 8 * removed comment * Added base files and some initial code * More files created, fleshing out binary feature and corresponding encoders * Added more schema infra * Registered all feature encoders * Separated feature utils infra * Added all preprocessing classes * Filled out rest of schema configs * Fixed preproc dataclass * Fixed small errors blocking import * Tests should be passing * Deleted unnecesssary files and removed commented out code * fixed flake8 * Fixed most tests * fixed pattern validation * Fixed missing val strategies and solved custom encoder update issue * Removed preprocessing from features due to schema SSOT * fix flake 8 * Started encoder schema work * Parallel CNN Encoder * StackedCNN Encoder * Added image encoders * Finished sequence encoders * Partway through text encoders * Added text encoders * Bag Encoders * Binary and Date Encoders * category, date, h3, and set encoders * Wired up encoder schemas * Switched input feature encoder schema definitions * Fixed handful of issues * Fix schema issues * Refactored a bunch of test configs * Small changes * Removed default param from register_encoder * Schema working now, working on refactoring * Finished decoder schemas * Removed default param from register_decoder * Added some default params to output features and more decoder work * Refactored all input feature encoder/decoder referencing * Refactored pretty much all the tests * Added back constants * Solved gbm issue * Fixed save_load test * various fixes * Fixed import issue * Flake 8 and various fixes * Solved more failed tests * Refactored missed tests * Removed commented lines * Added init file for decoders schema * Fixed failing tests * Fixed hyperopt shared params test * Added backwards compatability logic 
and test * Flake 8 * removed comment * Skipping CTRL Encoder test since it's blasting memory * Fixed audio_feature test * Addressed failing tests * Fixed backwards compatability * Fixed more failing tests * Flake 8 * Fixed more tests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Refactored default logic for all features * Fixed H3 weighted_sum encoder wrong type * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix import issue * Mark slow HF tests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed defaults tests * Pin Ray nightly version * fix link * pin torch to 07/26 * cleanup * upgrade ray pinned version to enable parquet partition filtering * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * downgrade Ray to ensure TensorDtypes are not inferred during Ray Dataset <=> Dask conversions * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Removed custom encoder decoder helper method * unpin torch * Flake 8 * Daniel feedback * Small fixes * Fixed default weights init * Added test with encoder dependencies for global defaults * Fixed Arnav's test * Addressed Arnav's feedback * Address nit * Addressed feedback * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Address nit * Fix test * Initial feedback refactor * More refactoring * Added vocab field to all text_encoder configs * More refactoring * Fixed more tests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix audio feature test, also s/logging/logger. * param names should start with lowercase s/N/n * Re-added schema utils used in encoder refactor. * Removes unused overwrite_defaults() * Oops, name is passed to feature as a kwarg not a member of the feature config. Why? Probably should change that. * Change lowercase default back to True. Fixes test_strings_utils * Set feature validation error with output size 1. * MLP mixer encoder needs num_channels. * Use schema.dump instead of .__dict__ to convert marshmallow dataclass to dict * (x,) in python is a tuple with a single element x. Watch out for this when defining schemas. * Construct features by using build_single_input/output to share code for deserializing feature configs. Also changes ECD to BaseModel, IMO its confusing to import ECD to use a class method from BaseModel. * Fix test_trainer_utils, adds convenience method BaseFeature.load_from_dictionary * Use feature load_from_dictionary instead of BaseModel in feature tests. * Populate encoder and decoder types in shared test fixtures, fixes error expectations in test_validate_config_combiner.py * Fixes test_validate_config_misc.py by ensuring only one option of OneOf allows None, because OneOf fails validation if more than one condition match. * Updates test_defaults.py * Adds type, column, proc_column to feature schemas. Revert feature tests by passing in config dict again. * decorate feature base classes with @dataclass, fixes failure building input features in trainer. * Implement _serialize for PreprocessingDataclassField. * use type(feature) to get schema class. * Fix test_trainer_utils.py * audio_feature requires embedding_size, but passthrough encoder does not have this property. 
Technically, passthrough encoder is not supported for audio features. * Wow, apparently the order of elements in the oneOf affects which error message we get from jsonschema. * Get default encoders from feature schema. * Get encoder defaults from schema in config_utils.py * Make number feature allow decoders without clip property * s/list/List * Adds reduce_output to h3 encoder. * Moves decoder params into nested decoder. * Update processing parameters with computed_fill_value. * Removes test code. * Adds input_size to decoder base because some features assume decoders have an input_size * dense encoder not supported for bag features, changed to embed. * Adds input_size param to dense encoder schema, since its a required parameter of dense encoder. * Fixes vector feature input_size in encoder metadata. * Fixes test reducers, set sequence reduce mode in output feature base. * Don't nest encoder parameters in decoder * Fixes test_torchscript, get num_classes from encoder config. * Audio feature padding is float, not int. * Adds temp check for threshold to fix GBM tests. * Adds missing value strategy drop_row for vector feature in test. * Drop row should work even if computed_fill_value is an empty string * Removes duplicated TOP_K constant. * Consolidated set_default_values * Removes commented-out defaults. * Remove load_config from OutputFeature, it isn't doing anything here. * Removes comment. * Fix type annotations for input/output feature constructors. * Fixes output feature dependencies being ignored. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Adds test for construction of output features with dependencies. * Encoder/Decoder config now lives on encoder/decoder object * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixes decoder params to match their respective classes. Moves fc_stack params and threshold back to output feature. * Make clip property of number output feature again. * Adds threshold property to set feature schema, use this property instead of storing it in the decoder. * input_size in output_feature instead of decoder. * Made vector_size property of vector_feature. * Fixed gbm tests * Fixed flake 8 * Re-adds num_classes as member of category output feature. * Makes vocab_size match vocab used in preprocessing. * num_classes in CategoryOutputFeature. * Moves num_classes from decoder to category output feature. * Fixes test_model_training_options. Copies fc_layer keys into decoder if they are present on output features. * Adds field descriptors for fc_layers params in BaseOutputFeatureConfig. Co-authored-by: connor-mccorm <[email protected]> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: connor-mccorm <[email protected]> Co-authored-by: Geoffrey Angus <[email protected]> Co-authored-by: Arnav Garg <[email protected]> Co-authored-by: Daniel Treiman <[email protected]>
fun_name: load_data_for_viz | commit_id: 03b4ab273abd7e22a56bb550b56f3d667200abf9
repo: ludwig | file_name: visualize.py | ast_levels: 15 | nloc: 14
url: https://github.com/ludwig-ai/ludwig.git
complexity: 3 | token_counts: 86 | n_ast_errors: 0 | vocab_size: 37 | n_ast_nodes: 139 | language: Python
{ "docstring": "Load model file data in to list of .\n\n :param load_type: type of the data loader to be used.\n :param model_file_statistics: JSON file or list of json files containing any\n model experiment stats.\n :return List of training statistics loaded as json objects.\n ", "language": "en", "n_whitespaces": 64, "n_words": 42, "vocab_size": 32 }
def load_data_for_viz(load_type, model_file_statistics, **kwargs): supported_load_types = dict( load_json=load_json, load_from_file=partial( load_from_file, dtype=kwargs.get("dtype", int), ground_truth_split=kwargs.get("ground_truth_split", 2) ), ) loader = supported_load_types[load_type] try: stats_per_model = [loader(stats_f) for stats_f in model_file_statistics] except (TypeError, AttributeError): logger.exception(f"Unable to open model statistics file {model_file_statistics}!") raise return stats_per_model
d_id: 1,663 | id: 9,733 | n_whitespaces: 134
path: gensim/models/doc2vec.py | n_words: 52 | n_identifiers: 15
def scan_vocab(self, corpus_iterable=None, corpus_file=None, progress_per=100000, trim_rule=None): logger.info("collecting all words and their counts") if corpus_file
re #2809: update the doc2vec notebook
fun_name: scan_vocab | commit_id: 490676cc34d909b8a361fa1ae1e835263a13673b
repo: gensim | file_name: doc2vec.py | ast_levels: 10 | nloc: 10
url: https://github.com/RaRe-Technologies/gensim.git
complexity: 2 | token_counts: 83 | n_ast_errors: 0 | vocab_size: 43 | n_ast_nodes: 127 | language: Python
{ "docstring": "Create the model's vocabulary: a mapping from unique words in the corpus to their frequency count.\n\n Parameters\n ----------\n documents : iterable of :class:`~gensim.models.doc2vec.TaggedDocument`, optional\n The tagged documents used to create the vocabulary. Their tags can be either str tokens or ints (faster).\n corpus_file : str, optional\n Path to a corpus file in :class:`~gensim.models.word2vec.LineSentence` format.\n You may use this argument instead of `documents` to get performance boost. Only one of `documents` or\n `corpus_file` arguments need to be passed (not both of them).\n progress_per : int\n Progress will be logged every `progress_per` documents.\n trim_rule : function, optional\n Vocabulary trimming rule, specifies whether certain words should remain in the vocabulary,\n be trimmed away, or handled using the default (discard if word count < min_count).\n Can be None (min_count will be used, look to :func:`~gensim.utils.keep_vocab_item`),\n or a callable that accepts parameters (word, count, min_count) and returns either\n :attr:`gensim.utils.RULE_DISCARD`, :attr:`gensim.utils.RULE_KEEP` or :attr:`gensim.utils.RULE_DEFAULT`.\n The rule, if given, is only used to prune vocabulary during\n :meth:`~gensim.models.doc2vec.Doc2Vec.build_vocab` and is not stored as part of the model.\n\n The input parameters are of the following types:\n * `word` (str) - the word we are examining\n * `count` (int) - the word's frequency count in the corpus\n * `min_count` (int) - the minimum count threshold.\n\n Returns\n -------\n (int, int)\n Tuple of `(total words in the corpus, number of documents)`.\n\n ", "language": "en", "n_whitespaces": 487, "n_words": 218, "vocab_size": 148 }
def scan_vocab(self, corpus_iterable=None, corpus_file=None, progress_per=100000, trim_rule=None): logger.info("collecting all words and their counts") if corpus_file is not None: corpus_iterable = TaggedLineDocument(corpus_file) total_words, corpus_count = self._scan_vocab(corpus_iterable, progress_per, trim_rule) logger.info( "collected %i word types and %i unique tags from a corpus of %i examples and %i words", len(self.raw_vocab), len(self.dv), corpus_count, total_words, ) return total_words, corpus_count
d_id: 56,800 | id: 222,893 | n_whitespaces: 1,131
path: python3.10.4/Lib/distutils/dist.py | n_words: 337 | n_identifiers: 50
def _parse_command_opts(self, parser, args): # late import because of mutual dependence between these modules from distutils.cmd import Command # Pull the current command from the head of the command line command = args[0] if not command_re.match(command): raise SystemExit("invalid command name '%s'" % command) self.commands.append(command) # Dig up the command class that implements this command, so we # 1) know that it's a valid command, and 2) know which options # it takes. try: cmd_class = self.get_command_class(command) except DistutilsModuleError as msg: raise DistutilsArgError(msg) # Require that the command class be derived from Command -- want # to be sure that the basic "command" interface is implemented. if not issubclass(cmd_class, Command): raise Distut
add python 3.10.4 for windows
fun_name: _parse_command_opts | commit_id: 8198943edd73a363c266633e1aa5b2a9e9c9f526
repo: XX-Net | file_name: dist.py | ast_levels: 19 | nloc: 54
url: https://github.com/XX-net/XX-Net.git
complexity: 18 | token_counts: 357 | n_ast_errors: 0 | vocab_size: 203 | n_ast_nodes: 597 | language: Python
{ "docstring": "Parse the command-line options for a single command.\n 'parser' must be a FancyGetopt instance; 'args' must be the list\n of arguments, starting with the current command (whose options\n we are about to parse). Returns a new version of 'args' with\n the next command at the front of the list; will be the empty\n list if there are no more commands on the command line. Returns\n None if the user asked for help on this command.\n ", "language": "en", "n_whitespaces": 126, "n_words": 75, "vocab_size": 48 }
def _parse_command_opts(self, parser, args): # late import because of mutual dependence between these modules from distutils.cmd import Command # Pull the current command from the head of the command line command = args[0] if not command_re.match(command): raise SystemExit("invalid command name '%s'" % command) self.commands.append(command) # Dig up the command class that implements this command, so we # 1) know that it's a valid command, and 2) know which options # it takes. try: cmd_class = self.get_command_class(command) except DistutilsModuleError as msg: raise DistutilsArgError(msg) # Require that the command class be derived from Command -- want # to be sure that the basic "command" interface is implemented. if not issubclass(cmd_class, Command): raise DistutilsClassError( "command class %s must subclass Command" % cmd_class) # Also make sure that the command object provides a list of its # known options. if not (hasattr(cmd_class, 'user_options') and isinstance(cmd_class.user_options, list)): msg = ("command class %s must provide " "'user_options' attribute (a list of tuples)") raise DistutilsClassError(msg % cmd_class) # If the command class has a list of negative alias options, # merge it in with the global negative aliases. negative_opt = self.negative_opt if hasattr(cmd_class, 'negative_opt'): negative_opt = negative_opt.copy() negative_opt.update(cmd_class.negative_opt) # Check for help_options in command class. They have a different # format (tuple of four) so we need to preprocess them here. if (hasattr(cmd_class, 'help_options') and isinstance(cmd_class.help_options, list)): help_options = fix_help_options(cmd_class.help_options) else: help_options = [] # All commands support the global options too, just by adding # in 'global_options'. parser.set_option_table(self.global_options + cmd_class.user_options + help_options) parser.set_negative_aliases(negative_opt) (args, opts) = parser.getopt(args[1:]) if hasattr(opts, 'help') and opts.help: self._show_help(parser, display_options=0, commands=[cmd_class]) return if (hasattr(cmd_class, 'help_options') and isinstance(cmd_class.help_options, list)): help_option_found=0 for (help_option, short, desc, func) in cmd_class.help_options: if hasattr(opts, parser.get_attr_name(help_option)): help_option_found=1 if callable(func): func() else: raise DistutilsClassError( "invalid help function %r for help option '%s': " "must be a callable object (function, etc.)" % (func, help_option)) if help_option_found: return # Put the options from the command-line into their official # holding pen, the 'command_options' dictionary. opt_dict = self.get_option_dict(command) for (name, value) in vars(opts).items(): opt_dict[name] = ("command line", value) return args
d_id: 69,784 | id: 242,085 | n_whitespaces: 658
path: scipy/stats/_distn_infrastructure.py | n_words: 213 | n_identifiers: 28
def interval(self, confidence=None, *args, **kwds): # This function was originally written with parameter `alpha`, but # `alpha` is also the name of a shape parameter of two distributions. # This block allows the functi
MAINT: stats: update deprecation warning version information
fun_name: interval | commit_id: 547d1bb522562a1ba38961d13932fffc2bb92edf
repo: scipy | file_name: _distn_infrastructure.py | ast_levels: 14 | nloc: 30
url: https://github.com/scipy/scipy.git
complexity: 10 | token_counts: 219 | n_ast_errors: 0 | vocab_size: 134 | n_ast_nodes: 366 | language: Python
{ "docstring": "Confidence interval with equal areas around the median.\n\n .. deprecated:: 1.9.0\n Parameter `alpha` is replaced by parameter `confidence` to avoid\n name collisions with the shape parameter `alpha` of some\n distributions. Parameter `alpha` will be removed in the second\n release after 1.9.0.\n\n Parameters\n ----------\n confidence : array_like of float\n Probability that an rv will be drawn from the returned range.\n Each value should be in the range [0, 1].\n arg1, arg2, ... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n loc : array_like, optional\n location parameter, Default is 0.\n scale : array_like, optional\n scale parameter, Default is 1.\n\n Returns\n -------\n a, b : ndarray of float\n end-points of range that contain ``100 * alpha %`` of the rv's\n possible values.\n\n ", "language": "en", "n_whitespaces": 333, "n_words": 128, "vocab_size": 90 }
def interval(self, confidence=None, *args, **kwds): # This function was originally written with parameter `alpha`, but # `alpha` is also the name of a shape parameter of two distributions. # This block allows the function to accept both `alpha` and its # replacement `confidence` during a deprecation period; it can be # removed in the second release after 1.9.0. # See description of logic in `moment` method. has_shape_alpha = (self.shapes is not None and "alpha" in (self.shapes.split(", "))) got_confidence = confidence is not None got_keyword_alpha = kwds.get("alpha", None) is not None if not got_confidence and ((not got_keyword_alpha) or (got_keyword_alpha and has_shape_alpha)): message = ("interval() missing 1 required positional argument: " "`confidence`") raise TypeError(message) if got_keyword_alpha and not has_shape_alpha: if got_confidence: # this will change to "interval got unexpected argument alpha" message = "interval() got multiple values for first argument" raise TypeError(message) else: message = ("Use of keyword argument `alpha` for method " "`interval` is deprecated. Use first positional " "argument or keyword argument `confidence` " "instead.") confidence = kwds.pop("alpha") warnings.warn(message, DeprecationWarning, stacklevel=2) alpha = confidence alpha = asarray(alpha) if np.any((alpha > 1) | (alpha < 0)): raise ValueError("alpha must be between 0 and 1 inclusive") q1 = (1.0-alpha)/2 q2 = (1.0+alpha)/2 a = self.ppf(q1, *args, **kwds) b = self.ppf(q2, *args, **kwds) return a, b
d_id: 21,158 | id: 101,754 | n_whitespaces: 336
path: tools/alignments/jobs_faces.py | n_words: 80 | n_identifiers: 25
def __call__(self) -> bool: for meta in tqdm(self._face_alignments, desc="Updating Alignments File from PNG Header", leave=False): src = meta["source"] alignment = meta["alignments"] if not any(alignment.get(key, {}) for key in self._updatable_keys): continue faces = self._alignments.get_faces_in_frame(src["source_filename"]) if len(faces) < src["face_index"] + 1: # list index out of range logger.debug("Skipped face '%s'. Index does not exist in alignments file", src["original_filename"]) continue face = faces[src["face_index"]] self._check_and_update(alignment, face) retval = False if self._counts: retval = True logger.info("Updated alignments file from PNG Data: %s", self._counts) return retval
Alignments Tool updates - Copy info back to alignments file from faces
fun_name: __call__ | commit_id: c79175cbde5600bebd65785f3821fc74b3a80cbe
repo: faceswap | file_name: jobs_faces.py | ast_levels: 13 | nloc: 27
url: https://github.com/deepfakes/faceswap.git
complexity: 6 | token_counts: 138 | n_ast_errors: 0 | vocab_size: 62 | n_ast_nodes: 231 | language: Python
{ "docstring": " Parse through the face data updating any entries in the alignments file.\n\n Returns\n -------\n bool\n ``True`` if any alignment information was updated otherwise ``False``\n ", "language": "en", "n_whitespaces": 64, "n_words": 24, "vocab_size": 22 }
def __call__(self) -> bool: for meta in tqdm(self._face_alignments, desc="Updating Alignments File from PNG Header", leave=False): src = meta["source"] alignment = meta["alignments"] if not any(alignment.get(key, {}) for key in self._updatable_keys): continue faces = self._alignments.get_faces_in_frame(src["source_filename"]) if len(faces) < src["face_index"] + 1: # list index out of range logger.debug("Skipped face '%s'. Index does not exist in alignments file", src["original_filename"]) continue face = faces[src["face_index"]] self._check_and_update(alignment, face) retval = False if self._counts: retval = True logger.info("Updated alignments file from PNG Data: %s", self._counts) return retval
d_id: 116,979 | id: 319,622 | n_whitespaces: 116
path: src/documents/tests/test_file_handling.py | n_words: 22 | n_identifiers: 19
def test_dynamic_path(self): doc = Document.objects.create( title="does not matter", created=timezone.make_aware(datetime.datetime(2020, 6, 25, 7, 36, 51, 153)),
Feature: Dynamic document storage pathes (#916) * Added devcontainer * Add feature storage pathes * Exclude tests and add versioning * Check escaping * Check escaping * Check quoting * Echo * Escape * Escape : * Double escape \ * Escaping * Remove if * Escape colon * Missing \ * Esacpe : * Escape all * test * Remove sed * Fix exclude * Remove SED command * Add LD_LIBRARY_PATH * Adjusted to v1.7 * Updated test-cases * Remove devcontainer * Removed internal build-file * Run pre-commit * Corrected flak8 error * Adjusted to v1.7 * Updated test-cases * Corrected flak8 error * Adjusted to new plural translations * Small adjustments due to code-review backend * Adjusted line-break * Removed PAPERLESS prefix from settings variables * Corrected style change due to search+replace * First documentation draft * Revert changes to Pipfile * Add sphinx-autobuild with keep-outdated * Revert merge error that results in wrong storage path is evaluated * Adjust styles of generated files ... * Adds additional testing to cover dynamic storage path functionality * Remove unnecessary condition * Add hint to edit storage path dialog * Correct spelling of pathes to paths * Minor documentation tweaks * Minor typo * improving wrapping of filter editor buttons with new storage path button * Update .gitignore * Fix select border radius in non input-groups * Better storage path edit hint * Add note to edit storage path dialog re document_renamer * Add note to bulk edit storage path re document_renamer * Rename FILTER_STORAGE_DIRECTORY to PATH * Fix broken filter rule parsing * Show default storage if unspecified * Remove note re storage path on bulk edit * Add basic validation of filename variables Co-authored-by: Markus Kling <[email protected]> Co-authored-by: Trenton Holmes <[email protected]> Co-authored-by: Michael Shamoon <[email protected]> Co-authored-by: Quinn Casey <[email protected]>
fun_name: test_dynamic_path | commit_id: 69ef26dab04d51e7e102dcb33cd98ddc6ad975fd
repo: paperless-ngx | file_name: test_file_handling.py | ast_levels: 13 | nloc: 10
url: https://github.com/paperless-ngx/paperless-ngx.git
complexity: 1 | token_counts: 81 | n_ast_errors: 0 | vocab_size: 22 | n_ast_nodes: 127 | language: Python
{ "docstring": "\n GIVEN:\n - A document with a defined storage path\n WHEN:\n - the filename is generated for the document\n THEN:\n - the generated filename uses the defined storage path for the document\n ", "language": "en", "n_whitespaces": 93, "n_words": 31, "vocab_size": 17 }
def test_dynamic_path(self): doc = Document.objects.create( title="does not matter", created=timezone.make_aware(datetime.datetime(2020, 6, 25, 7, 36, 51, 153)), mime_type="application/pdf", pk=2, checksum="2", storage_path=StoragePath.objects.create(path="TestFolder/{created}"), ) self.assertEqual(generate_filename(doc), "TestFolder/2020-06-25.pdf")
d_id: 34,736 | id: 150,412 | n_whitespaces: 71
path: freqtrade/rpc/replicate/__init__.py | n_words: 17 | n_identifiers: 8
async def follower_loop(self): try: await self._connect_to_leaders() except Exception as e: logger.error("Exception occurred in follower loop: ") logger.exception(e)
initial concept for replicate, basic leader and follower logic
fun_name: follower_loop | commit_id: 9f6bba40af1a407f190a89f5c0c8b4e3f528ba46
repo: freqtrade | file_name: __init__.py | ast_levels: 11 | nloc: 6
url: https://github.com/freqtrade/freqtrade.git
complexity: 2 | token_counts: 31 | n_ast_errors: 0 | vocab_size: 17 | n_ast_nodes: 60 | language: Python
{ "docstring": "\n Main follower coroutine\n\n This starts all of the leader connection coros\n ", "language": "en", "n_whitespaces": 33, "n_words": 11, "vocab_size": 11 }
async def follower_loop(self): try: await self._connect_to_leaders() except Exception as e: logger.error("Exception occurred in follower loop: ") logger.exception(e)
d_id: 21,711 | id: 103,727 | n_whitespaces: 52
path: kitty_tests/check_build.py | n_words: 18 | n_identifiers: 11
def test_launcher_ensures_stdio(self): from kitty.constants import kitty_exe import subprocess exe = kitty_exe() cp = subprocess.run([exe, '+runpy', ]) self.assertEqual(cp.returncode, 0)
Fix regression in 0.26.0 that caused launching kitty without working STDIO handles to result in high CPU usage and prewarming failing Fixes #5444
fun_name: test_launcher_ensures_stdio | commit_id: 6604e0d015fbd7a3e5602a6f3831d786b4ed659d
repo: kitty | file_name: check_build.py | ast_levels: 10 | nloc: 15
url: https://github.com/kovidgoyal/kitty.git
complexity: 1 | token_counts: 42 | n_ast_errors: 0 | vocab_size: 16 | n_ast_nodes: 71 | language: Python
{ "docstring": "\\\nimport os, sys\nif sys.stdin:\n os.close(sys.stdin.fileno())\nif sys.stdout:\n os.close(sys.stdout.fileno())\nif sys.stderr:\n os.close(sys.stderr.fileno())\nos.execlp('kitty', 'kitty', '+runpy', 'import sys; raise SystemExit(1 if sys.stdout is None or sys.stdin is None or sys.stderr is None else 0)')\n", "language": "en", "n_whitespaces": 37, "n_words": 34, "vocab_size": 26 }
def test_launcher_ensures_stdio(self): from kitty.constants import kitty_exe import subprocess exe = kitty_exe() cp = subprocess.run([exe, '+runpy', ]) self.assertEqual(cp.returncode, 0)
d_id: 78,160 | id: 265,647 | n_whitespaces: 106
path: netbox/dcim/tests/test_forms.py | n_words: 20 | n_identifiers: 13
def test_interface_label_count_mismatch(self):
Fixes #10247: Allow changing selected device/VM when creating a new component (#10312) * Initial work on #10247 * Continued work on #10247 * Clean up component creation tests * Move valdiation of replicated field to form * Clean up ordering of fields in component creation forms * Omit fieldset header if none * Clean up ordering of fields in component template creation forms * View tests should not move component templates to new device type * Define replication_fields on VMInterfaceCreateForm * Clean up expandable field help texts * Update comments * Update component bulk update forms & views to support new replication fields * Fix ModularDeviceComponentForm parent class * Fix bulk creation of VM interfaces (thanks @kkthxbye-code!)
fun_name: test_interface_label_count_mismatch | commit_id: c4b7ab067a914349abd88398dd9bfef9f6c2f806
repo: netbox | file_name: test_forms.py | ast_levels: 10 | nloc: 10
url: https://github.com/netbox-community/netbox.git
complexity: 1 | token_counts: 58 | n_ast_errors: 0 | vocab_size: 19 | n_ast_nodes: 105 | language: Python
{ "docstring": "\n Check that attempting to generate a differing number of names and labels results in a validation error.\n ", "language": "en", "n_whitespaces": 32, "n_words": 17, "vocab_size": 16 }
def test_interface_label_count_mismatch(self): bad_interface_data = { 'device': self.device.pk, 'name': 'eth[0-9]', 'label': 'Interface[0-1]', 'type': InterfaceTypeChoices.TYPE_1GE_GBIC, } form = InterfaceCreateForm(bad_interface_data) self.assertFalse(form.is_valid()) self.assertIn('label', form.errors)
d_id: 75,716 | id: 259,334 | n_whitespaces: 22
path: sklearn/preprocessing/_data.py | n_words: 13 | n_identifiers: 8
def power_transform(X, method="yeo-johnson", *, standardize=True, copy=True): pt = PowerTransformer(method=method, stand
DOC Ensures that preprocessing._data.power_transform passes numpydoc validation (#22802) Co-authored-by: Jérémie du Boisberranger <[email protected]>
fun_name: power_transform | commit_id: 5cccdef4378fcdb863467414ee638c6f5e51a19a
repo: scikit-learn | file_name: _data.py | ast_levels: 9 | nloc: 3
url: https://github.com/scikit-learn/scikit-learn.git
complexity: 1 | token_counts: 43 | n_ast_errors: 0 | vocab_size: 13 | n_ast_nodes: 67 | language: Python
{ "docstring": "Parametric, monotonic transformation to make data more Gaussian-like.\n\n Power transforms are a family of parametric, monotonic transformations\n that are applied to make data more Gaussian-like. This is useful for\n modeling issues related to heteroscedasticity (non-constant variance),\n or other situations where normality is desired.\n\n Currently, power_transform supports the Box-Cox transform and the\n Yeo-Johnson transform. The optimal parameter for stabilizing variance and\n minimizing skewness is estimated through maximum likelihood.\n\n Box-Cox requires input data to be strictly positive, while Yeo-Johnson\n supports both positive or negative data.\n\n By default, zero-mean, unit-variance normalization is applied to the\n transformed data.\n\n Read more in the :ref:`User Guide <preprocessing_transformer>`.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The data to be transformed using a power transformation.\n\n method : {'yeo-johnson', 'box-cox'}, default='yeo-johnson'\n The power transform method. Available methods are:\n\n - 'yeo-johnson' [1]_, works with positive and negative values\n - 'box-cox' [2]_, only works with strictly positive values\n\n .. versionchanged:: 0.23\n The default value of the `method` parameter changed from\n 'box-cox' to 'yeo-johnson' in 0.23.\n\n standardize : bool, default=True\n Set to True to apply zero-mean, unit-variance normalization to the\n transformed output.\n\n copy : bool, default=True\n Set to False to perform inplace computation during transformation.\n\n Returns\n -------\n X_trans : ndarray of shape (n_samples, n_features)\n The transformed data.\n\n See Also\n --------\n PowerTransformer : Equivalent transformation with the\n Transformer API (e.g. as part of a preprocessing\n :class:`~sklearn.pipeline.Pipeline`).\n\n quantile_transform : Maps data to a standard normal distribution with\n the parameter `output_distribution='normal'`.\n\n Notes\n -----\n NaNs are treated as missing values: disregarded in ``fit``, and maintained\n in ``transform``.\n\n For a comparison of the different scalers, transformers, and normalizers,\n see :ref:`examples/preprocessing/plot_all_scaling.py\n <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.\n\n References\n ----------\n\n .. [1] I.K. Yeo and R.A. Johnson, \"A new family of power transformations to\n improve normality or symmetry.\" Biometrika, 87(4), pp.954-959,\n (2000).\n\n .. [2] G.E.P. Box and D.R. Cox, \"An Analysis of Transformations\", Journal\n of the Royal Statistical Society B, 26, 211-252 (1964).\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn.preprocessing import power_transform\n >>> data = [[1, 2], [3, 2], [4, 5]]\n >>> print(power_transform(data, method='box-cox'))\n [[-1.332... -0.707...]\n [ 0.256... -0.707...]\n [ 1.076... 1.414...]]\n\n .. warning:: Risk of data leak.\n Do not use :func:`~sklearn.preprocessing.power_transform` unless you\n know what you are doing. A common mistake is to apply it to the entire\n data *before* splitting into training and test sets. This will bias the\n model evaluation because information would have leaked from the test\n set to the training set.\n In general, we recommend using\n :class:`~sklearn.preprocessing.PowerTransformer` within a\n :ref:`Pipeline <pipeline>` in order to prevent most risks of data\n leaking, e.g.: `pipe = make_pipeline(PowerTransformer(),\n LogisticRegression())`.\n ", "language": "en", "n_whitespaces": 771, "n_words": 421, "vocab_size": 267 }
def power_transform(X, method="yeo-johnson", *, standardize=True, copy=True): pt = PowerTransformer(method=method, standardize=standardize, copy=copy) return pt.fit_transform(X)
d_id: 41,742 | id: 176,172 | n_whitespaces: 29
path: networkx/generators/small.py | n_words: 17 | n_identifiers: 5
def desargues_graph(create_using=None): G = LCF_graph(20, [5, -5, 9, -9], 5, create_using) G.name = "Desargues Graph" return G
Docstrings for the small.py module (#5240) * added description for the first 5 small graphs * modified descriptions based on comment and added description for two more functions * added doctrings to all the functions * Minor touchups. Co-authored-by: Ross Barnowski <[email protected]>
fun_name: desargues_graph | commit_id: dec723f072eb997a497a159dbe8674cd39999ee9
repo: networkx | file_name: small.py | ast_levels: 10 | nloc: 4
url: https://github.com/networkx/networkx.git
complexity: 1 | token_counts: 37 | n_ast_errors: 0 | vocab_size: 15 | n_ast_nodes: 58 | language: Python
{ "docstring": "\n Returns the Desargues Graph\n\n The Desargues Graph is a non-planar, distance-transitive cubic graph\n with 20 nodes and 30 edges [1]_.\n It is a symmetric graph. It can be represented in LCF notation\n as [5,-5,9,-9]^5 [2]_.\n\n Parameters\n ----------\n create_using : NetworkX graph constructor, optional (default=nx.Graph)\n Graph type to create. If graph instance, then cleared before populated.\n\n Returns\n -------\n G : networkx Graph\n Desargues Graph with 20 nodes and 30 edges\n\n References\n ----------\n .. [1] https://en.wikipedia.org/wiki/Desargues_graph\n .. [2] https://mathworld.wolfram.com/DesarguesGraph.html\n ", "language": "en", "n_whitespaces": 139, "n_words": 77, "vocab_size": 56 }
def desargues_graph(create_using=None): G = LCF_graph(20, [5, -5, 9, -9], 5, create_using) G.name = "Desargues Graph" return G
d_id: 1,175 | id: 7,282 | n_whitespaces: 180
path: ludwig/schema/features/utils.py | n_words: 36 | n_identifiers: 7
def get_output_feature_jsonschema(): output_feature_types = sorted(list(output_type_registry.keys())) return { "type": "array", "items": { "type": "object", "properties": { "name": {"type": "string"}, "type": {"type": "string", "enum": output_feature_types}, "column": {"type": "string"}, }, "additionalProperties": True, "allOf": get_outpu
Input/Output Feature Schema Refactor (#2147) * Added base files and some initial code * More files created, fleshing out binary feature and corresponding encoders * Added more schema infra * Registered all feature encoders * Separated feature utils infra * Added all preprocessing classes * Filled out rest of schema configs * Fixed preproc dataclass * Fixed small errors blocking import * Tests should be passing * Deleted unnecesssary files and removed commented out code * fixed flake8 * Fixed most tests * fixed pattern validation * Fixed missing val strategies and solved custom encoder update issue * Removed preprocessing from features due to schema SSOT * fix flake 8 * fix flake 8 * fix flake 8 * Using encoder/decoder registries * Address NIT * Address feedback * Adding constants, remove computed_fill_value, swapped in registries * Addressed Feedback * Flake8 * Making tied a constant * Added base feature classes * Added parameter metadata for computed fill value * Small fix * Add pattern back into string * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
fun_name: get_output_feature_jsonschema | commit_id: 6909ae16047d422b94ed4cbd1a753e6b34540ff9
repo: ludwig | file_name: utils.py | ast_levels: 14 | nloc: 16
url: https://github.com/ludwig-ai/ludwig.git
complexity: 1 | token_counts: 85 | n_ast_errors: 0 | vocab_size: 28 | n_ast_nodes: 167 | language: Python
{ "docstring": "This function returns a JSON schema structured to only requires a `type` key and then conditionally applies\n a corresponding output feature's field constraints.\n\n Returns: JSON Schema\n ", "language": "en", "n_whitespaces": 35, "n_words": 26, "vocab_size": 23 }
def get_output_feature_jsonschema(): output_feature_types = sorted(list(output_type_registry.keys())) return { "type": "array", "items": { "type": "object", "properties": { "name": {"type": "string"}, "type": {"type": "string", "enum": output_feature_types}, "column": {"type": "string"}, }, "additionalProperties": True, "allOf": get_output_feature_conds(), "required": ["name", "type"], }, }
d_id: 43,114 | id: 180,242 | n_whitespaces: 82
path: demo/blocks_component_shortcut/run.py | n_words: 29 | n_identifiers: 14
def greet(str): return str with gr.Blocks() as demo: with gr.Row(): text1 = gr.component("textarea") text2 = gr.TextArea() text3 = gr.templates.TextArea() text1.change(greet, text1, text2) text2.change(greet, text2, text3) text3.change(greet, text3, text1) demo
update-shortcut-syntax (#1234) * update-shortcut-syntax - fix&update gr.component - create a demo introducing shortcuts within Blocks * update-shortcut-syntax - tweaks * update-shortcut-syntax - tweaks * update-shortcut-syntax - fix formatting * update-shortcut-syntax - tweaks - fix tests * update-shortcut-syntax - tweaks - fix tests * update-shortcut-syntax - tweaks - fix tests
greet
2de9ee8bfb43dc1f6d71e16ed1fe18ea164edd4c
gradio
run.py
11
2
https://github.com/gradio-app/gradio.git
1
7
0
26
141
Python
{ "docstring": "\n You can make use of str shortcuts you use in Interface within Blocks as well.\n \n Interface shortcut example:\n Interface(greet, \"textarea\", \"textarea\")\n \n You can use \n 1. gr.component()\n 2. gr.templates.Template()\n 3. gr.Template()\n All the templates are listed in gradio/templates.py\n ", "language": "en", "n_whitespaces": 74, "n_words": 37, "vocab_size": 31 }
def greet(str): return str with gr.Blocks() as demo: with gr.Row(): text1 = gr.component("textarea") text2 = gr.TextArea() text3 = gr.templates.TextArea() text1.change(greet, text1, text2) text2.change(greet, text2, text3) text3.change(greet, text3, text1) demo.launch()
43,601
181,817
84
tpot/base.py
23
10
def _impute_values(self, features): if self.verbosity > 1:
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
_impute_values
388616b6247ca4ea8de4e2f340d6206aee523541
tpot
base.py
12
7
https://github.com/EpistasisLab/tpot.git
3
53
0
21
91
Python
{ "docstring": "Impute missing values in a feature set.\n\n Parameters\n ----------\n features: array-like {n_samples, n_features}\n A feature matrix\n\n Returns\n -------\n array-like {n_samples, n_features}\n ", "language": "en", "n_whitespaces": 81, "n_words": 21, "vocab_size": 17 }
def _impute_values(self, features): if self.verbosity > 1: print("Imputing missing values in feature set") if self._fitted_imputer is None: self._fitted_imputer = SimpleImputer(strategy="median") self._fitted_imputer.fit(features) return self._fitted_imputer.transform(features)
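A standalone sketch of the median-imputation step wrapped by the method above, assuming scikit-learn is available; the toy feature matrix is made up for illustration.
# Median imputation on a toy feature matrix, mirroring the SimpleImputer usage above.
import numpy as np
from sklearn.impute import SimpleImputer

features = np.array([[1.0, np.nan],
                     [3.0, 4.0],
                     [np.nan, 8.0]])
imputer = SimpleImputer(strategy="median")
imputed = imputer.fit_transform(features)   # NaNs replaced by per-column medians (2.0 and 6.0)
print(imputed)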
50,700
204,336
70
django/contrib/sites/shortcuts.py
34
9
def get_current_site(request): # Import is inside the function because its point is to avoid importing the # Site models when django.contrib.sites isn't installed. if apps.is_installed("django.contrib.sites"): from
Refs #33476 -- Reformatted code with Black.
get_current_site
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
shortcuts.py
10
6
https://github.com/django/django.git
2
35
0
29
66
Python
{ "docstring": "\n Check if contrib.sites is installed and return either the current\n ``Site`` object or a ``RequestSite`` object based on the request.\n ", "language": "en", "n_whitespaces": 30, "n_words": 20, "vocab_size": 18 }
def get_current_site(request): # Import is inside the function because its point is to avoid importing the # Site models when django.contrib.sites isn't installed. if apps.is_installed("django.contrib.sites"): from .models import Site return Site.objects.get_current(request) else: return RequestSite(request)
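A hedged usage sketch of the helper above inside a plain Django view; the view name and the rendered message are illustrative, and a configured Django project is assumed.
# Illustrative view: works whether or not django.contrib.sites is installed,
# because get_current_site() falls back to RequestSite as shown above.
from django.contrib.sites.shortcuts import get_current_site
from django.http import HttpResponse

def site_banner(request):                  # hypothetical view name
    site = get_current_site(request)       # Site or RequestSite
    return HttpResponse(f"Welcome to {site.name} ({site.domain})")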
77,782
264,670
235
netbox/extras/scripts.py
103
28
def get_scripts(use_names=False): scripts = OrderedDict() # Iterate through all modules within the scripts path. These are the user-created files in which reports are # defined. for importer, module_name, _ in pkgutil.iter_modules([settings.SCRIPTS_ROOT]): # Remove cached module to ensu
Save old JobResults
get_scripts
f13a00b2dd33bffc3048c861b494096df457f212
netbox
scripts.py
12
17
https://github.com/netbox-community/netbox.git
11
156
0
66
247
Python
{ "docstring": "\n Return a dict of dicts mapping all scripts to their modules. Set use_names to True to use each module's human-\n defined name in place of the actual module name.\n ", "language": "en", "n_whitespaces": 39, "n_words": 29, "vocab_size": 26 }
def get_scripts(use_names=False): scripts = OrderedDict() # Iterate through all modules within the scripts path. These are the user-created files in which reports are # defined. for importer, module_name, _ in pkgutil.iter_modules([settings.SCRIPTS_ROOT]): # Remove cached module to ensure consistency with filesystem if module_name in sys.modules: del sys.modules[module_name] module = importer.find_module(module_name).load_module(module_name) if use_names and hasattr(module, 'name'): module_name = module.name module_scripts = OrderedDict() script_order = getattr(module, "script_order", ()) ordered_scripts = [cls for cls in script_order if is_script(cls)] unordered_scripts = [cls for _, cls in inspect.getmembers(module, is_script) if cls not in script_order] for cls in [*ordered_scripts, *unordered_scripts]: module_scripts[cls.__name__] = cls if module_scripts: scripts[module_name] = module_scripts return scripts
78,268
266,015
74
netbox/extras/plugins/templates.py
24
9
def render(self, template_name, extra_context=None): if extra_context is None: extra_context = {} elif not isinstance(extra_context, dict): raise TypeError("extra_context must be a dictionary") return get_template(template_name).render({**self.context, *
Reorganize plugin resources
render
e7f54c5867cf49126bbf95e28633e4283c2bbcb2
netbox
templates.py
11
6
https://github.com/netbox-community/netbox.git
3
53
0
23
87
Python
{ "docstring": "\n Convenience method for rendering the specified Django template using the default context data. An additional\n context dictionary may be passed as `extra_context`.\n ", "language": "en", "n_whitespaces": 44, "n_words": 22, "vocab_size": 20 }
def render(self, template_name, extra_context=None): if extra_context is None: extra_context = {} elif not isinstance(extra_context, dict): raise TypeError("extra_context must be a dictionary") return get_template(template_name).render({**self.context, **extra_context})
@keras_export("keras.applications.mobilenet_v3.decode_predictions")
82,630
278,617
11
keras/applications/mobilenet_v3.py
6
4
def preprocess_input(x, data_format=None): return x @keras_export("keras.applications.
Remove pylint comments. PiperOrigin-RevId: 452353044
preprocess_input
3613c3defc39c236fb1592c4f7ba1a9cc887343a
keras
mobilenet_v3.py
7
2
https://github.com/keras-team/keras.git
1
12
1
6
32
Python
{ "docstring": "A placeholder method for backward compatibility.\n\n The preprocessing logic has been included in the mobilenet_v3 model\n implementation. Users are no longer required to call this method to\n normalize the input data. This method does nothing and only kept as a\n placeholder to align the API surface between old and new version of model.\n\n Args:\n x: A floating point `numpy.array` or a `tf.Tensor`.\n data_format: Optional data format of the image tensor/array. Defaults to\n None, in which case the global setting\n `tf.keras.backend.image_data_format()` is used (unless you changed it,\n it defaults to \"channels_last\").{mode}\n\n Returns:\n Unchanged `numpy.array` or `tf.Tensor`.\n ", "language": "en", "n_whitespaces": 152, "n_words": 95, "vocab_size": 76 }
def preprocess_input(x, data_format=None): return x @keras_export("keras.applications.mobilenet_v3.decode_predictions")
@register.simple_tag
16,502
76,338
10
wagtail/admin/templatetags/wagtailadmin_tags.py
5
7
def message_level_tag(message): return MESSAGE_TAGS.get(message.level)
Prevent custom MESSAGE_TAGS settings from leaking into admin styles Fixes a test failure against Django main. In #2552, a fix was applied to ensure that the project-level MESSAGE_TAGS setting was ignored, allowing end-users to customise that setting for their own projects without it leaking into Wagtail admin styles. Unfortunately, the test was flawed (or was broken in a Django regression at some point): in Django <=4.0, MESSAGE_TAGS was not affected by override_settings after the first request, which meant that unless the test was run in isolation, the custom classname that was supposed to flag up the problem never got applied, and the test always succeeded. The change to SVG icons broke the intent of #2552, since it used message.level_tag for the icon's classname (and this picks up MESSAGE_TAGS customisations), but due to the broken test this went unnoticed. https://github.com/django/django/commit/24b316536a7ee4c54a54f632de1852aecb4038c0 fixed the override_settings behaviour, making the test fail as it should have done long ago. Here we adjust the test to not rely on override_settings (so that it does what it's supposed to do on all Django versions), fix a test that gets broken as a side effect (because it's unnecessarily checking message.level_tag), and fixes our SVG-icon-powered message include to bypass the MESSAGE_TAGS setting like the old implementation did. Confusing? Yes.
message_level_tag
1838fbfb1a720e0a286c989dbdea03dfde6af4a5
wagtail
wagtailadmin_tags.py
8
2
https://github.com/wagtail/wagtail.git
1
15
1
5
34
Python
{ "docstring": "\n Return the tag for this message's level as defined in\n django.contrib.messages.constants.DEFAULT_TAGS, ignoring the project-level\n MESSAGE_TAGS setting (which end-users might customise).\n ", "language": "en", "n_whitespaces": 33, "n_words": 20, "vocab_size": 19 }
def message_level_tag(message): return MESSAGE_TAGS.get(message.level) @register.simple_tag
75,848
259,648
422
sklearn/metrics/_regression.py
141
19
def _check_reg_targets(y_true, y_pred, multioutput, dtype="numeric"): check_cons
ENH add D2 pinbal score and D2 absolute error score (#22118)
_check_reg_targets
aeeac1c1d634dc80abc93fb30b3fe48e1d709b64
scikit-learn
_regression.py
17
35
https://github.com/scikit-learn/scikit-learn.git
10
234
0
93
371
Python
{ "docstring": "Check that y_true and y_pred belong to the same regression task.\n\n Parameters\n ----------\n y_true : array-like\n\n y_pred : array-like\n\n multioutput : array-like or string in ['raw_values', uniform_average',\n 'variance_weighted'] or None\n None is accepted due to backward compatibility of r2_score().\n\n dtype : str or list, default=\"numeric\"\n the dtype argument passed to check_array.\n\n Returns\n -------\n type_true : one of {'continuous', continuous-multioutput'}\n The type of the true target data, as output by\n 'utils.multiclass.type_of_target'.\n\n y_true : array-like of shape (n_samples, n_outputs)\n Ground truth (correct) target values.\n\n y_pred : array-like of shape (n_samples, n_outputs)\n Estimated target values.\n\n multioutput : array-like of shape (n_outputs) or string in ['raw_values',\n uniform_average', 'variance_weighted'] or None\n Custom output weights if ``multioutput`` is array-like or\n just the corresponding argument if ``multioutput`` is a\n correct keyword.\n ", "language": "en", "n_whitespaces": 240, "n_words": 124, "vocab_size": 70 }
def _check_reg_targets(y_true, y_pred, multioutput, dtype="numeric"): check_consistent_length(y_true, y_pred) y_true = check_array(y_true, ensure_2d=False, dtype=dtype) y_pred = check_array(y_pred, ensure_2d=False, dtype=dtype) if y_true.ndim == 1: y_true = y_true.reshape((-1, 1)) if y_pred.ndim == 1: y_pred = y_pred.reshape((-1, 1)) if y_true.shape[1] != y_pred.shape[1]: raise ValueError( "y_true and y_pred have different number of output ({0}!={1})".format( y_true.shape[1], y_pred.shape[1] ) ) n_outputs = y_true.shape[1] allowed_multioutput_str = ("raw_values", "uniform_average", "variance_weighted") if isinstance(multioutput, str): if multioutput not in allowed_multioutput_str: raise ValueError( "Allowed 'multioutput' string values are {}. " "You provided multioutput={!r}".format( allowed_multioutput_str, multioutput ) ) elif multioutput is not None: multioutput = check_array(multioutput, ensure_2d=False) if n_outputs == 1: raise ValueError("Custom weights are useful only in multi-output cases.") elif n_outputs != len(multioutput): raise ValueError( "There must be equally many custom weights (%d) as outputs (%d)." % (len(multioutput), n_outputs) ) y_type = "continuous" if n_outputs == 1 else "continuous-multioutput" return y_type, y_true, y_pred, multioutput
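A small NumPy-only sketch of the shape handling implemented above: 1-D targets are promoted to single-column 2-D arrays before the output counts are compared. The arrays are illustrative.
# Mirrors the 1-D -> 2-D promotion performed before shapes are compared.
import numpy as np

y_true = np.array([3.0, -0.5, 2.0])
y_pred = np.array([2.5, 0.0, 2.0])

if y_true.ndim == 1:
    y_true = y_true.reshape((-1, 1))
if y_pred.ndim == 1:
    y_pred = y_pred.reshape((-1, 1))

assert y_true.shape == y_pred.shape == (3, 1)
y_type = "continuous" if y_true.shape[1] == 1 else "continuous-multioutput"
print(y_type)   # continuous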
31,945
140,391
417
python/ray/serve/deployment.py
73
18
def bind(self, *args, **kwargs) -> Union[ClassNode, FunctionNode]:
[Deployment Graph] Simplify our use of DeploymentSchema (#25202)
bind
820cf4fdcae6b274588e23b312d5255d1b418e10
ray
deployment.py
14
33
https://github.com/ray-project/ray.git
4
128
0
46
183
Python
{ "docstring": "Bind the provided arguments and return a class or function node.\n\n The returned bound deployment can be deployed or bound to other\n deployments to create a deployment graph.\n ", "language": "en", "n_whitespaces": 49, "n_words": 28, "vocab_size": 23 }
def bind(self, *args, **kwargs) -> Union[ClassNode, FunctionNode]: copied_self = copy(self) copied_self._func_or_class = "dummpy.module" schema_shell = deployment_to_schema(copied_self) if inspect.isfunction(self._func_or_class): return FunctionNode( self._func_or_class, args, # Used to bind and resolve DAG only, can take user input kwargs, # Used to bind and resolve DAG only, can take user input self._ray_actor_options or dict(), other_args_to_resolve={ "deployment_schema": schema_shell, "is_from_serve_deployment": True, }, ) else: return ClassNode( self._func_or_class, args, kwargs, cls_options=self._ray_actor_options or dict(), other_args_to_resolve={ "deployment_schema": schema_shell, "is_from_serve_deployment": True, }, )
20,605
101,184
150
tools/manual/faceviewer/viewport.py
24
15
def _obtain_mask(cls, detected_face, mask_type): mas
lib.detected_face.Mask - Add source + target offset and coverage to set_sub_crop method
_obtain_mask
32950897376b48e0f08b46385602e4df902cf49e
faceswap
viewport.py
12
10
https://github.com/deepfakes/faceswap.git
3
77
0
21
126
Python
{ "docstring": " Obtain the mask for the correct \"face\" centering that is used in the thumbnail display.\n\n Parameters\n -----------\n detected_face: :class:`lib.align.DetectedFace`\n The Detected Face object to obtain the mask for\n mask_type: str\n The type of mask to obtain\n\n Returns\n -------\n :class:`numpy.ndarray` or ``None``\n The single channel mask of requested mask type, if it exists, otherwise ``None``\n ", "language": "en", "n_whitespaces": 144, "n_words": 54, "vocab_size": 40 }
def _obtain_mask(cls, detected_face, mask_type): mask = detected_face.mask.get(mask_type) if not mask: return None if mask.stored_centering != "face": face = AlignedFace(detected_face.landmarks_xy) mask.set_sub_crop(face.pose.offset[mask.stored_centering], face.pose.offset["face"], centering="face") return mask.mask.squeeze()
118,284
322,908
337
examples/model_interpretation/task/senti/rnn/model.py
104
33
def forward(self, input, mask=None): forward_input, backward_input = paddle.chunk(input, chunks=2, axis=2) # elementwise-sum forward_x and backward_x # Shape: (batch_size, max_seq_len, hidden_size) h = paddle.add_n([forward_input, backward_input]) # Shape: (batch_size, hidden_size, 1) att_weight = self.att_weight.tile( repeat_times=(paddle.shape(h)[0], 1, 1)) # Shape: (batch_size, max_seq_len, 1) att_score = paddle.bmm(paddle.tanh(h), att_weight) if mask is not None: # mask, remove the effect of 'PAD' mask
Add NLP model interpretation (#1752) * upload NLP interpretation * fix problems and relocate project * remove abandoned picture * remove abandoned picture * fix dead link in README * fix dead link in README * fix code style problems * fix CR round 1 * remove .gitkeep files * fix code style * fix file encoding problem * fix code style * delete duplicated files due to directory rebuild * fix CR round 2 * fix code style * fix ernie tokenizer * fix code style * fix problem from CR round 1 * fix bugs * fix README * remove duplicated files * deal with diff of old and new tokenizer results * fix CR round 4 * fix code style * add missing dependence * fix broken import path * move some data file to cloud * MRC upper case to lower case Co-authored-by: Zeyu Chen <[email protected]> Co-authored-by: binlinquge <xxx> Co-authored-by: Guo Sheng <[email protected]>
forward
93cae49c0c572b5c1ac972759140fbe924b0374d
PaddleNLP
model.py
14
18
https://github.com/PaddlePaddle/PaddleNLP.git
2
211
0
70
329
Python
{ "docstring": "\n Args:\n input (paddle.Tensor) of shape (batch, seq_len, input_size): Tensor containing the features of the input sequence.\n mask (paddle.Tensor) of shape (batch, seq_len) :\n Tensor is a bool tensor, whose each element identifies whether the input word id is pad token or not. \n Defaults to `None`.\n ", "language": "en", "n_whitespaces": 113, "n_words": 45, "vocab_size": 34 }
def forward(self, input, mask=None): forward_input, backward_input = paddle.chunk(input, chunks=2, axis=2) # elementwise-sum forward_x and backward_x # Shape: (batch_size, max_seq_len, hidden_size) h = paddle.add_n([forward_input, backward_input]) # Shape: (batch_size, hidden_size, 1) att_weight = self.att_weight.tile( repeat_times=(paddle.shape(h)[0], 1, 1)) # Shape: (batch_size, max_seq_len, 1) att_score = paddle.bmm(paddle.tanh(h), att_weight) if mask is not None: # mask, remove the effect of 'PAD' mask = paddle.cast(mask, dtype='float32') mask = mask.unsqueeze(axis=-1) inf_tensor = paddle.full( shape=mask.shape, dtype='float32', fill_value=-INF) att_score = paddle.multiply(att_score, mask) + paddle.multiply( inf_tensor, (1 - mask)) # Shape: (batch_size, max_seq_len, 1) att_weight = F.softmax(att_score, axis=1) # Shape: (batch_size, lstm_hidden_size) reps = paddle.bmm(h.transpose(perm=(0, 2, 1)), att_weight).squeeze(axis=-1) reps = paddle.tanh(reps) return reps, att_weight
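A NumPy sketch of the masked attention pooling performed above: scores come from a learned weight, padded positions are pushed to minus infinity before the softmax, and the hidden states are summed with the resulting weights. Shapes and random values are made up; this is not the Paddle implementation.
# Masked attention pooling over a toy batch; padded positions end up with weight 0.
import numpy as np

rng = np.random.default_rng(0)
batch, seq_len, hidden = 2, 4, 3
h = rng.random((batch, seq_len, hidden))
mask = np.array([[1, 1, 1, 0],          # last position of sample 0 is padding
                 [1, 1, 0, 0]], dtype=np.float32)

att_weight = rng.random((hidden, 1))                  # stand-in for the learned attention weight
score = np.tanh(h) @ att_weight                       # (batch, seq_len, 1)
score = np.where(mask[..., None] > 0, score, -np.inf)

score = score - score.max(axis=1, keepdims=True)      # numerically stable softmax
alpha = np.exp(score)
alpha = alpha / alpha.sum(axis=1, keepdims=True)      # (batch, seq_len, 1)

reps = (h * alpha).sum(axis=1)                        # (batch, hidden) pooled representations
print(alpha.squeeze(-1))                              # rows sum to 1; padded slots are exactly 0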
70,242
244,108
133
mmdet/models/dense_heads/maskformer_head.py
33
15
def simple_test(self, feats, img_metas, **kwargs): all_cls_scores, all_mask_preds = self(feats, img_metas) mask_cls_results = all_cls_scores[-1] mask_pred_results = all_mask_preds[-1] # upsample masks img_shape = img_metas[0]['batch_input_shape'] mask_pred_results = F.interpolate( mask_pred_results, size=(img_shape[0], img_shape[1]), mode='bilinear', align_corners=False) return mask_cls_results, mask_pred_results
[Enhance] MaskFormer refactor (#7471) * maskformer refactor update docstring update docstring update unit test update unit test update unit test * remove redundant code * update unit test
simple_test
4bb184bae070f37febb10f82bee3a217dc1ad7c5
mmdetection
maskformer_head.py
11
11
https://github.com/open-mmlab/mmdetection.git
1
80
0
27
125
Python
{ "docstring": "Test without augmentaton.\n\n Args:\n feats (list[Tensor]): Multi-level features from the\n upstream network, each is a 4D-tensor.\n img_metas (list[dict]): List of image information.\n\n Returns:\n tuple: A tuple contains two tensors.\n\n - mask_cls_results (Tensor): Mask classification logits,\\\n shape (batch_size, num_queries, cls_out_channels).\n Note `cls_out_channels` should includes background.\n - mask_pred_results (Tensor): Mask logits, shape \\\n (batch_size, num_queries, h, w).\n ", "language": "en", "n_whitespaces": 191, "n_words": 55, "vocab_size": 49 }
def simple_test(self, feats, img_metas, **kwargs): all_cls_scores, all_mask_preds = self(feats, img_metas) mask_cls_results = all_cls_scores[-1] mask_pred_results = all_mask_preds[-1] # upsample masks img_shape = img_metas[0]['batch_input_shape'] mask_pred_results = F.interpolate( mask_pred_results, size=(img_shape[0], img_shape[1]), mode='bilinear', align_corners=False) return mask_cls_results, mask_pred_results
52,490
208,731
308
IPython/core/ultratb.py
76
25
def _format_list(self, extracted_list): Colors = self.Colors list = [] for ind, (filename, lineno, name, line) in enumerate(extracted_list): normalCol, nameCol, fileCol, lineCol = ( # Emphasize the last entry (Colors.normalEm, Colors.nameEm, Colors.filenameEm, Colors.line) if ind == len(extracted_list) - 1 else (Colors.Normal, Colors.name, Colors.filename, "") ) fns = _format_filename(filename, fileCol, normalCol, lineno=lineno) item = f"{normalCol} {fns}" if name != "<module>": item += f" in {nameCol}{name}{normalCol}\n"
Restore lineno's for Input mapped files (#13560) * Implement lineno's for Input mapped files * Adopt In [123], line 123 format * Revert "Set co_name for cells run line by line. Fixes https://github.com/ipython/ipykernel/issues/841" (This reverts commit d11e987f174a15f1640f8006c86f58d884c3faa4.) * Omit mention of ", in <module>" for input tracebacks * Input cell -> Cell * Remove <module> from traceback doctests * Use f-string for `in ...' format * Simplify _format_list logic, converting to f-strings
_format_list
a72418e2dcdfc3c91f70d724d16d2691a41c9c24
ipython
ultratb.py
14
19
https://github.com/ipython/ipython.git
5
134
0
61
248
Python
{ "docstring": "Format a list of traceback entry tuples for printing.\n\n Given a list of tuples as returned by extract_tb() or\n extract_stack(), return a list of strings ready for printing.\n Each string in the resulting list corresponds to the item with the\n same index in the argument list. Each string ends in a newline;\n the strings may contain internal newlines as well, for those items\n whose source text line is not None.\n\n Lifted almost verbatim from traceback.py\n ", "language": "en", "n_whitespaces": 132, "n_words": 75, "vocab_size": 53 }
def _format_list(self, extracted_list): Colors = self.Colors list = [] for ind, (filename, lineno, name, line) in enumerate(extracted_list): normalCol, nameCol, fileCol, lineCol = ( # Emphasize the last entry (Colors.normalEm, Colors.nameEm, Colors.filenameEm, Colors.line) if ind == len(extracted_list) - 1 else (Colors.Normal, Colors.name, Colors.filename, "") ) fns = _format_filename(filename, fileCol, normalCol, lineno=lineno) item = f"{normalCol} {fns}" if name != "<module>": item += f" in {nameCol}{name}{normalCol}\n" else: item += "\n" if line: item += f"{lineCol} {line.strip()}{normalCol}\n" list.append(item) return list
47,530
196,030
351
sympy/calculus/euler.py
146
37
def euler_equations(L, funcs=(), vars=()): r funcs = tuple(funcs) if iterable(funcs) else (funcs,) if not funcs: funcs = tuple(L.atoms(Function)) else: for f in funcs: if not isinstance(f, Function): raise TypeError('Function expected, got: %s' % f) vars = tuple(vars) if iterable(vars) else (vars,) if not vars: vars = funcs[0].args else: vars = tuple(sympify(var) for var in vars) if not all(isinstance(v, Symbol) for v in vars): raise TypeError('Variables are not symbols, got %s' % vars) for f in funcs:
Updated import locations
euler_equations
498015021131af4dbb07eb110e5badaba8250c7b
sympy
euler.py
19
86
https://github.com/sympy/sympy.git
18
281
0
84
436
Python
{ "docstring": "\n Find the Euler-Lagrange equations [1]_ for a given Lagrangian.\n\n Parameters\n ==========\n\n L : Expr\n The Lagrangian that should be a function of the functions listed\n in the second argument and their derivatives.\n\n For example, in the case of two functions `f(x,y)`, `g(x,y)` and\n two independent variables `x`, `y` the Lagrangian would have the form:\n\n .. math:: L\\left(f(x,y),g(x,y),\\frac{\\partial f(x,y)}{\\partial x},\n \\frac{\\partial f(x,y)}{\\partial y},\n \\frac{\\partial g(x,y)}{\\partial x},\n \\frac{\\partial g(x,y)}{\\partial y},x,y\\right)\n\n In many cases it is not necessary to provide anything, except the\n Lagrangian, it will be auto-detected (and an error raised if this\n couldn't be done).\n\n funcs : Function or an iterable of Functions\n The functions that the Lagrangian depends on. The Euler equations\n are differential equations for each of these functions.\n\n vars : Symbol or an iterable of Symbols\n The Symbols that are the independent variables of the functions.\n\n Returns\n =======\n\n eqns : list of Eq\n The list of differential equations, one for each function.\n\n Examples\n ========\n\n >>> from sympy import euler_equations, Symbol, Function\n >>> x = Function('x')\n >>> t = Symbol('t')\n >>> L = (x(t).diff(t))**2/2 - x(t)**2/2\n >>> euler_equations(L, x(t), t)\n [Eq(-x(t) - Derivative(x(t), (t, 2)), 0)]\n >>> u = Function('u')\n >>> x = Symbol('x')\n >>> L = (u(t, x).diff(t))**2/2 - (u(t, x).diff(x))**2/2\n >>> euler_equations(L, u(t, x), [t, x])\n [Eq(-Derivative(u(t, x), (t, 2)) + Derivative(u(t, x), (x, 2)), 0)]\n\n References\n ==========\n\n .. [1] https://en.wikipedia.org/wiki/Euler%E2%80%93Lagrange_equation\n\n ", "language": "en", "n_whitespaces": 454, "n_words": 224, "vocab_size": 139 }
def euler_equations(L, funcs=(), vars=()): r funcs = tuple(funcs) if iterable(funcs) else (funcs,) if not funcs: funcs = tuple(L.atoms(Function)) else: for f in funcs: if not isinstance(f, Function): raise TypeError('Function expected, got: %s' % f) vars = tuple(vars) if iterable(vars) else (vars,) if not vars: vars = funcs[0].args else: vars = tuple(sympify(var) for var in vars) if not all(isinstance(v, Symbol) for v in vars): raise TypeError('Variables are not symbols, got %s' % vars) for f in funcs: if not vars == f.args: raise ValueError("Variables %s do not match args: %s" % (vars, f)) order = max([len(d.variables) for d in L.atoms(Derivative) if d.expr in funcs] + [0]) eqns = [] for f in funcs: eq = diff(L, f) for i in range(1, order + 1): for p in combinations_with_replacement(vars, i): eq = eq + S.NegativeOne**i*diff(L, diff(f, *p), *p) new_eq = Eq(eq, 0) if isinstance(new_eq, Eq): eqns.append(new_eq) return eqns
17,162
81,161
207
awx/main/tasks/callback.py
65
11
def delay_update(self, skip_if_already_set=False, **kwargs): for key, value in kwargs.items(): if key in self.extra_update_fields and skip_if_already_set: continue elif key in self.extra_update_fields and key in ('job_explanation', 'result_traceback'): if str(value) in self.extra_update_fields.get(key, ''): continue # if already set, avoid duplicating messages # In the case of these fields, we do not want to lose any prior information, so combine valu
Delay update of artifacts and error fields until final job save (#11832) * Delay update of artifacts until final job save Save tracebacks from receptor module to callback object Move receptor traceback check up to be more logical Use new mock_me fixture to avoid DB call with me method Update the special runner message to the delay_update pattern * Move special runner message into post-processing of callback fields
delay_update
452744b67e02823879e722fe574984a2d760ed60
awx
callback.py
18
10
https://github.com/ansible/awx.git
7
105
0
50
174
Python
{ "docstring": "Stash fields that should be saved along with the job status change", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
def delay_update(self, skip_if_already_set=False, **kwargs): for key, value in kwargs.items(): if key in self.extra_update_fields and skip_if_already_set: continue elif key in self.extra_update_fields and key in ('job_explanation', 'result_traceback'): if str(value) in self.extra_update_fields.get(key, ''): continue # if already set, avoid duplicating messages # In the case of these fields, we do not want to lose any prior information, so combine values self.extra_update_fields[key] = '\n'.join([str(self.extra_update_fields[key]), str(value)]) else: self.extra_update_fields[key] = value
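A plain-Python sketch of the merge rule described above: for the explanation/traceback fields, new text is appended on a new line instead of overwriting, and exact duplicates are skipped; any other field is simply overwritten. The surrounding class is stripped away for illustration.
# Stand-alone illustration of the append-vs-overwrite behaviour of delay_update().
extra_update_fields = {}

def delay_update(skip_if_already_set=False, **kwargs):
    for key, value in kwargs.items():
        if key in extra_update_fields and skip_if_already_set:
            continue
        if key in extra_update_fields and key in ('job_explanation', 'result_traceback'):
            if str(value) in extra_update_fields.get(key, ''):
                continue                      # avoid duplicating the same message
            extra_update_fields[key] = '\n'.join([str(extra_update_fields[key]), str(value)])
        else:
            extra_update_fields[key] = value

delay_update(result_traceback="first error")
delay_update(result_traceback="second error")
delay_update(job_explanation="done", skip_if_already_set=True)
print(extra_update_fields["result_traceback"])    # first error\nsecond error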
12,428
61,179
164
.venv/lib/python3.8/site-packages/pip/_internal/utils/filesystem.py
68
26
def adjacent_tmp_file(path, **kwargs): # type: (str, **Any) -> Iterator[BinaryIO] with NamedTemporaryFile( delete=False, dir=os.path.dirname(path), prefix=os.path.basename(path), suffix=".tmp", **kwargs, ) as f: result = cast(BinaryIO, f) try: yield result finally: result.flush() os.fsync(result.fileno()) # Tenacity raises RetryError by default, explictly raise the original exception _replace_retry = retry(reraise=True,
upd; format
adjacent_tmp_file
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
transferlearning
filesystem.py
14
14
https://github.com/jindongwang/transferlearning.git
2
78
0
60
180
Python
{ "docstring": "Return a file-like object pointing to a tmp file next to path.\n\n The file is created securely and is ensured to be written to disk\n after the context reaches its end.\n\n kwargs will be passed to tempfile.NamedTemporaryFile to control\n the way the temporary file will be opened.\n ", "language": "en", "n_whitespaces": 62, "n_words": 47, "vocab_size": 33 }
def adjacent_tmp_file(path, **kwargs): # type: (str, **Any) -> Iterator[BinaryIO] with NamedTemporaryFile( delete=False, dir=os.path.dirname(path), prefix=os.path.basename(path), suffix=".tmp", **kwargs, ) as f: result = cast(BinaryIO, f) try: yield result finally: result.flush() os.fsync(result.fileno()) # Tenacity raises RetryError by default, explictly raise the original exception _replace_retry = retry(reraise=True, stop=stop_after_delay(1), wait=wait_fixed(0.25)) replace = _replace_retry(os.replace) # test_writable_dir and _test_writable_dir_win are copied from Flit, # with the author's agreement to also place them under pip's license.
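A self-contained sketch of the write-next-to-the-target-then-replace pattern that the helper above supports; the file name and payload are illustrative.
# Atomic-style file update: write to an adjacent temp file, fsync, then os.replace().
import os
import tempfile

def write_atomic(path, data):
    directory = os.path.dirname(os.path.abspath(path))
    with tempfile.NamedTemporaryFile(delete=False, dir=directory,
                                     prefix=os.path.basename(path), suffix=".tmp") as f:
        f.write(data)
        f.flush()
        os.fsync(f.fileno())       # ensure bytes hit the disk before the rename
    os.replace(f.name, path)       # atomic on POSIX; same-volume rename elsewhere

write_atomic("settings.cfg", b"retries = 3\n")
with open("settings.cfg", "rb") as fh:
    print(fh.read())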
3,195
20,046
176
pipenv/patched/notpip/_vendor/distro.py
45
17
def _lsb_release_info(self): # type: () -> Dict[str, str] if not self.include_lsb: return {} with open(os.devnull, "wb") as devnull: try: cmd = ("lsb_release", "-a") stdout = subprocess.check_output(cmd, stderr=devnull) # Command not found or lsb_release returned error except (OSError, subprocess.CalledProcessError): return {} content = self._to_str(stdout).splitlines() return self._parse_lsb_release_content(
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
_lsb_release_info
f3166e673fe8d40277b804d35d77dcdb760fc3b3
pipenv
distro.py
13
11
https://github.com/pypa/pipenv.git
3
79
0
38
138
Python
{ "docstring": "\n Get the information items from the lsb_release command output.\n\n Returns:\n A dictionary containing all information items.\n ", "language": "en", "n_whitespaces": 49, "n_words": 16, "vocab_size": 14 }
def _lsb_release_info(self): # type: () -> Dict[str, str] if not self.include_lsb: return {} with open(os.devnull, "wb") as devnull: try: cmd = ("lsb_release", "-a") stdout = subprocess.check_output(cmd, stderr=devnull) # Command not found or lsb_release returned error except (OSError, subprocess.CalledProcessError): return {} content = self._to_str(stdout).splitlines() return self._parse_lsb_release_content(content)
38,299
159,507
107
rasa/engine/graph.py
35
16
def as_dict(self) -> Dict[Text, Any]: serializable_graph_schema: Dict[Text, Dict[Text, Any]] = {"nodes": {}} for node
fix type annotation in rasa.engine
as_dict
9fc462da870f69f9976be3bc081675844b9f64c2
rasa
graph.py
12
12
https://github.com/RasaHQ/rasa.git
2
72
0
28
137
Python
{ "docstring": "Returns graph schema in a serializable format.\n\n Returns:\n The graph schema in a format which can be dumped as JSON or other formats.\n ", "language": "en", "n_whitespaces": 48, "n_words": 23, "vocab_size": 19 }
def as_dict(self) -> Dict[Text, Any]: serializable_graph_schema: Dict[Text, Dict[Text, Any]] = {"nodes": {}} for node_name, node in self.nodes.items(): serializable = dataclasses.asdict(node) # Classes are not JSON serializable (surprise) serializable["uses"] = f"{node.uses.__module__}.{node.uses.__name__}" serializable_graph_schema["nodes"][node_name] = serializable return serializable_graph_schema
51,065
205,284
240
django/db/migrations/executor.py
52
20
def _create_project_state(self, with_applied_migrations=False): state = ProjectState(real_apps=self.loader.unmigrated_apps) if with_applied_migrations: # Create the forwards plan Django would follow on an empty database full_plan = self.migration_plan( self.loader.graph.leaf_nodes(), clean_start=True
Refs #33476 -- Reformatted code with Black.
_create_project_state
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
executor.py
14
15
https://github.com/django/django.git
6
101
0
42
157
Python
{ "docstring": "\n Create a project state including all the applications without\n migrations and applied migrations if with_applied_migrations=True.\n ", "language": "en", "n_whitespaces": 37, "n_words": 15, "vocab_size": 14 }
def _create_project_state(self, with_applied_migrations=False): state = ProjectState(real_apps=self.loader.unmigrated_apps) if with_applied_migrations: # Create the forwards plan Django would follow on an empty database full_plan = self.migration_plan( self.loader.graph.leaf_nodes(), clean_start=True ) applied_migrations = { self.loader.graph.nodes[key] for key in self.loader.applied_migrations if key in self.loader.graph.nodes } for migration, _ in full_plan: if migration in applied_migrations: migration.mutate_state(state, preserve=False) return state
@control_command( variadic='headers', signature='[key1=value1 [key2=value2 [... [keyN=valueN]]]]', )
52,249
208,224
90
celery/worker/control.py
58
14
def revoke(state, task_id, terminate=False, signal=None, **kwargs): # pylint: disable=redefined-outer-name # XXX Note that this redefines `terminate`: # Outside of this scope that is a function. # supports list argument since 3.1 task_ids, task_
New control command `revoke_by_stamped_headers` (#7838) * Added pytest-order==1.0.1 * Added a new control command `revoke_by_stamped_headers` to revoke tasks by their stamped header instead of task id (terminate only works on running tasks in memory)
revoke
5092598fb88c1f18e3fe709861cdb31df90a7264
celery
control.py
12
4
https://github.com/celery/celery.git
2
56
1
51
115
Python
{ "docstring": "Revoke task by task id (or list of ids).\n\n Keyword Arguments:\n terminate (bool): Also terminate the process if the task is active.\n signal (str): Name of signal to use for terminate (e.g., ``KILL``).\n ", "language": "en", "n_whitespaces": 53, "n_words": 33, "vocab_size": 26 }
def revoke(state, task_id, terminate=False, signal=None, **kwargs): # pylint: disable=redefined-outer-name # XXX Note that this redefines `terminate`: # Outside of this scope that is a function. # supports list argument since 3.1 task_ids, task_id = set(maybe_list(task_id) or []), None task_ids = _revoke(state, task_ids, terminate, signal, **kwargs) return ok(f'tasks {task_ids} flagged as revoked') @control_command( variadic='headers', signature='[key1=value1 [key2=value2 [... [keyN=valueN]]]]', )
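A hedged client-side sketch of triggering the control command above from application code; the app name, broker URL, and task id are placeholders.
# Client-side revocation (assumes a reachable broker and a configured Celery app).
from celery import Celery

app = Celery("proj", broker="redis://localhost:6379/0")          # placeholder broker URL

task_id = "d9078da5-9915-40a0-bfa1-392c7bde42ed"                 # placeholder task id
app.control.revoke(task_id)                                      # flag the task as revoked
app.control.revoke(task_id, terminate=True, signal="SIGKILL")    # also kill it if running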
76,660
261,119
100
sklearn/utils/sparsefuncs.py
47
12
def inplace_swap_row_csc(X, m, n): for t in [m, n]: if isinstance(t, np.ndarray): raise TypeError("m and n should be valid integers") if m < 0: m += X.shape[0] if n < 0: n += X.shape[0] m_mask = X.indices == m X.indices[X.i
DOC Ensures that inplace_swap_row_csc passes numpydoc validation (#24513)
inplace_swap_row_csc
affb0cb49412eb5992d2fad0d765b50a2db1344c
scikit-learn
sparsefuncs.py
12
11
https://github.com/scikit-learn/scikit-learn.git
5
87
0
32
135
Python
{ "docstring": "Swap two rows of a CSC matrix in-place.\n\n Parameters\n ----------\n X : sparse matrix of shape (n_samples, n_features)\n Matrix whose two rows are to be swapped. It should be of\n CSC format.\n\n m : int\n Index of the row of X to be swapped.\n\n n : int\n Index of the row of X to be swapped.\n ", "language": "en", "n_whitespaces": 102, "n_words": 56, "vocab_size": 31 }
def inplace_swap_row_csc(X, m, n): for t in [m, n]: if isinstance(t, np.ndarray): raise TypeError("m and n should be valid integers") if m < 0: m += X.shape[0] if n < 0: n += X.shape[0] m_mask = X.indices == m X.indices[X.indices == n] = m X.indices[m_mask] = n
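A quick check of the index-relabelling trick above on a small SciPy CSC matrix: swapping two rows only requires rewriting the row indices stored in `X.indices`. The matrix is illustrative.
# Verify the in-place row swap on a toy CSC matrix.
import numpy as np
import scipy.sparse as sp

X = sp.csc_matrix(np.array([[1, 0, 2],
                            [0, 3, 0],
                            [4, 0, 5]]))
m, n = 0, 2                      # swap rows 0 and 2
m_mask = X.indices == m
X.indices[X.indices == n] = m
X.indices[m_mask] = n

print(X.toarray())
# [[4 0 5]
#  [0 3 0]
#  [1 0 2]]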
16,058
73,591
139
wagtail/contrib/table_block/tests.py
29
8
def test_render_empty_table(self):
Reformat with black
test_render_empty_table
d10f15e55806c6944827d801cd9c2d53f5da4186
wagtail
tests.py
12
19
https://github.com/wagtail/wagtail.git
1
67
0
22
105
Python
{ "docstring": "\n An empty table should render okay.\n \n <table>\n <tbody>\n <tr><td></td><td></td><td></td></tr>\n <tr><td></td><td></td><td></td></tr>\n <tr><td></td><td></td><td></td></tr>\n </tbody>\n </table>\n ", "language": "en", "n_whitespaces": 145, "n_words": 13, "vocab_size": 11 }
def test_render_empty_table(self): block = TableBlock() result = block.render( { "first_row_is_table_header": False, "first_col_is_header": False, "data": [[None, None, None], [None, None, None], [None, None, None]], } ) expected = self.assertHTMLEqual(result, expected)
47,876
196,376
56
sympy/matrices/dense.py
21
8
def rot_axis3(theta): ct = cos(theta)
Moved imports to higher level
rot_axis3
59d22b6bb7287613d598611027f640d068ca5748
sympy
dense.py
9
7
https://github.com/sympy/sympy.git
1
51
0
18
76
Python
{ "docstring": "Returns a rotation matrix for a rotation of theta (in radians) about\n the 3-axis.\n\n Examples\n ========\n\n >>> from sympy import pi, rot_axis3\n\n A rotation of pi/3 (60 degrees):\n\n >>> theta = pi/3\n >>> rot_axis3(theta)\n Matrix([\n [ 1/2, sqrt(3)/2, 0],\n [-sqrt(3)/2, 1/2, 0],\n [ 0, 0, 1]])\n\n If we rotate by pi/2 (90 degrees):\n\n >>> rot_axis3(pi/2)\n Matrix([\n [ 0, 1, 0],\n [-1, 0, 0],\n [ 0, 0, 1]])\n\n See Also\n ========\n\n rot_axis1: Returns a rotation matrix for a rotation of theta (in radians)\n about the 1-axis\n rot_axis2: Returns a rotation matrix for a rotation of theta (in radians)\n about the 2-axis\n ", "language": "en", "n_whitespaces": 208, "n_words": 100, "vocab_size": 49 }
def rot_axis3(theta): ct = cos(theta) st = sin(theta) lil = ((ct, st, 0), (-st, ct, 0), (0, 0, 1)) return Matrix(lil)
47,592
196,092
56
sympy/combinatorics/graycode.py
17
9
def current(self): rv = self._current or '0' if not isinstance(rv, str): rv = bin(rv)[2:] return rv.rjust(self.n, '0')
Updated import locations
current
498015021131af4dbb07eb110e5badaba8250c7b
sympy
graycode.py
11
5
https://github.com/sympy/sympy.git
3
43
0
15
74
Python
{ "docstring": "\n Returns the currently referenced Gray code as a bit string.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import GrayCode\n >>> GrayCode(3, start='100').current\n '100'\n ", "language": "en", "n_whitespaces": 71, "n_words": 21, "vocab_size": 20 }
def current(self): rv = self._current or '0' if not isinstance(rv, str): rv = bin(rv)[2:] return rv.rjust(self.n, '0')
120,738
335,307
360
src/diffusers/models/unet_sde_score_estimation.py
210
30
def upsample_conv_2d(x, w, k=None, factor=2, gain=1): assert isinstance(factor, int) and factor >= 1 # Check weight shape. assert len(w.shape) == 4 convH = w.shape[2] convW = w.shape[3] inC = w.shape[1] assert convW == convH # Setup filter kernel. if k is None: k = [1] * factor k = _setup_kernel(k) * (gain * (factor**2)) p = (k.shape[0] - factor) - (convW - 1) stride = (factor, factor) # Determine data dimensions. stride = [1, 1, factor, factor] output_shape = ((_shape(x, 2) - 1) * factor + convH, (_shape(x, 3) - 1) * factor + convW) output_padding = ( output_shape[0] - (_shape(x, 2) - 1) * stride[0] - convH, output_shape[1] - (_shape(x, 3) - 1) * stride[1] - convW, ) assert output_padding[0] >= 0 and output_padding[1] >= 0 num_groups = _shape(x, 1) // inC # Transpose weights. w = torch.reshape(w, (num_groups, -1, inC, convH, convW)) w = w[..., ::-1, ::-1].permute(0, 2, 1, 3, 4) w = torch.reshape(w, (num_groups * inC, -1, convH, convW)) x = F.conv_transpose2d(x, w, stride=stride, output_padding=output_padding, padding=0) # Original TF code. # x
add score estimation model
upsample_conv_2d
ac796924dff7241d9b516ea27faaa7b2f12434fd
diffusers
unet_sde_score_estimation.py
14
25
https://github.com/huggingface/diffusers.git
4
356
0
114
547
Python
{ "docstring": "Fused `upsample_2d()` followed by `tf.nn.conv2d()`.\n\n Padding is performed only once at the beginning, not between the\n operations.\n The fused op is considerably more efficient than performing the same\n calculation\n using standard TensorFlow ops. It supports gradients of arbitrary order.\n Args:\n x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W,\n C]`.\n w: Weight tensor of the shape `[filterH, filterW, inChannels,\n outChannels]`. Grouped convolution can be performed by `inChannels =\n x.shape[0] // numGroups`.\n k: FIR filter of the shape `[firH, firW]` or `[firN]`\n (separable). The default is `[1] * factor`, which corresponds to\n nearest-neighbor upsampling.\n factor: Integer upsampling factor (default: 2).\n gain: Scaling factor for signal magnitude (default: 1.0).\n\n Returns:\n Tensor of the shape `[N, C, H * factor, W * factor]` or\n `[N, H * factor, W * factor, C]`, and same datatype as `x`.\n ", "language": "en", "n_whitespaces": 280, "n_words": 139, "vocab_size": 102 }
def upsample_conv_2d(x, w, k=None, factor=2, gain=1): assert isinstance(factor, int) and factor >= 1 # Check weight shape. assert len(w.shape) == 4 convH = w.shape[2] convW = w.shape[3] inC = w.shape[1] assert convW == convH # Setup filter kernel. if k is None: k = [1] * factor k = _setup_kernel(k) * (gain * (factor**2)) p = (k.shape[0] - factor) - (convW - 1) stride = (factor, factor) # Determine data dimensions. stride = [1, 1, factor, factor] output_shape = ((_shape(x, 2) - 1) * factor + convH, (_shape(x, 3) - 1) * factor + convW) output_padding = ( output_shape[0] - (_shape(x, 2) - 1) * stride[0] - convH, output_shape[1] - (_shape(x, 3) - 1) * stride[1] - convW, ) assert output_padding[0] >= 0 and output_padding[1] >= 0 num_groups = _shape(x, 1) // inC # Transpose weights. w = torch.reshape(w, (num_groups, -1, inC, convH, convW)) w = w[..., ::-1, ::-1].permute(0, 2, 1, 3, 4) w = torch.reshape(w, (num_groups * inC, -1, convH, convW)) x = F.conv_transpose2d(x, w, stride=stride, output_padding=output_padding, padding=0) # Original TF code. # x = tf.nn.conv2d_transpose( # x, # w, # output_shape=output_shape, # strides=stride, # padding='VALID', # data_format=data_format) # JAX equivalent return upfirdn2d(x, torch.tensor(k, device=x.device), pad=((p + 1) // 2 + factor - 1, p // 2 + 1))
3,837
21,441
175
pipenv/patched/notpip/_vendor/distlib/_backport/tarfile.py
32
11
def read(self, size=None): if size is None: t = [] while True: buf = self._read(self.bufsize
Vendor in pip 22.1.2
read
c69d55f7c82d5ae2cce542bcfb98d043ca4836a0
pipenv
tarfile.py
13
13
https://github.com/pypa/pipenv.git
4
71
0
25
121
Python
{ "docstring": "Return the next size number of bytes from the stream.\n If size is not defined, return all bytes of the stream\n up to EOF.\n ", "language": "en", "n_whitespaces": 51, "n_words": 24, "vocab_size": 19 }
def read(self, size=None): if size is None: t = [] while True: buf = self._read(self.bufsize) if not buf: break t.append(buf) buf = "".join(t) else: buf = self._read(size) self.pos += len(buf) return buf
553
3,767
89
airbyte-integrations/connectors/source-facebook-marketing/source_facebook_marketing/streams/base_streams.py
16
8
def state(self) -> Mapping[str, Any]: if self._cursor_value: return { self.cursor_field: self._cursor_value,
🎉 🎉 Source FB Marketing: performance and reliability fixes (#9805) * Facebook Marketing performance improvement * add comments and little refactoring * fix integration tests with the new config * improve job status handling, limit concurrency to 10 * fix campaign jobs, refactor manager * big refactoring of async jobs, support random order of slices * update source _read_incremental to hook new state logic * fix issues with timeout * remove debugging and clean up, improve retry logic * merge changes from #8234 * fix call super _read_increment * generalize batch execution, add use_batch flag * improve coverage, do some refactoring of spec * update test, remove overrides of source * add split by AdSet * add smaller insights * fix end_date < start_date case * add account_id to PK * add notes * fix new streams * fix reversed incremental stream * update spec.json for SAT * upgrade CDK and bump version Co-authored-by: Dmytro Rezchykov <[email protected]> Co-authored-by: Eugene Kulak <[email protected]>
state
a3aae8017a0a40ff2006e2567f71dccb04c997a5
airbyte
base_streams.py
10
8
https://github.com/airbytehq/airbyte.git
2
38
0
15
61
Python
{ "docstring": "State getter, get current state and serialize it to emmit Airbyte STATE message", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 13 }
def state(self) -> Mapping[str, Any]: if self._cursor_value: return { self.cursor_field: self._cursor_value, "include_deleted": self._include_deleted, } return {}
55,965
220,322
55
python3.10.4/Lib/asyncio/base_events.py
23
6
def set_task_factory(self, factory): if factory is not None and not callable(factory): raise TypeError('task factory must b
add python 3.10.4 for windows
set_task_factory
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
base_events.py
10
4
https://github.com/XX-net/XX-Net.git
3
30
0
20
52
Python
{ "docstring": "Set a task factory that will be used by loop.create_task().\n\n If factory is None the default task factory will be set.\n\n If factory is a callable, it should have a signature matching\n '(loop, coro)', where 'loop' will be a reference to the active\n event loop, 'coro' will be a coroutine object. The callable\n must return a Future.\n ", "language": "en", "n_whitespaces": 100, "n_words": 57, "vocab_size": 39 }
def set_task_factory(self, factory): if factory is not None and not callable(factory): raise TypeError('task factory must be a callable or None') self._task_factory = factory
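A runnable sketch of installing a custom factory through the API documented above; the `(loop, coro)` signature is the one the setter expects, and the sequential naming is an illustrative choice.
# Custom task factory that names tasks sequentially.
import asyncio
import itertools

counter = itertools.count()

def naming_factory(loop, coro):
    task = asyncio.Task(coro, loop=loop)     # must return a Future-like object
    task.set_name(f"task-{next(counter)}")
    return task

async def work(n):
    await asyncio.sleep(0)
    return n * n

async def main():
    loop = asyncio.get_running_loop()
    loop.set_task_factory(naming_factory)
    t = loop.create_task(work(3))           # goes through naming_factory
    print(t.get_name(), await t)            # task-0 9

asyncio.run(main())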
39,394
163,186
79
pandas/core/arrays/datetimes.py
37
10
def date(self) -> npt.NDArray[np.object_]: # If the Timestamps have a timezone that is not UTC, #
DOC: Improve doc summaries in series.rst (#45237)
date
521259299f7829da667ba39302ec77acedde9e5e
pandas
datetimes.py
9
9
https://github.com/pandas-dev/pandas.git
1
31
0
32
56
Python
{ "docstring": "\n Returns numpy array of python :class:`datetime.date` objects.\n\n Namely, the date part of Timestamps without time and\n timezone information.\n ", "language": "en", "n_whitespaces": 47, "n_words": 18, "vocab_size": 17 }
def date(self) -> npt.NDArray[np.object_]: # If the Timestamps have a timezone that is not UTC, # convert them into their i8 representation while # keeping their timezone and not using UTC timestamps = self._local_timestamps() return ints_to_pydatetime(timestamps, box="date")
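A short usage sketch for the accessor documented above; the timestamps and their fixed offset are illustrative.
# .dt.date drops the time and timezone parts, returning datetime.date objects.
import pandas as pd

s = pd.Series(pd.to_datetime(["2022-01-01 09:30:00+01:00",
                              "2022-06-15 23:45:00+01:00"]))
print(s.dt.date.tolist())        # [datetime.date(2022, 1, 1), datetime.date(2022, 6, 15)]
print(type(s.dt.date.iloc[0]))   # <class 'datetime.date'>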
69,952
243,004
172
src/PIL/Image.py
54
14
def apply_transparency(self): if self.mode != "P" or "transparency" not in self.info: return from . import ImagePalette palette = self.getpalette("RGBA") transparency = self.info["transparency"] if isinstance(tra
Added apply_transparency()
apply_transparency
11be1631433f252b816802aef1a3cd109bd308c7
Pillow
Image.py
13
14
https://github.com/python-pillow/Pillow.git
5
110
0
41
186
Python
{ "docstring": "\n If a P mode image has a \"transparency\" key in the info dictionary,\n remove the key and apply the transparency to the palette instead.\n ", "language": "en", "n_whitespaces": 46, "n_words": 24, "vocab_size": 19 }
def apply_transparency(self): if self.mode != "P" or "transparency" not in self.info: return from . import ImagePalette palette = self.getpalette("RGBA") transparency = self.info["transparency"] if isinstance(transparency, bytes): for i, alpha in enumerate(transparency): palette[i * 4 + 3] = alpha else: palette[transparency * 4 + 3] = 0 self.palette = ImagePalette.ImagePalette("RGBA", bytes(palette)) self.palette.dirty = 1 del self.info["transparency"]
19,002
93,655
19
src/sentry/utils/assets.py
10
9
def get_frontend_app_asset_url(module, key): args = (settings.STATIC_FRONTEND_APP_URL.rstrip("/"), module, key.lstrip("/")) return "{}/{}/{}".format(*
ref(js): Remove broken frontend asset cache busting (#36953)
get_frontend_app_asset_url
2992f33c2d084f2542af647c6b76b54c351cc5a5
sentry
assets.py
10
3
https://github.com/getsentry/sentry.git
1
37
0
10
65
Python
{ "docstring": "\n Returns an asset URL that is unversioned. These assets should have a\n `Cache-Control: max-age=0, must-revalidate` so that clients must validate with the origin\n server before using their locally cached asset.\n\n Example:\n {% frontend_app_asset_url 'sentry' 'sentry.css' %}\n => \"/_static/dist/sentry/sentry.css\"\n ", "language": "en", "n_whitespaces": 65, "n_words": 38, "vocab_size": 37 }
def get_frontend_app_asset_url(module, key): args = (settings.STATIC_FRONTEND_APP_URL.rstrip("/"), module, key.lstrip("/")) return "{}/{}/{}".format(*args)
26,690
119,812
197
jax/_src/lax/linalg.py
139
19
def tridiagonal_solve(dl, d, du, b): r if dl.ndim != 1 or d.ndim != 1 or du.ndim != 1: raise ValueError('dl, d and du must be vectors') if dl.shape != d.shape or d.shape != du.shape: raise ValueError( f'dl={dl.shape}, d={d.shape} and du={du.shape} must all be `[m]`') if b.ndim != 2: raise ValueError(f'b={b.shape} must be a matrix') m, = dl.shape if m < 3: raise ValueError(f'm ({m}) must be >= 3') ldb, n = b.shape if ldb < max(1, m): raise ValueError(f'Leading dimension of b={ldb} must be ≥ max(1, {m})') if dl.dtype != d.dtype or d.dtype != du.dtype or du.dtype !=
DOC: add missing linalg functionality to docs
tridiagonal_solve
c66f5dda60aa5df7b6aa2f09d3ce88c4249b6f34
jax
linalg.py
13
39
https://github.com/google/jax.git
13
200
0
90
379
Python
{ "docstring": "Computes the solution of a tridiagonal linear system.\n\n This function computes the solution of a tridiagonal linear system:\n\n .. math::\n A . X = B\n\n Args:\n dl: The lower diagonal of A: ``dl[i] := A[i, i-1]`` for i in ``[0,m)``.\n Note that ``dl[0] = 0``.\n d: The middle diagnoal of A: ``d[i] := A[i, i]`` for i in ``[0,m)``.\n du: The upper diagonal of A: ``du[i] := A[i, i+1]`` for i in ``[0,m)``.\n Note that ``dl[m - 1] = 0``.\n b: Right hand side matrix.\n\n Returns:\n Solution ``X`` of tridiagonal system.\n ", "language": "en", "n_whitespaces": 125, "n_words": 91, "vocab_size": 57 }
def tridiagonal_solve(dl, d, du, b): r if dl.ndim != 1 or d.ndim != 1 or du.ndim != 1: raise ValueError('dl, d and du must be vectors') if dl.shape != d.shape or d.shape != du.shape: raise ValueError( f'dl={dl.shape}, d={d.shape} and du={du.shape} must all be `[m]`') if b.ndim != 2: raise ValueError(f'b={b.shape} must be a matrix') m, = dl.shape if m < 3: raise ValueError(f'm ({m}) must be >= 3') ldb, n = b.shape if ldb < max(1, m): raise ValueError(f'Leading dimension of b={ldb} must be ≥ max(1, {m})') if dl.dtype != d.dtype or d.dtype != du.dtype or du.dtype != b.dtype: raise ValueError(f'dl={dl.dtype}, d={d.dtype}, du={du.dtype} and ' f'b={b.dtype} must be the same dtype,') t = dl.dtype if t not in (np.float32, np.float64): raise ValueError(f'Only f32/f64 are supported, got {t}') return tridiagonal_solve_p.bind(dl, d, du, b, m=m, n=n, ldb=ldb, t=t) # Schur Decomposition
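A pure-NumPy Thomas-algorithm sketch that solves the same kind of system described above, using the same dl/d/du convention (dl[0] and du[m-1] unused). It is only a reference check with a made-up right-hand side, not the LAPACK-backed primitive the function binds to.
# Thomas algorithm for a tridiagonal solve: forward sweep + back substitution, O(m).
import numpy as np

def thomas_solve(dl, d, du, b):
    m = d.shape[0]
    c = np.zeros(m)                   # modified upper diagonal
    x = np.array(b, dtype=float)      # work on a copy of the right-hand side
    c[0] = du[0] / d[0]
    x[0] = x[0] / d[0]
    for i in range(1, m):
        denom = d[i] - dl[i] * c[i - 1]
        c[i] = du[i] / denom
        x[i] = (x[i] - dl[i] * x[i - 1]) / denom
    for i in range(m - 2, -1, -1):    # back substitution
        x[i] -= c[i] * x[i + 1]
    return x

dl = np.array([0.0, 1.0, 1.0])        # dl[0] unused
d = np.array([4.0, 4.0, 4.0])
du = np.array([1.0, 1.0, 0.0])        # du[m-1] unused
b = np.array([5.0, 6.0, 5.0])
x = thomas_solve(dl, d, du, b)
A = np.diag(d) + np.diag(du[:-1], 1) + np.diag(dl[1:], -1)
print(np.allclose(A @ x, b))          # True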
39,828
166,568
289
pandas/util/_print_versions.py
72
14
def _get_dependency_info() -> dict[str, JSONSerializable]: deps = [ "pandas", # required "numpy", "pytz", "dateutil", # install / build, "setuptools", "pip", "Cython", # test "pytest", "hypothesis", # docs "sphinx", # Other, need a min version "blosc", "feather", "xlsxwriter", "lxml.etree", "html5lib", "pymysql", "psycopg2", "jinja2",
fix pandas.show_versions() and remove pin for setuptools (#47096)
_get_dependency_info
44b660dc4a07f4fb507c31795ae63dca2e6e9440
pandas
_print_versions.py
12
32
https://github.com/pandas-dev/pandas.git
3
106
0
61
191
Python
{ "docstring": "\n Returns dependency information as a JSON serializable dictionary.\n ", "language": "en", "n_whitespaces": 15, "n_words": 8, "vocab_size": 8 }
def _get_dependency_info() -> dict[str, JSONSerializable]: deps = [ "pandas", # required "numpy", "pytz", "dateutil", # install / build, "setuptools", "pip", "Cython", # test "pytest", "hypothesis", # docs "sphinx", # Other, need a min version "blosc", "feather", "xlsxwriter", "lxml.etree", "html5lib", "pymysql", "psycopg2", "jinja2", # Other, not imported. "IPython", "pandas_datareader", ] deps.extend(list(VERSIONS)) result: dict[str, JSONSerializable] = {} for modname in deps: mod = import_optional_dependency(modname, errors="ignore") result[modname] = get_version(mod) if mod else None return result
14,318
66,758
41
erpnext/patches/v13_0/germany_fill_debtor_creditor_number.py
60
16
def execute(): company_list = frappe.get_all("Company", filters={"country": "Germany"}) for company in company_list: party_account_list = frappe.get_all( "Party Account", filters={"company": company.name}, fields=["name", "account", "debtor_creditor_number"], ) for party_account in party_account_list: if (not party_account.account) or party_account.debtor_creditor_number: # account empty or debtor_creditor_number already filled continue account_number = frappe.db.get_value("Account", party_account.account, "account_number") if not account_number: continue frappe.db.set_value( "Party Account", party_account.name
style: format code with black
execute
494bd9ef78313436f0424b918f200dab8fc7c20b
erpnext
germany_fill_debtor_creditor_number.py
14
18
https://github.com/frappe/erpnext.git
6
126
0
46
218
Python
{ "docstring": "Move account number into the new custom field debtor_creditor_number.\n\n\tGerman companies used to use a dedicated payable/receivable account for\n\tevery party to mimick party accounts in the external accounting software\n\t\"DATEV\". This is no longer necessary. The reference ID for DATEV will be\n\tstored in a new custom field \"debtor_creditor_number\".\n\t", "language": "en", "n_whitespaces": 45, "n_words": 50, "vocab_size": 40 }
def execute(): company_list = frappe.get_all("Company", filters={"country": "Germany"}) for company in company_list: party_account_list = frappe.get_all( "Party Account", filters={"company": company.name}, fields=["name", "account", "debtor_creditor_number"], ) for party_account in party_account_list: if (not party_account.account) or party_account.debtor_creditor_number: # account empty or debtor_creditor_number already filled continue account_number = frappe.db.get_value("Account", party_account.account, "account_number") if not account_number: continue frappe.db.set_value( "Party Account", party_account.name, "debtor_creditor_number", account_number ) frappe.db.set_value("Party Account", party_account.name, "account", "")
77,108
262,049
78
TTS/tts/datasets/dataset.py
21
16
def compute_or_load(self, wav_file): pitch_file = self.create_pitch_file_path(wav_file, self.cache_path) if not os.path.exists(pitch_file): pitch
Refactor TTSDataset ⚡️
compute_or_load
176b712c1a40cf630da9a77f1826836723c40fde
TTS
dataset.py
11
7
https://github.com/coqui-ai/TTS.git
2
64
0
18
102
Python
{ "docstring": "\n compute pitch and return a numpy array of pitch values\n ", "language": "en", "n_whitespaces": 25, "n_words": 10, "vocab_size": 9 }
def compute_or_load(self, wav_file): pitch_file = self.create_pitch_file_path(wav_file, self.cache_path) if not os.path.exists(pitch_file): pitch = self._compute_and_save_pitch(self.ap, wav_file, pitch_file) else: pitch = np.load(pitch_file) return pitch.astype(np.float32)
49,581
200,282
816
sympy/testing/runtests.py
272
33
def check_output(self, want, got, optionflags): # Handle the common case first, for efficiency: # if they're string-identical, always return true. if got == want: return True # TODO parse integers as well ? # Parse floats and compare them. If some of the parsed floats contain # ellipses, skip the comparison. matches = self.num_got_rgx.finditer(got) numbers_got = [match.group(1) for match in matches] # list of strs matches = self.num_want_rgx.finditer(want) numbers_want = [match.group(1) for match in matches] # list of strs if len(numbers_got) != len(numbers_want): return False if len(numbers_got) > 0: nw_ = [] for ng, nw in zip(numbers_got, numbers_want): if '...' in nw: nw_.append(ng) continue else: nw_.append(nw) if abs(float(ng)-float(nw)) > 1e-5: return False got = self.num_got_rgx.sub(r'%s', got) got = got % tuple(nw_) # <BLANKLINE> can be used as a special sequence to signify a # blank line, unless the DONT_ACCEPT_BLANKLINE flag is used. if not (optionflags & pdoctest.DONT_ACCEPT_BLANKLINE): # Replace <BLANKLINE> in want with a blank line. want = re.sub(r'(?m)^%s\s*?$' % re.escape(pdoctest.BLANKLINE_MARKER), '', want) # If a line in got contains only spaces, then remove the # spaces. got = re.sub(r'(?m)^\s*?$', '', got) if got == want: return True # This flag causes doctest to ignore any differences in the # contents of whitespace strings. Note that this can be used # in conjunction with the ELLIPSIS flag. if optionflags & pdoctest.NORMALIZE_WHITESPACE: got = ' '.join(got.split()) want = ' '.join(want.split()) if got == want: return True # The ELLIPSIS flag
runtests.py: Undo auto-formatting, re-add changes to blacklist for scipy, numpy
check_output
6d2bbf80752549276a968fd4af78231c569d55c5
sympy
runtests.py
15
36
https://github.com/sympy/sympy.git
15
276
0
149
459
Python
{ "docstring": "\n Return True iff the actual output from an example (`got`)\n matches the expected output (`want`). These strings are\n always considered to match if they are identical; but\n depending on what option flags the test runner is using,\n several non-exact match types are also possible. See the\n documentation for `TestRunner` for more information about\n option flags.\n ", "language": "en", "n_whitespaces": 114, "n_words": 55, "vocab_size": 46 }
def check_output(self, want, got, optionflags): # Handle the common case first, for efficiency: # if they're string-identical, always return true. if got == want: return True # TODO parse integers as well ? # Parse floats and compare them. If some of the parsed floats contain # ellipses, skip the comparison. matches = self.num_got_rgx.finditer(got) numbers_got = [match.group(1) for match in matches] # list of strs matches = self.num_want_rgx.finditer(want) numbers_want = [match.group(1) for match in matches] # list of strs if len(numbers_got) != len(numbers_want): return False if len(numbers_got) > 0: nw_ = [] for ng, nw in zip(numbers_got, numbers_want): if '...' in nw: nw_.append(ng) continue else: nw_.append(nw) if abs(float(ng)-float(nw)) > 1e-5: return False got = self.num_got_rgx.sub(r'%s', got) got = got % tuple(nw_) # <BLANKLINE> can be used as a special sequence to signify a # blank line, unless the DONT_ACCEPT_BLANKLINE flag is used. if not (optionflags & pdoctest.DONT_ACCEPT_BLANKLINE): # Replace <BLANKLINE> in want with a blank line. want = re.sub(r'(?m)^%s\s*?$' % re.escape(pdoctest.BLANKLINE_MARKER), '', want) # If a line in got contains only spaces, then remove the # spaces. got = re.sub(r'(?m)^\s*?$', '', got) if got == want: return True # This flag causes doctest to ignore any differences in the # contents of whitespace strings. Note that this can be used # in conjunction with the ELLIPSIS flag. if optionflags & pdoctest.NORMALIZE_WHITESPACE: got = ' '.join(got.split()) want = ' '.join(want.split()) if got == want: return True # The ELLIPSIS flag says to let the sequence "..." in `want` # match any substring in `got`. if optionflags & pdoctest.ELLIPSIS: if pdoctest._ellipsis_match(want, got): return True # We didn't find any match; return false. return False
16,102
73,775
120
wagtail/core/models/__init__.py
24
20
def start(self, workflow_state, user=None): task_state = self.get_task_state_class()(workflow_state=workflow_state) task_state.status = TaskState.STATUS_IN_PROGRESS task_state.page_revision = workflow_state.page.get_latest_revision() task_state.task = self task_state.save() task_submitted.send( sender=task_state.specific.__class__, instance=task_state.specific, user=user, ) return task_state
Reformat with black
start
d10f15e55806c6944827d801cd9c2d53f5da4186
wagtail
__init__.py
10
12
https://github.com/wagtail/wagtail.git
1
77
0
20
122
Python
{ "docstring": "Start this task on the provided workflow state by creating an instance of TaskState", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 14 }
def start(self, workflow_state, user=None): task_state = self.get_task_state_class()(workflow_state=workflow_state) task_state.status = TaskState.STATUS_IN_PROGRESS task_state.page_revision = workflow_state.page.get_latest_revision() task_state.task = self task_state.save() task_submitted.send( sender=task_state.specific.__class__, instance=task_state.specific, user=user, ) return task_state
14,424
67,084
117
erpnext/regional/germany/utils/datev/datev_csv.py
155
40
def get_datev_csv(data, filters, csv_class): empty_df = pd.DataFrame(columns=csv_class.COLUMNS) data_df = pd.DataFrame.from_records(data) result = empty_df.append(data_df, sort=True) if csv_class.DATA_CATEGORY == DataCategory.TRANSACTIONS: result["Belegdatum"] = pd.to_datetime(result["
style: format code with black
get_datev_csv
494bd9ef78313436f0424b918f200dab8fc7c20b
erpnext
datev_csv.py
13
27
https://github.com/frappe/erpnext.git
3
247
0
107
435
Python
{ "docstring": "\n\tFill in missing columns and return a CSV in DATEV Format.\n\n\tFor automatic processing, DATEV requires the first line of the CSV file to\n\thold meta data such as the length of account numbers oder the category of\n\tthe data.\n\n\tArguments:\n\tdata -- array of dictionaries\n\tfilters -- dict\n\tcsv_class -- defines DATA_CATEGORY, FORMAT_NAME and COLUMNS\n\t", "language": "en", "n_whitespaces": 48, "n_words": 56, "vocab_size": 42 }
def get_datev_csv(data, filters, csv_class): empty_df = pd.DataFrame(columns=csv_class.COLUMNS) data_df = pd.DataFrame.from_records(data) result = empty_df.append(data_df, sort=True) if csv_class.DATA_CATEGORY == DataCategory.TRANSACTIONS: result["Belegdatum"] = pd.to_datetime(result["Belegdatum"]) result["Beleginfo - Inhalt 6"] = pd.to_datetime(result["Beleginfo - Inhalt 6"]) result["Beleginfo - Inhalt 6"] = result["Beleginfo - Inhalt 6"].dt.strftime("%d%m%Y") result["Fälligkeit"] = pd.to_datetime(result["Fälligkeit"]) result["Fälligkeit"] = result["Fälligkeit"].dt.strftime("%d%m%y") result.sort_values(by="Belegdatum", inplace=True, kind="stable", ignore_index=True) if csv_class.DATA_CATEGORY == DataCategory.ACCOUNT_NAMES: result["Sprach-ID"] = "de-DE" data = result.to_csv( # Reason for str(';'): https://github.com/pandas-dev/pandas/issues/6035 sep=";", # European decimal seperator decimal=",", # Windows "ANSI" encoding encoding="latin_1", # format date as DDMM date_format="%d%m", # Windows line terminator line_terminator="\r\n", # Do not number rows index=False, # Use all columns defined above columns=csv_class.COLUMNS, # Quote most fields, even currency values with "," separator quoting=QUOTE_NONNUMERIC, ) data = data.encode("latin_1", errors="replace") header = get_header(filters, csv_class) header = ";".join(header).encode("latin_1", errors="replace") # 1st Row: Header with meta data # 2nd Row: Data heading (Überschrift der Nutzdaten), included in `data` here. # 3rd - nth Row: Data (Nutzdaten) return header + b"\r\n" + data
49,659
200,453
36
sympy/stats/random_matrix_models.py
21
11
def CircularSymplecticEnsemble(sym, dim): sym, dim = _symbol_converter(sym), _sympify(dim) model = CircularSymplecticEnsembleModel(sym, dim) rmp = RandomMatrixPSpace(sym, model=model) return RandomMatrixSymbol(sym, dim, di
Fix various typos Found via `codespell -q 3 -L aboves,aline,ans,aother,arithmetics,assum,atleast,braket,clen,declar,declars,dorder,dum,enew,fo,fro,inout,iself,ist,ket,lamda,lightyear,lightyears,nd,numer,numers,orderd,ot,pring,rcall,rever,ro,ser,siz,splitted,sring,supercedes,te,tht,unequality,upto,vas,versin,whet`
CircularSymplecticEnsemble
24f1e7730119fe958cc8e28411f790c9a5ec04eb
sympy
random_matrix_models.py
9
5
https://github.com/sympy/sympy.git
1
52
0
18
80
Python
{ "docstring": "\n Represents Circular Symplectic Ensembles.\n\n Examples\n ========\n\n >>> from sympy.stats import CircularSymplecticEnsemble as CSE\n >>> from sympy.stats import joint_eigen_distribution\n >>> C = CSE('S', 1)\n >>> joint_eigen_distribution(C)\n Lambda(t[1], Product(Abs(exp(I*t[_j]) - exp(I*t[_k]))**4, (_j, _k + 1, 1), (_k, 1, 0))/(2*pi))\n\n Note\n ====\n\n As can be seen above in the example, density of CiruclarSymplecticEnsemble\n is not evaluated because the exact definition is based on haar measure of\n unitary group which is not unique.\n ", "language": "en", "n_whitespaces": 112, "n_words": 69, "vocab_size": 57 }
def CircularSymplecticEnsemble(sym, dim): sym, dim = _symbol_converter(sym), _sympify(dim) model = CircularSymplecticEnsembleModel(sym, dim) rmp = RandomMatrixPSpace(sym, model=model) return RandomMatrixSymbol(sym, dim, dim, pspace=rmp)
7,845
43,154
35
airflow/models/taskinstance.py
12
4
def _executor_config_comparator(x, y): try: return x == y except AttributeError: return False
Don't crash scheduler if exec config has old k8s objects (#24117) From time to time k8s library objects change their attrs. If executor config is stored with old version, and unpickled with new version, we can get attribute errors that can crash the scheduler (see https://github.com/apache/airflow/issues/23727). Here we update handling so that we fail the task but don't crash the scheduler.
_executor_config_comparator
0c41f437674f135fe7232a368bf9c198b0ecd2f0
airflow
taskinstance.py
8
5
https://github.com/apache/airflow.git
2
19
0
11
33
Python
{ "docstring": "\n The TaskInstance.executor_config attribute is a pickled object that may contain\n kubernetes objects. If the installed library version has changed since the\n object was originally pickled, due to the underlying ``__eq__`` method on these\n objects (which converts them to JSON), we may encounter attribute errors. In this\n case we should replace the stored object.\n ", "language": "en", "n_whitespaces": 73, "n_words": 53, "vocab_size": 45 }
def _executor_config_comparator(x, y): try: return x == y except AttributeError: return False
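A minimal illustrative sketch of why the bare `==` above can raise AttributeError and why the comparator reports inequality instead of crashing; the OldPickledConfig class and its failure mode are assumptions for illustration, not Airflow code.

class OldPickledConfig:
    # Stand-in for an unpickled kubernetes object whose class definition changed
    # after pickling, so its __eq__ (which serializes attributes) blows up.
    def __eq__(self, other):
        raise AttributeError("attribute removed in a newer library version")

def _executor_config_comparator(x, y):
    # Same comparator as in the record above: treat a failed comparison as "not equal".
    try:
        return x == y
    except AttributeError:
        return False

assert _executor_config_comparator(OldPickledConfig(), {"pod_override": None}) is False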
87,761
288,605
370
tests/util/test_color.py
112
3
def test_color_temperature_to_rgbww(): # Coldest color temperature -> only cold channel enabled assert color_util.color_temperature_to_rgbww(6535, 255, 2000, 6535) == ( 0, 0, 0, 255, 0, ) assert color_util.color_temperatu
Use Kelvin as the preferred color temperature unit (#79591) * Use Kelvin as the preferred white temperature unit * Update homekit * Adjust tests
test_color_temperature_to_rgbww
47d0598e75487f63901931875f69f802a477df13
core
test_color.py
8
43
https://github.com/home-assistant/core.git
1
161
0
34
207
Python
{ "docstring": "Test color temp to warm, cold conversion.\n\n Temperature values must be in mireds\n Home Assistant uses rgbcw for rgbww\n ", "language": "en", "n_whitespaces": 28, "n_words": 19, "vocab_size": 19 }
def test_color_temperature_to_rgbww(): # Coldest color temperature -> only cold channel enabled assert color_util.color_temperature_to_rgbww(6535, 255, 2000, 6535) == ( 0, 0, 0, 255, 0, ) assert color_util.color_temperature_to_rgbww(6535, 128, 2000, 6535) == ( 0, 0, 0, 128, 0, ) # Warmest color temperature -> only cold channel enabled assert color_util.color_temperature_to_rgbww(2000, 255, 2000, 6535) == ( 0, 0, 0, 0, 255, ) assert color_util.color_temperature_to_rgbww(2000, 128, 2000, 6535) == ( 0, 0, 0, 0, 128, ) # Warmer than mid point color temperature -> More warm than cold channel enabled assert color_util.color_temperature_to_rgbww(2881, 255, 2000, 6535) == ( 0, 0, 0, 112, 143, ) assert color_util.color_temperature_to_rgbww(2881, 128, 2000, 6535) == ( 0, 0, 0, 56, 72, )
26,330
118,626
176
lib/tests/streamlit/report_context_test.py
33
20
def test_set_page_config_first(self): fake_enqueue = lambda msg: None
Rename and refactor `Report` machinery (#4141) This refactor renames (almost) everything related to the outdated "report" concept with more precise concepts that we use throughout our code, primarily "script run", "session", and "app".
test_set_page_config_first
704eab3478cf69847825b23dabf15813a8ac9fa2
streamlit
report_context_test.py
10
17
https://github.com/streamlit/streamlit.git
1
84
0
26
148
Python
{ "docstring": "st.set_page_config must be called before other st commands\n when the script has been marked as started", "language": "en", "n_whitespaces": 22, "n_words": 16, "vocab_size": 16 }
def test_set_page_config_first(self): fake_enqueue = lambda msg: None ctx = ScriptRunContext( "TestSessionID", fake_enqueue, "", SessionState(), UploadedFileManager(), ) ctx.on_script_start() markdown_msg = ForwardMsg() markdown_msg.delta.new_element.markdown.body = "foo" msg = ForwardMsg() msg.page_config_changed.title = "foo" ctx.enqueue(markdown_msg) with self.assertRaises(StreamlitAPIException): ctx.enqueue(msg)
21,290
101,909
150
lib/gui/display_command.py
38
15
def set_vars(self) -> None: tk_vars = super().set_vars() smoothgraph = tk.DoubleVar() smoothgraph.set(0.900) tk_vars["smoothg
Typing - lib.gui.display_command
set_vars
dab823a3eb7a5257cb1e0818ee10ed234d3de97f
faceswap
display_command.py
10
25
https://github.com/deepfakes/faceswap.git
1
103
0
24
177
Python
{ "docstring": " Add graphing specific variables to the default variables.\n\n Overrides original method.\n\n Returns\n -------\n dict\n The variable names with their corresponding tkinter variable\n ", "language": "en", "n_whitespaces": 69, "n_words": 22, "vocab_size": 21 }
def set_vars(self) -> None: tk_vars = super().set_vars() smoothgraph = tk.DoubleVar() smoothgraph.set(0.900) tk_vars["smoothgraph"] = smoothgraph raw_var = tk.BooleanVar() raw_var.set(True) tk_vars["raw_data"] = raw_var smooth_var = tk.BooleanVar() smooth_var.set(True) tk_vars["smooth_data"] = smooth_var iterations_var = tk.IntVar() iterations_var.set(10000) tk_vars["display_iterations"] = iterations_var logger.debug(tk_vars) return tk_vars
11,975
60,026
226
src/prefect/infrastructure/kubernetes.py
82
9
def _configure_kubernetes_library_client(self) -> None: # TODO: Investigate returning a configured client so calls on other threads # will not invalidate the config needed here # if a k8s cluster block is provided to the flow runner, use that if self.cluster_config: self.cluster_config.configure_client() else: # If no block specified, try to load Kubernetes configuration within a cluster. If that doesn't # work, try to load the configuration from the local environment, a
Use cluster uid and namespace instead of cluster "name" for Kubernetes job identifiers (#7747) Co-authored-by: peytonrunyan <[email protected]> Co-authored-by: Peyton <[email protected]>
_configure_kubernetes_library_client
0c9ee0876133bde14ce070a89557fc31cd905bac
prefect
kubernetes.py
14
14
https://github.com/PrefectHQ/prefect.git
3
45
0
62
85
Python
{ "docstring": "\n Set the correct kubernetes client configuration.\n\n WARNING: This action is not threadsafe and may override the configuration\n specified by another `KubernetesJob` instance.\n ", "language": "en", "n_whitespaces": 61, "n_words": 22, "vocab_size": 21 }
def _configure_kubernetes_library_client(self) -> None: # TODO: Investigate returning a configured client so calls on other threads # will not invalidate the config needed here # if a k8s cluster block is provided to the flow runner, use that if self.cluster_config: self.cluster_config.configure_client() else: # If no block specified, try to load Kubernetes configuration within a cluster. If that doesn't # work, try to load the configuration from the local environment, allowing # any further ConfigExceptions to bubble up. try: kubernetes.config.load_incluster_config() except kubernetes.config.ConfigException: kubernetes.config.load_kube_config()
19,261
95,966
657
tests/sentry/api/endpoints/test_project_rules.py
116
34
def test_runs_alert_rule_action_creator(self, mock_alert_rule_action_creator): self.login_as(user=self.user) project = self.create_project() self.create_sentry_app( name="Pied Piper", organization=project.organization, schema={"elements": [self.create_alert_rule_action_schema()]}, ) install = self.create_sentry_app_installation( slug="pied-piper", organization=project.organization ) actions = [ { "id": "sentry.rules.actions.notify_event_sentry_app.NotifyEventSentryAppAction", "settings": [ {"name": "title", "value": "Team Rocket"}, {"name": "summary", "value": "We're blasting off again."}, ], "sentryAppInstallationUuid": install.uuid, "hasSchemaFormConfig": True, }, ] url = reverse( "sentry-api-0-proj
feat(alert-rule-action): New data structure for alert-rule-action settings (#31444) Objective: Originally the issue was with serializing the settings field for alert webhooks and fighting with the serializers. Instead we decided to convert the dictionary to an array of dictionaries with keys name and value.
test_runs_alert_rule_action_creator
3c8b4477340a7fe276c57c9b598c161b309c4fbd
sentry
test_project_rules.py
15
51
https://github.com/getsentry/sentry.git
1
291
0
91
513
Python
{ "docstring": "\n Ensures that Sentry Apps with schema forms (UI components)\n receive a payload when an alert rule is created with them.\n ", "language": "en", "n_whitespaces": 42, "n_words": 20, "vocab_size": 19 }
def test_runs_alert_rule_action_creator(self, mock_alert_rule_action_creator): self.login_as(user=self.user) project = self.create_project() self.create_sentry_app( name="Pied Piper", organization=project.organization, schema={"elements": [self.create_alert_rule_action_schema()]}, ) install = self.create_sentry_app_installation( slug="pied-piper", organization=project.organization ) actions = [ { "id": "sentry.rules.actions.notify_event_sentry_app.NotifyEventSentryAppAction", "settings": [ {"name": "title", "value": "Team Rocket"}, {"name": "summary", "value": "We're blasting off again."}, ], "sentryAppInstallationUuid": install.uuid, "hasSchemaFormConfig": True, }, ] url = reverse( "sentry-api-0-project-rules", kwargs={"organization_slug": project.organization.slug, "project_slug": project.slug}, ) response = self.client.post( url, data={ "name": "my super cool rule", "owner": f"user:{self.user.id}", "conditions": [], "filters": [], "actions": actions, "filterMatch": "any", "actionMatch": "any", "frequency": 30, }, format="json", ) assert response.status_code == 200, response.content assert response.data["id"] rule = Rule.objects.get(id=response.data["id"]) assert rule.data["actions"] == actions kwargs = { "install": install, "fields": actions[0].get("settings"), } call_kwargs = mock_alert_rule_action_creator.call_args[1] assert call_kwargs["install"].id == kwargs["install"].id assert call_kwargs["fields"] == kwargs["fields"]
115,587
317,011
987
homeassistant/components/icloud/account.py
199
43
def _determine_interval(self) -> int: intervals = {"default": self._max_interval} for device in self._devices.values(): # Max interval if no location if device.location is None: continue current_zone = run_callback_threadsafe( self.hass.loop, async_active_zone, self.hass, device.location[DEVICE_LOCATION_LATITUDE], device.location[DEVICE_LOCATION_LONGITUDE], device.location[DEVICE_LOCATION_HORIZONTAL_ACCURACY], ).result() # Max interval if in zone if current_zone is not None: continue zones = ( self.hass.states.get(entity_id) for entity_id in sorted(self.hass.states.entity_ids("zone")) ) distances = [] for zone_state in zones: if zone_state is None: continue zone_state_lat = zone_state.attributes[DEVICE_LOCATION_LATITUDE] zone_state_long = zone_state.attributes[DEVICE_LOCATION_LONGITUDE] zone_distance = distance(
Remove icloud from mypy ignore list (#75007)
_determine_interval
6ac05784a63f7490f875959139ef903034bc45b0
core
account.py
16
52
https://github.com/home-assistant/core.git
13
290
0
120
454
Python
{ "docstring": "Calculate new interval between two API fetch (in minutes).", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
def _determine_interval(self) -> int: intervals = {"default": self._max_interval} for device in self._devices.values(): # Max interval if no location if device.location is None: continue current_zone = run_callback_threadsafe( self.hass.loop, async_active_zone, self.hass, device.location[DEVICE_LOCATION_LATITUDE], device.location[DEVICE_LOCATION_LONGITUDE], device.location[DEVICE_LOCATION_HORIZONTAL_ACCURACY], ).result() # Max interval if in zone if current_zone is not None: continue zones = ( self.hass.states.get(entity_id) for entity_id in sorted(self.hass.states.entity_ids("zone")) ) distances = [] for zone_state in zones: if zone_state is None: continue zone_state_lat = zone_state.attributes[DEVICE_LOCATION_LATITUDE] zone_state_long = zone_state.attributes[DEVICE_LOCATION_LONGITUDE] zone_distance = distance( device.location[DEVICE_LOCATION_LATITUDE], device.location[DEVICE_LOCATION_LONGITUDE], zone_state_lat, zone_state_long, ) if zone_distance is not None: distances.append(round(zone_distance / 1000, 1)) # Max interval if no zone if not distances: continue mindistance = min(distances) # Calculate out how long it would take for the device to drive # to the nearest zone at 120 km/h: interval = round(mindistance / 2) # Never poll more than once per minute interval = max(interval, 1) if interval > 180: # Three hour drive? # This is far enough that they might be flying interval = self._max_interval if ( device.battery_level is not None and device.battery_level <= 33 and mindistance > 3 ): # Low battery - let's check half as often interval = interval * 2 intervals[device.name] = interval return max( int(min(intervals.items(), key=operator.itemgetter(1))[1]), self._max_interval, )
4,164
22,084
93
pipenv/patched/pip/_vendor/requests/models.py
25
11
def prepare_cookies(self, cookies): if isinstance(cookies, cookielib.CookieJar
Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.
prepare_cookies
cd5a9683be69c86c8f3adcd13385a9bc5db198ec
pipenv
models.py
11
8
https://github.com/pypa/pipenv.git
3
57
0
18
93
Python
{ "docstring": "Prepares the given HTTP cookie data.\n\n This function eventually generates a ``Cookie`` header from the\n given cookies using cookielib. Due to cookielib's design, the header\n will not be regenerated if it already exists, meaning this function\n can only be called once for the life of the\n :class:`PreparedRequest <PreparedRequest>` object. Any subsequent calls\n to ``prepare_cookies`` will have no actual effect, unless the \"Cookie\"\n header is removed beforehand.\n ", "language": "en", "n_whitespaces": 122, "n_words": 66, "vocab_size": 54 }
def prepare_cookies(self, cookies): if isinstance(cookies, cookielib.CookieJar): self._cookies = cookies else: self._cookies = cookiejar_from_dict(cookies) cookie_header = get_cookie_header(self._cookies, self) if cookie_header is not None: self.headers["Cookie"] = cookie_header
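A small usage sketch reaching prepare_cookies() through the public requests API; the URL and cookie values here are placeholders.

import requests

# Preparing the request invokes prepare_cookies(), which folds the dict into a
# single "Cookie" header on the resulting PreparedRequest.
req = requests.Request("GET", "https://example.org/", cookies={"session": "abc123"})
prepared = req.prepare()
print(prepared.headers.get("Cookie"))  # session=abc123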
7,355
40,216
88
dash/testing/browser.py
23
11
def wait_for_contains_text(self, selector, text, timeout=None): return self._wait_for( method=contains_text, args=(selector, text), timeout=timeout, msg=f"text -> {text} not found inside element within {timeout or self._wait_timeout}s", )
f-strings everywhere! fffff
wait_for_contains_text
c3c84b9ecf16bcc61ed80ec39d511af92fe07f2c
dash
browser.py
12
7
https://github.com/plotly/dash.git
1
41
0
23
73
Python
{ "docstring": "Explicit wait until the element's text contains the expected `text`.\n\n timeout if not set, equals to the fixture's `wait_timeout`\n shortcut to `WebDriverWait` with customized `contains_text`\n condition.\n ", "language": "en", "n_whitespaces": 54, "n_words": 26, "vocab_size": 23 }
def wait_for_contains_text(self, selector, text, timeout=None): return self._wait_for( method=contains_text, args=(selector, text), timeout=timeout, msg=f"text -> {text} not found inside element within {timeout or self._wait_timeout}s", )
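A hypothetical pytest sketch of how this helper is usually reached via Dash's dash_duo fixture; the app object, selector and expected text are assumptions for illustration.

def test_status_message(dash_duo):
    # `app` is assumed to be a dash.Dash application defined elsewhere in the test module.
    dash_duo.start_server(app)
    # Raises with the message formatted above if "#status" never contains "done" within 10s.
    dash_duo.wait_for_contains_text("#status", "done", timeout=10)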
49,035
198,681
99
sympy/physics/continuum_mechanics/truss.py
23
10
def remove_member(self, label): if label not in list(self._members): raise ValueError("No such member exists in the Truss") else:
default values for supports and loads removed along with other changes
remove_member
73b2975a89b45ef437f11b697d39796f755a856b
sympy
truss.py
16
8
https://github.com/sympy/sympy.git
2
104
0
22
162
Python
{ "docstring": "\n This method removes a member from the given truss.\n\n Parameters\n ==========\n label: String or Symbol\n The label for the member to be removed.\n\n Examples\n ========\n\n >>> from sympy.physics.continuum_mechanics.truss import Truss\n >>> t = Truss()\n >>> t.add_node('A', 0, 0)\n >>> t.add_node('B', 3, 0)\n >>> t.add_node('C', 2, 2)\n >>> t.add_member('AB', 'A', 'B')\n >>> t.add_member('AC', 'A', 'C')\n >>> t.add_member('BC', 'B', 'C')\n >>> t.members\n {'AB': ['A', 'B'], 'AC': ['A', 'C'], 'BC': ['B', 'C']}\n >>> t.remove_member('AC')\n >>> t.members\n {'AB': ['A', 'B'], 'BC': ['B', 'C']}\n ", "language": "en", "n_whitespaces": 231, "n_words": 79, "vocab_size": 55 }
def remove_member(self, label): if label not in list(self._members): raise ValueError("No such member exists in the Truss") else: self._nodes_occupied.pop(tuple([self._members[label][0], self._members[label][1]])) self._nodes_occupied.pop(tuple([self._members[label][1], self._members[label][0]])) self._members.pop(label) self._internal_forces.pop(label)
26,306
118,583
84
lib/tests/server_test_case.py
24
15
def _create_mock_app_session(*args, **kwargs): mock_id = mock.PropertyMock( return_value="mock_id:%s" % ServerTestCase._next_session_id ) ServerTestCase._next_session_id += 1 mock_session = mock.MagicMock(AppSession, autospec=True, *args, **kwargs) type(mock_sessi
Rename and refactor `Report` machinery (#4141) This refactor renames (almost) everything related to the outdated "report" concept with more precise concepts that we use throughout our code, primarily "script run", "session", and "app".
_create_mock_app_session
704eab3478cf69847825b23dabf15813a8ac9fa2
streamlit
server_test_case.py
11
8
https://github.com/streamlit/streamlit.git
1
57
0
19
93
Python
{ "docstring": "Create a mock AppSession. Each mocked instance will have\n its own unique ID.", "language": "en", "n_whitespaces": 19, "n_words": 13, "vocab_size": 13 }
def _create_mock_app_session(*args, **kwargs): mock_id = mock.PropertyMock( return_value="mock_id:%s" % ServerTestCase._next_session_id ) ServerTestCase._next_session_id += 1 mock_session = mock.MagicMock(AppSession, autospec=True, *args, **kwargs) type(mock_session).id = mock_id return mock_session
7,605
42,543
34
nltk/parse/util.py
17
4
def taggedsents_to_conll(sentences): for sentence in sentences: yield from taggedsent_to_conll(sentence) yield "\n\n" #############################################################
Docstring tests (#3050) * fixed pytests * fixed more pytests * fixed more pytest and changed multiline pytest issues fixes for snowball.py and causal.py * fixed pytests (mainly multiline or rounding issues) * fixed treebank pytests, removed test for return_string=True (deprecated) * fixed destructive.py pytests, removed test for return_string=True (deprecated) * fixed pytest (rounding issues) * fixed pytest (initialised missing object) * fixed pytest (formatting issues) * fixed pytest (formatting issues) * fixed pytest (formatting issues) * added pytest +SKIP for deprecated module stanford * updated AUTHORS.md * changed docstring corrections by usage of ELLIPSIS and different roundings * fixed AUTHORS.md to be consistent * Fix framenet doctest formatting with pprint * Change docstring on MultiListBox.__init__ I believe the original typo was misinterpreted and changed to something that was not originally intended. Co-authored-by: Jan Lennartz <[email protected]> Co-authored-by: Tom Aarsen <[email protected]> Co-authored-by: Tom Aarsen <[email protected]>
taggedsents_to_conll
8a4cf5d94eb94b6427c5d1d7907ba07b119932c5
nltk
util.py
10
4
https://github.com/nltk/nltk.git
2
19
0
15
41
Python
{ "docstring": "\n A module to convert the a POS tagged document stream\n (i.e. list of list of tuples, a list of sentences) and yield lines\n in CONLL format. This module yields one line per word and two newlines\n for end of sentence.\n\n >>> from nltk import word_tokenize, sent_tokenize, pos_tag\n >>> text = \"This is a foobar sentence. Is that right?\"\n >>> sentences = [pos_tag(word_tokenize(sent)) for sent in sent_tokenize(text)]\n >>> for line in taggedsents_to_conll(sentences): # doctest: +NORMALIZE_WHITESPACE\n ... if line:\n ... print(line, end=\"\")\n 1\tThis\t_\tDT\tDT\t_\t0\ta\t_\t_\n 2\tis\t_\tVBZ\tVBZ\t_\t0\ta\t_\t_\n 3\ta\t_\tDT\tDT\t_\t0\ta\t_\t_\n 4\tfoobar\t_\tJJ\tJJ\t_\t0\ta\t_\t_\n 5\tsentence\t_\tNN\tNN\t_\t0\ta\t_\t_\n 6\t.\t\t_\t.\t.\t_\t0\ta\t_\t_\n <BLANKLINE>\n <BLANKLINE>\n 1\tIs\t_\tVBZ\tVBZ\t_\t0\ta\t_\t_\n 2\tthat\t_\tIN\tIN\t_\t0\ta\t_\t_\n 3\tright\t_\tNN\tNN\t_\t0\ta\t_\t_\n 4\t?\t_\t.\t.\t_\t0\ta\t_\t_\n <BLANKLINE>\n <BLANKLINE>\n\n :param sentences: Input sentences to parse\n :type sentence: list(list(tuple(str, str)))\n :rtype: iter(str)\n :return: a generator yielding sentences in CONLL format.\n ", "language": "en", "n_whitespaces": 214, "n_words": 204, "vocab_size": 91 }
def taggedsents_to_conll(sentences): for sentence in sentences: yield from taggedsent_to_conll(sentence) yield "\n\n" ###################################################################### # { Test Suites ######################################################################
19,953
100,480
244
plugins/train/model/phaze_a.py
82
18
def _get_input_shape(self): arch = self.config["enc_architecture"] enforce_size = _MODEL_MAPPING[arch].get("enforce_for_weig
Phaze-A: Add MobileNetV3 encoder
_get_input_shape
0189029dbaad486e623353ee4a8451af8c85f4e4
faceswap
phaze_a.py
17
16
https://github.com/deepfakes/faceswap.git
4
139
0
60
232
Python
{ "docstring": " Obtain the input shape for the model.\n\n Input shape is calculated from the selected Encoder's input size, scaled to the user\n selected Input Scaling, rounded down to the nearest 16 pixels.\n\n Notes\n -----\n Some models (NasNet) require the input size to be of a certain dimension if loading\n imagenet weights. In these instances resize inputs and raise warning message\n\n Returns\n -------\n tuple\n The shape tuple for the input size to the Phaze-A model\n ", "language": "en", "n_whitespaces": 155, "n_words": 73, "vocab_size": 53 }
def _get_input_shape(self): arch = self.config["enc_architecture"] enforce_size = _MODEL_MAPPING[arch].get("enforce_for_weights", False) default_size = _MODEL_MAPPING[arch]["default_size"] scaling = self.config["enc_scaling"] / 100 min_size = _MODEL_MAPPING[arch].get("min_size", 32) size = int(max(min_size, min(default_size, ((default_size * scaling) // 16) * 16))) if self.config["enc_load_weights"] and enforce_size and scaling != 1.0: logger.warning("%s requires input size to be %spx when loading imagenet weights. " "Adjusting input size from %spx to %spx", arch, default_size, size, default_size) retval = (default_size, default_size, 3) else: retval = (size, size, 3) logger.debug("Encoder input set to: %s", retval) return retval
16,543
76,578
572
wagtail/admin/panels.py
148
12
def get_form_options(self): options = {} if not getattr(self.widget_overrides, "is_original_method", False): warn( "The `widget_overrides` method (on %r) is deprecated;
Introduce a get_form_options method to combine widget_overrides / required_fields / required_formsets / field_permissions
get_form_options
ae79eb4cb29b84bb8379fcf0957e6837164c5933
wagtail
panels.py
12
35
https://github.com/wagtail/wagtail.git
5
168
0
60
300
Python
{ "docstring": "\n Return a dictionary of attributes such as 'fields', 'formsets' and 'widgets'\n which should be incorporated into the form class definition to generate a form\n that this EditHandler can use.\n This will only be called after binding to a model (i.e. self.model is available).\n ", "language": "en", "n_whitespaces": 79, "n_words": 43, "vocab_size": 38 }
def get_form_options(self): options = {} if not getattr(self.widget_overrides, "is_original_method", False): warn( "The `widget_overrides` method (on %r) is deprecated; " "these should be returned from `get_form_options` as a " "`widgets` item instead." % type(self), category=RemovedInWagtail219Warning, ) options["widgets"] = self.widget_overrides() if not getattr(self.required_fields, "is_original_method", False): warn( "The `required_fields` method (on %r) is deprecated; " "these should be returned from `get_form_options` as a " "`fields` item instead." % type(self), category=RemovedInWagtail219Warning, ) options["fields"] = self.required_fields() if not getattr(self.required_formsets, "is_original_method", False): warn( "The `required_formsets` method (on %r) is deprecated; " "these should be returned from `get_form_options` as a " "`formsets` item instead." % type(self), category=RemovedInWagtail219Warning, ) options["formsets"] = self.required_formsets() if not getattr(self.field_permissions, "is_original_method", False): warn( "The `field_permissions` method (on %r) is deprecated; " "these should be returned from `get_form_options` as a " "`field_permissions` item instead." % type(self), category=RemovedInWagtail219Warning, ) options["field_permissions"] = self.field_permissions() return options # RemovedInWagtail219Warning - edit handlers should override get_form_options instead
12,267
60,730
101
.venv/lib/python3.8/site-packages/pip/_internal/index/package_finder.py
29
10
def evaluate_links(self, link_evaluator, links): # type: (LinkEvaluator, Iterable[Link]) -> List[InstallationCandidate] candidates = [] for link in self._sort_links(links): candidate = self.get_install_candidate(link_evaluator,
upd; format
evaluate_links
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
transferlearning
package_finder.py
11
7
https://github.com/jindongwang/transferlearning.git
3
48
0
26
77
Python
{ "docstring": "\n Convert links that are candidates to InstallationCandidate objects.\n ", "language": "en", "n_whitespaces": 23, "n_words": 8, "vocab_size": 8 }
def evaluate_links(self, link_evaluator, links): # type: (LinkEvaluator, Iterable[Link]) -> List[InstallationCandidate] candidates = [] for link in self._sort_links(links): candidate = self.get_install_candidate(link_evaluator, link) if candidate is not None: candidates.append(candidate) return candidates
35,855
154,199
70
modin/core/storage_formats/base/query_compiler.py
20
7
def columnarize(self):
REFACTOR-#4796: Introduce constant for __reduced__ column name (#4799) Co-authored-by: Mahesh Vashishtha <[email protected]> Co-authored-by: Alexey Prutskov <[email protected]> Co-authored-by: Yaroslav Igoshev <[email protected]> Signed-off-by: Jonathan Shi <[email protected]>
columnarize
3f985ed6864cc1b5b587094d75ca5b2695e4139f
modin
query_compiler.py
12
6
https://github.com/modin-project/modin.git
4
44
0
17
72
Python
{ "docstring": "\n Transpose this QueryCompiler if it has a single row but multiple columns.\n\n This method should be called for QueryCompilers representing a Series object,\n i.e. ``self.is_series_like()`` should be True.\n\n Returns\n -------\n BaseQueryCompiler\n Transposed new QueryCompiler or self.\n ", "language": "en", "n_whitespaces": 97, "n_words": 36, "vocab_size": 32 }
def columnarize(self): if len(self.columns) != 1 or ( len(self.index) == 1 and self.index[0] == MODIN_UNNAMED_SERIES_LABEL ): return self.transpose() return self
12,305
60,858
34
.venv/lib/python3.8/site-packages/pip/_internal/models/wheel.py
13
6
def get_formatted_file_tags(self): # type: () -> List[str] return sorted(str(tag) for tag in self.
upd; format
get_formatted_file_tags
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
transferlearning
wheel.py
9
2
https://github.com/jindongwang/transferlearning.git
2
20
0
13
35
Python
{ "docstring": "Return the wheel's tags as a sorted list of strings.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
def get_formatted_file_tags(self): # type: () -> List[str] return sorted(str(tag) for tag in self.file_tags)
39,090
161,892
555
tests/test_syntax.py
85
18
def test_python_render(): syntax = Panel.fit( Syntax( CODE, lexer="python", line_numbers=True, line_range=(2, 10), theme="monokai", code_width=60, word_wrap=True, ), padding=0, ) rendered_syntax = render(syntax) print(repr(rendered_syntax)) expected = '╭─────────────────────────────────────────────────────────────────╮\n│\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 2 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;230;219;116;48;2;39;40;34m\x1b[0m\x1b[48;2;39;40;34m \x1b[0m│\n│\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 3 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34miter_values\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;249;38;114;48;2;39;40;34m=\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34miter\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m(\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mvalues\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m)\x1b[0m\x1b[48;2;39;40;34m \x1b[0m│\n│\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 4 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mtry\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m:\x1b[0m\x1b[48;2;39;40;34m \x1b[0m│\n│\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 5 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mprevious_value\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;249;38;114;48;2;39;40;34m=\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mnext\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m(\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34miter_values\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m)\x1b[0m\x1b[48;2;39;40;34m \x1b[0m│\n│\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 6 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mexcept\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;166;226;46;48;2;39;40;34mStopIteration\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m:\x1b[0m\x1b[48;2;39;40;34m \x1b[0m│\n│\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 7 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;102;217;239;48;2;39
fix for syntax measure
test_python_render
ac69488768e9c54cdef26e45b26a1b42ebf2f5d3
rich
test_syntax.py
12
17
https://github.com/Textualize/rich.git
1
69
0
57
289
Python
{ "docstring": "Iterate and generate a tuple with a flag for first \\x1b[0m\\x1b[48;2;39;40;34m \\x1b[0m│\\n│\\x1b[48;2;39;40;34m \\x1b[0m\\x1b[38;2;230;219;116;48;2;39;40;34mand last value.", "language": "en", "n_whitespaces": 19, "n_words": 15, "vocab_size": 14 }
def test_python_render(): syntax = Panel.fit( Syntax( CODE, lexer="python", line_numbers=True, line_range=(2, 10), theme="monokai", code_width=60, word_wrap=True, ), padding=0, ) rendered_syntax = render(syntax) print(repr(rendered_syntax)) expected = '╭─────────────────────────────────────────────────────────────────╮\n│\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 2 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;230;219;116;48;2;39;40;34m\x1b[0m\x1b[48;2;39;40;34m \x1b[0m│\n│\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 3 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34miter_values\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;249;38;114;48;2;39;40;34m=\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34miter\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m(\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mvalues\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m)\x1b[0m\x1b[48;2;39;40;34m \x1b[0m│\n│\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 4 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mtry\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m:\x1b[0m\x1b[48;2;39;40;34m \x1b[0m│\n│\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 5 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mprevious_value\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;249;38;114;48;2;39;40;34m=\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mnext\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m(\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34miter_values\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m)\x1b[0m\x1b[48;2;39;40;34m \x1b[0m│\n│\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 6 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mexcept\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;166;226;46;48;2;39;40;34mStopIteration\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m:\x1b[0m\x1b[48;2;39;40;34m \x1b[0m│\n│\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 7 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mreturn\x1b[0m\x1b[48;2;39;40;34m \x1b[0m│\n│\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 8 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mfirst\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;249;38;114;48;2;39;40;34m=\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mTrue\x1b[0m\x1b[48;2;39;40;34m \x1b[0m│\n│\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 9 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mfor\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mvalue\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;249;38;114;48;2;39;40;34min\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34miter_values\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m:\x1b[0m\x1b[48;2;39;40;34m \x1b[0m│\n│\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m10 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m 
\x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34myield\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mfirst\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m,\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mFalse\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m,\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mprevious_value\x1b[0m\x1b[48;2;39;40;34m \x1b[0m│\n╰─────────────────────────────────────────────────────────────────╯\n' assert rendered_syntax == expected
102,133
303,311
151
tests/components/recorder/test_history.py
73
22
def test_state_changes_during_period_multiple_entities_single_test(hass_recorder): hass = hass_recorder() start = dt_util.utcnow() test_entites = {f"sensor.{i}": str(i) for i in range(30)} for entity_id, value in test_entites.items(): hass.states.set(entity_id, value) wait_recording_done(hass) end = dt_util.utcnow() hist = history.state_changes_during_period(hass, start, end, None) for entity_id, value in test_entites.items(): hist[entity_id][0].state == value for entity_id, value in test_entites.items(): hist = history.state_changes_during_period(hass, start, end, entity_id) assert len(hist) == 1 hist[entity_id][0].state == value hist = history.state_change
Fix state_changes_during_period history query when no entities are passed (#73139)
test_state_changes_during_period_multiple_entities_single_test
de2e9b6d77adb7f86c6ec4aa0a50428ec8606dc3
core
test_history.py
11
18
https://github.com/home-assistant/core.git
6
183
0
32
284
Python
{ "docstring": "Test state change during period with multiple entities in the same test.\n\n This test ensures the sqlalchemy query cache does not\n generate incorrect results.\n ", "language": "en", "n_whitespaces": 33, "n_words": 24, "vocab_size": 23 }
def test_state_changes_during_period_multiple_entities_single_test(hass_recorder): hass = hass_recorder() start = dt_util.utcnow() test_entites = {f"sensor.{i}": str(i) for i in range(30)} for entity_id, value in test_entites.items(): hass.states.set(entity_id, value) wait_recording_done(hass) end = dt_util.utcnow() hist = history.state_changes_during_period(hass, start, end, None) for entity_id, value in test_entites.items(): hist[entity_id][0].state == value for entity_id, value in test_entites.items(): hist = history.state_changes_during_period(hass, start, end, entity_id) assert len(hist) == 1 hist[entity_id][0].state == value hist = history.state_changes_during_period(hass, start, end, None) for entity_id, value in test_entites.items(): hist[entity_id][0].state == value
51,586
206,612
91
django/utils/dateformat.py
37
7
def O(self): # NOQA: E743, E741 if self._no_timezone_or_datetime_is_ambiguous_or_imaginary: return "" seconds = sel
Refs #33476 -- Reformatted code with Black.
O
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
dateformat.py
10
7
https://github.com/django/django.git
3
43
0
27
93
Python
{ "docstring": "\n Difference to Greenwich time in hours; e.g. '+0200', '-0430'.\n\n If timezone information is not available, return an empty string.\n ", "language": "en", "n_whitespaces": 41, "n_words": 19, "vocab_size": 19 }
def O(self): # NOQA: E743, E741 if self._no_timezone_or_datetime_is_ambiguous_or_imaginary: return "" seconds = self.Z() sign = "-" if seconds < 0 else "+" seconds = abs(seconds) return "%s%02d%02d" % (sign, seconds // 3600, (seconds // 60) % 60)
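A standalone sketch of just the sign/hour/minute arithmetic used above (the real method also needs the timezone handling from Z()); offsets are seconds east of UTC.

def format_offset(seconds):
    # Mirrors the formatting in O(): sign, two-digit hours, two-digit minutes.
    sign = "-" if seconds < 0 else "+"
    seconds = abs(seconds)
    return "%s%02d%02d" % (sign, seconds // 3600, (seconds // 60) % 60)

assert format_offset(7200) == "+0200"    # UTC+2
assert format_offset(-16200) == "-0430"  # UTC-4:30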
17,087
80,656
337
awx/main/utils/common.py
155
16
def convert_mem_str_to_bytes(mem_str): # If there is no suffix, the memory sourced from the request is in bytes if mem_str.isdigit(): return int(mem_str) conversions = { 'Ei': lambda x: x * 2**60, 'E': lambda x: x * 1
Fixup conversion of memory and cpu settings to support k8s resource request format (#11725) fix memory and cpu settings to suport k8s resource request format * fix conversion of memory setting to bytes This setting has not been getting set by default, and needed some fixing up to be compatible with setting the memory in the same way as we set it in the operator, as well as with other changes from last year which assume that ansible runner is returning memory in bytes. This way we can start setting this setting in the operator, and get a more accurate reflection of how much memory is available to the control pod in k8s. On platforms where services are all sharing memory, we deduct a penalty from the memory available. On k8s we don't need to do this because the web, redis, and task containers each have memory allocated to them. * Support CPU setting expressed in units used by k8s This setting has not been getting set by default, and needed some fixing up to be compatible with setting the CPU resource request/limits in the same way as we set it in the resource requests/limits. This way we can start setting this setting in the operator, and get a more accurate reflection of how much cpu is available to the control pod in k8s. Because cpu on k8s can be partial cores, migrate cpu field to decimal. k8s does not allow granularity of less than 100m (equivalent to 0.1 cores), so only store up to 1 decimal place. fix analytics to deal with decimal cpu need to use DjangoJSONEncoder when Decimal fields in data passed to json.dumps
convert_mem_str_to_bytes
799968460d4794bcd9959f57a2b97846b9a00bb7
awx
common.py
14
29
https://github.com/ansible/awx.git
6
234
0
86
400
Python
{ "docstring": "Convert string with suffix indicating units to memory in bytes (base 2)\n\n Useful for dealing with memory setting that may be expressed in units compatible with\n kubernetes.\n\n See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory\n ", "language": "en", "n_whitespaces": 41, "n_words": 29, "vocab_size": 24 }
def convert_mem_str_to_bytes(mem_str): # If there is no suffix, the memory sourced from the request is in bytes if mem_str.isdigit(): return int(mem_str) conversions = { 'Ei': lambda x: x * 2**60, 'E': lambda x: x * 10**18, 'Pi': lambda x: x * 2**50, 'P': lambda x: x * 10**15, 'Ti': lambda x: x * 2**40, 'T': lambda x: x * 10**12, 'Gi': lambda x: x * 2**30, 'G': lambda x: x * 10**9, 'Mi': lambda x: x * 2**20, 'M': lambda x: x * 10**6, 'Ki': lambda x: x * 2**10, 'K': lambda x: x * 10**3, } mem = 0 mem_unit = None for i, char in enumerate(mem_str): if not char.isdigit(): mem_unit = mem_str[i:] mem = int(mem_str[:i]) break if not mem_unit or mem_unit not in conversions.keys(): error = f"Unsupported value for SYSTEM_TASK_ABS_MEM: {mem_str}, memory must be expressed in bytes or with known suffix: {conversions.keys()}. Falling back to 1 byte" logger.warning(error) return 1 return max(1, conversions[mem_unit](mem))
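Illustrative expectations derived from the conversion table above, assuming convert_mem_str_to_bytes is imported from the module named in this record.

assert convert_mem_str_to_bytes("64Mi") == 64 * 2**20   # binary suffix -> 67108864 bytes
assert convert_mem_str_to_bytes("2G") == 2 * 10**9      # decimal suffix
assert convert_mem_str_to_bytes("123456") == 123456     # bare digits are already bytes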
19,015
93,732
525
src/sentry/integrations/jira_server/integration.py
103
37
def sync_status_outbound(self, external_issue, is_resolved, project_id, **kwargs): client = self.get_client() jira_issue = client.get_issue(external_issue.key) jira_project = jira_issue["fields"]["project"] try: external_project = IntegrationExternalProject.objects.get( external_id=jira_project["id"], organization_integration_id__in=OrganizationIntegration.objects.filter( organization_id=external_issue.organization_id, integration_id=external_issue.integration_id, ), ) except IntegrationExternalProject.DoesNotExist: return
ref(Jira): Split Jira Cloud and Jira Server (#37034) * Split Jira Cloud and Jira Server
sync_status_outbound
2fbf550ec05c8501cbc9eca62e73526e717dcbdf
sentry
integration.py
17
36
https://github.com/getsentry/sentry.git
8
213
0
81
352
Python
{ "docstring": "\n Propagate a sentry issue's status to a linked issue's status.\n ", "language": "en", "n_whitespaces": 25, "n_words": 10, "vocab_size": 8 }
def sync_status_outbound(self, external_issue, is_resolved, project_id, **kwargs): client = self.get_client() jira_issue = client.get_issue(external_issue.key) jira_project = jira_issue["fields"]["project"] try: external_project = IntegrationExternalProject.objects.get( external_id=jira_project["id"], organization_integration_id__in=OrganizationIntegration.objects.filter( organization_id=external_issue.organization_id, integration_id=external_issue.integration_id, ), ) except IntegrationExternalProject.DoesNotExist: return jira_status = ( external_project.resolved_status if is_resolved else external_project.unresolved_status ) # don't bother updating if it's already the status we'd change it to if jira_issue["fields"]["status"]["id"] == jira_status: return try: transitions = client.get_transitions(external_issue.key) except ApiHostError: raise IntegrationError("Could not reach host to get transitions.") try: transition = [t for t in transitions if t.get("to", {}).get("id") == jira_status][0] except IndexError: # TODO(jess): Email for failure logger.warning( "jira.status-sync-fail", extra={ "organization_id": external_issue.organization_id, "integration_id": external_issue.integration_id, "issue_key": external_issue.key, }, ) return client.transition_issue(external_issue.key, transition["id"])
23,225
108,514
166
lib/matplotlib/axes/_base.py
44
17
def _sci(self, im): _api.check_isinstance( (mpl.contour.ContourSet, mcoll.Collection, mimage.AxesImage), im=im) if isinstance(im, mpl.contour.ContourSet): if im.collections[0] not in self._children: raise ValueError("ContourSet must be in current Axes") elif im
Cleanup documentation generation for pyplot - remove the awkward `pyplot.plotting()` function, which only served as a namespace to take up the docs for pyplot and output them via `.. autofunction` - Instead generate the same information using `.. autosummary::`. We have to list the desired methods here explicitly. I've added a test that these are the same as previously auto-generated in the `plotting()` docstring. If we change anything in pyplot, we'll be notified through the test failure that we have to adapt the autosummary list. - Removed the docstring generation logic `_setup_pyplot_info_docstrings()`. Apart from generating the `plotting()` docstring, this added docstrings to the pyplot colormap setters. Instead, we now add these docstrings directly via boilerplate.py Co-authored-by: Elliott Sales de Andrade <[email protected]>
_sci
032316bc6c7798fca6c82de24167c975f237687f
matplotlib
_base.py
12
11
https://github.com/matplotlib/matplotlib.git
4
81
0
33
130
Python
{ "docstring": "\n Set the current image.\n\n This image will be the target of colormap functions like\n ``pyplot.viridis``, and other functions such as `~.pyplot.clim`. The\n current image is an attribute of the current Axes.\n ", "language": "en", "n_whitespaces": 68, "n_words": 31, "vocab_size": 24 }
def _sci(self, im): _api.check_isinstance( (mpl.contour.ContourSet, mcoll.Collection, mimage.AxesImage), im=im) if isinstance(im, mpl.contour.ContourSet): if im.collections[0] not in self._children: raise ValueError("ContourSet must be in current Axes") elif im not in self._children: raise ValueError("Argument must be an image, collection, or " "ContourSet in this Axes") self._current_image = im
36,665
156,512
26
dask/typing.py
12
5
def __dask_postpersist__(self) -> tuple[PostPersistCallable, tuple]: raise NotImplementedError("Inheriting c
Collection Protocol (#8674) [PEP 544](https://www.python.org/dev/peps/pep-0544/) introduces the `Protocol` class to the `typing` module in Python 3.8 (the soon be the minimum supported version, https://github.com/dask/community/issues/213). Writing new Dask collections for [dask-awkward](https://github.com/ContinuumIO/dask-awkward/) has had me thinking about working on a `DaskCollection` protocol. I imagine the benefits to be: - usage with static type checkers - other activity in this area at - #8295 - #8706 - #8854 - Python supporting IDEs take advantage of typing - self-documenting; some improvements to [the custom collections page](https://docs.dask.org/en/latest/custom-collections.html) of the docs. The protocol docs can be autogenerated and added to that page. - purely opt-in feature The `typing.runtime_checkable` decorator allows use of `isinstance(x, DaskCollection)` in any code base that uses Dask collections; for example: ```python >>> from dask.typing import DaskCollection >>> import dask.array as da >>> x = da.zeros((10, 3)) >>> isinstance(x, DaskCollection) True ``` (though this is an order of magnitude slower than `dask.base.is_dask_collection` which only checks for `x.__dask_graph__() is not None`; static typing checking & built-in interface documentation are the core benefits IMO) Something else that came up in the brief discussion on a call last week was having `{Scheduler,Worker,Nanny}Plugin` protocols in `distributed`; and perhaps those are better places to start introducing protocols to Dask since on the user side typically more folks would write plugins than new collections.
__dask_postpersist__
1e783d9a714160e968936cb22d54d085959ab09e
dask
typing.py
8
21
https://github.com/dask/dask.git
1
18
0
12
32
Python
{ "docstring": "Rebuilder function and optional arguments to contruct a persisted collection.\n\n Returns\n -------\n PostPersistCallable\n Callable that rebuilds the collection. The signature\n should be\n ``rebuild(dsk: Mapping, *args: Any, rename: Mapping[str, str] | None)``.\n The callable should return an equivalent Dask collection\n with the same keys as `self`, but with results that are\n computed through a different graph. In the case of\n :py:func:`dask.persist`, the new graph will have just the\n output keys and the values already computed.\n tuple[Any, ...]\n Optional arugments passed to the rebuild callable. If no\n additional arguments are to be passed then this must be an\n empty tuple.\n\n ", "language": "en", "n_whitespaces": 254, "n_words": 98, "vocab_size": 76 }
def __dask_postpersist__(self) -> tuple[PostPersistCallable, tuple]: raise NotImplementedError("Inheriting class must implement this method.")
13,627
64,407
21
erpnext/patches/v4_2/repost_reserved_qty.py
32
11
def execute(): for doctype in ("Sales Order Item", "Bin
fix: avoid creating bins without item-wh Co-Authored-By: Shadrak Gurupnor <[email protected]> Co-Authored-By: Saurabh <[email protected]>
execute
c36bd7e1a6fe48c5fff4765e843571a0d6560dd1
erpnext
repost_reserved_qty.py
13
30
https://github.com/frappe/erpnext.git
5
70
0
29
118
Python
{ "docstring": "\n\t\tselect\n\t\t\tdistinct item_code, warehouse\n\t\tfrom\n\t\t\t(\n\t\t\t\t(\n\t\t\t\t\tselect distinct item_code, warehouse\n\t\t\t\t\t\t\t\tfrom `tabSales Order Item` where docstatus=1\n\t\t\t\t) UNION (\n\t\t\t\t\tselect distinct item_code, warehouse\n\t\t\t\t\tfrom `tabPacked Item` where docstatus=1 and parenttype='Sales Order'\n\t\t\t\t)\n\t\t\t) so_item\n\t\twhere\n\t\t\texists(select name from tabItem where name=so_item.item_code and ifnull(is_stock_item, 0)=1)\n\tdelete from tabBin\n\t\twhere exists(\n\t\t\tselect name from tabItem where name=tabBin.item_code and ifnull(is_stock_item, 0) = 0\n\t\t)\n\t", "language": "en", "n_whitespaces": 44, "n_words": 62, "vocab_size": 31 }
def execute(): for doctype in ("Sales Order Item", "Bin"): frappe.reload_doctype(doctype) repost_for = frappe.db.sql() for item_code, warehouse in repost_for: if not (item_code and warehouse): continue update_bin_qty(item_code, warehouse, { "reserved_qty": get_reserved_qty(item_code, warehouse) }) frappe.db.sql()
5,291
30,043
85
saleor/account/migrations/0071_group.py
26
12
def rename_group_tables_reverse(apps, schema_editor): Group = apps.get_model("auth", "Group") schema_editor.alter_db_table( Group, "account_group", "auth_group", ) PermissionGroup = Group.permissions.through schema_editor.alter_db_table( PermissionGroup, "account_group_permissions"
Drop Djanog Auth
rename_group_tables_reverse
72c120ae8eeb34e5a3f9840fb1ab1de1fca52fb5
saleor
0071_group.py
9
13
https://github.com/saleor/saleor.git
1
46
0
20
100
Python
{ "docstring": "\nALTER TABLE account_group RENAME CONSTRAINT account_group_pkey\n TO auth_group_pkey;\n\nALTER TABLE account_group RENAME CONSTRAINT account_group_name_key\n TO auth_group_name_key;\n\nALTER INDEX IF EXISTS account_group_name_034e9f3f_like\n RENAME TO auth_group_name_a6ea08ec_like;\n\nALTER TABLE auth_group_permissions\n ADD CONSTRAINT auth_group_permissions_group_id_permission_id_0cd325b0_uniq\n UNIQUE (group_id, permission_id);\n\nALTER TABLE auth_group_permissions\n ADD CONSTRAINT auth_group_permissions_group_id_b120cbf9_fk_auth_group_id\n FOREIGN KEY (group_id) REFERENCES auth_group (id) DEFERRABLE INITIALLY DEFERRED;\n\nALTER TABLE auth_group_permissions\n ADD CONSTRAINT auth_group_permissio_permission_id_84c5c92e_fk_auth_perm\n FOREIGN KEY (permission_id) REFERENCES auth_permission (id)\n DEFERRABLE INITIALLY DEFERRED;\n\nALTER TABLE account_user_groups\n ADD CONSTRAINT userprofile_user_groups_group_id_c7eec74e_fk_auth_group_id\n FOREIGN KEY (group_id) REFERENCES auth_group (id) DEFERRABLE INITIALLY DEFERRED;\n\nALTER TABLE account_user_user_permissions\n ADD CONSTRAINT userprofile_user_use_permission_id_1caa8a71_fk_auth_perm\n FOREIGN KEY (permission_id) REFERENCES auth_permission (id)\n DEFERRABLE INITIALLY DEFERRED;\n\nALTER TABLE app_app_permissions\n ADD CONSTRAINT account_serviceaccou_permission_id_449791f0_fk_auth_perm\n FOREIGN KEY (permission_id) REFERENCES auth_permission (id)\n DEFERRABLE INITIALLY DEFERRED;\n\nALTER TABLE app_appextension_permissions\n ADD CONSTRAINT app_appextension_per_permission_id_cb6c3ce0_fk_auth_perm\n FOREIGN KEY (permission_id) REFERENCES auth_permission (id)\n DEFERRABLE INITIALLY DEFERRED;\n\nALTER TABLE app_appinstallation_permissions\n ADD CONSTRAINT app_appinstallation__permission_id_4ee9f6c8_fk_auth_perm\n FOREIGN KEY (permission_id) REFERENCES auth_permission (id)\n DEFERRABLE INITIALLY DEFERRED;\n", "language": "en", "n_whitespaces": 199, "n_words": 138, "vocab_size": 44 }
def rename_group_tables_reverse(apps, schema_editor): Group = apps.get_model("auth", "Group") schema_editor.alter_db_table( Group, "account_group", "auth_group", ) PermissionGroup = Group.permissions.through schema_editor.alter_db_table( PermissionGroup, "account_group_permissions", "auth_group_permissions", ) RENAME_CONSTRAINTS_AND_INDEX_REVERSE = DROP_OLD_CONSTRAINTS_REVERSE_FROM_0072 = DROP_OLD_CONSTRAINTS_REVERSE_FROM_APP_0018 =
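For context on how the raw DDL in the documentation field above is usually wired up, here is a hedged sketch of a Django migration that pairs the Python-level table rename with RunSQL for the constraint renames Django will not perform on its own. The dependency name and the forward statement are assumptions for illustration; only the reverse statement is taken from the DDL shown above.

from django.db import migrations


def rename_group_tables(apps, schema_editor):
    Group = apps.get_model("auth", "Group")
    # alter_db_table(model, old_db_table, new_db_table) issues the RENAME TABLE.
    schema_editor.alter_db_table(Group, "auth_group", "account_group")


def rename_group_tables_reverse(apps, schema_editor):
    Group = apps.get_model("auth", "Group")
    schema_editor.alter_db_table(Group, "account_group", "auth_group")


class Migration(migrations.Migration):
    dependencies = [("account", "0070_some_previous_migration")]  # assumed name

    operations = [
        migrations.RunPython(rename_group_tables, rename_group_tables_reverse),
        migrations.RunSQL(
            # Forward DDL is the assumed inverse of the reverse statement below.
            sql="ALTER TABLE account_group RENAME CONSTRAINT auth_group_pkey TO account_group_pkey;",
            reverse_sql="ALTER TABLE account_group RENAME CONSTRAINT account_group_pkey TO auth_group_pkey;",
        ),
    ]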
49,897
201,197
29
tests/auth_tests/test_context_processors.py
8
6
def test_session_is_accessed(self):
Refs #33476 -- Reformatted code with Black.
test_session_is_accessed
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
test_context_processors.py
9
3
https://github.com/django/django.git
1
24
0
8
45
Python
{ "docstring": "\n The session is accessed if the auth context processor\n is used and relevant attributes accessed.\n ", "language": "en", "n_whitespaces": 37, "n_words": 15, "vocab_size": 14 }
def test_session_is_accessed(self): response = self.client.get("/auth_processor_attr_access/") self.assertContains(response, "Session accessed")
56,628
222,539
206
python3.10.4/Lib/distutils/_msvccompiler.py
71
17
def _find_vc2017(): root = os.environ.get("ProgramFiles(x86)") or os.environ.get("ProgramFiles") if not root: return None, None try: path = subprocess.check_output([ os.path.join(root, "Micro
add python 3.10.4 for windows
_find_vc2017
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
_msvccompiler.py
16
19
https://github.com/XX-net/XX-Net.git
5
135
0
55
275
Python
{ "docstring": "Returns \"15, path\" based on the result of invoking vswhere.exe\n If no install is found, returns \"None, None\"\n\n The version is returned to avoid unnecessarily changing the function\n result. It may be ignored when the path is not None.\n\n If vswhere.exe is not available, by definition, VS 2017 is not\n installed.\n ", "language": "en", "n_whitespaces": 69, "n_words": 51, "vocab_size": 41 }
def _find_vc2017(): root = os.environ.get("ProgramFiles(x86)") or os.environ.get("ProgramFiles") if not root: return None, None try: path = subprocess.check_output([ os.path.join(root, "Microsoft Visual Studio", "Installer", "vswhere.exe"), "-latest", "-prerelease", "-requires", "Microsoft.VisualStudio.Component.VC.Tools.x86.x64", "-property", "installationPath", "-products", "*", ], encoding="mbcs", errors="strict").strip() except (subprocess.CalledProcessError, OSError, UnicodeDecodeError): return None, None path = os.path.join(path, "VC", "Auxiliary", "Build") if os.path.isdir(path): return 15, path return None, None PLAT_SPEC_TO_RUNTIME = { 'x86' : 'x86', 'x86_amd64' : 'x64', 'x86_arm' : 'arm', 'x86_arm64' : 'arm64' }
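_find_vc2017 is a probe-and-fall-back routine: ask an external locator tool (vswhere.exe) for a path and return (None, None) when the tool is missing or the query fails. A minimal sketch of the same pattern with git standing in for vswhere, since the shape of the try/except is the point rather than the specific tool:

import subprocess

def find_tool_root():
    try:
        out = subprocess.check_output(
            ["git", "--exec-path"],           # stand-in locator command
            encoding="utf-8", errors="strict",
        ).strip()
    except (subprocess.CalledProcessError, OSError, UnicodeDecodeError):
        return None, None                     # tool absent or query failed
    return 1, out                             # (version tag, resolved path)

print(find_tool_root())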
81,493
275,868
244
keras/saving/hdf5_format.py
127
21
def save_attributes_to_hdf5_group(group, name, data): # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT` # because in that case even chunking the array would not make the saving # possible. bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT] # Expecting this to never be true. if bad_attributes: raise RuntimeError( "The following attributes cannot be saved to HDF5 file because they " f"are larger than {HDF5_OBJECT_HEADER_LIMIT} bytes: {bad_attributes}" ) data_npy = np.asarray(data) num_chunks = 1 chunked_data = np.array_split(data_npy, num_chunks) # This will never loop forever thanks to the test above. while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data): num_chunks += 1 chunked_data = np.array_split(data_npy, num_chunks) if num_chunks > 1: for chunk_id, chunk_data in enumerate(chunked_data): group.attrs["%s%d" % (name, chunk_id)] = chunk_data else: group.attrs[name] = data
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
save_attributes_to_hdf5_group
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
hdf5_format.py
13
18
https://github.com/keras-team/keras.git
8
123
0
88
208
Python
{ "docstring": "Saves attributes (data) of the specified name into the HDF5 group.\n\n This method deals with an inherent problem of HDF5 file which is not\n able to store data larger than HDF5_OBJECT_HEADER_LIMIT bytes.\n\n Args:\n group: A pointer to a HDF5 group.\n name: A name of the attributes to save.\n data: Attributes data to store.\n\n Raises:\n RuntimeError: If any single attribute is too large to be saved.\n ", "language": "en", "n_whitespaces": 106, "n_words": 65, "vocab_size": 49 }
def save_attributes_to_hdf5_group(group, name, data): # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT` # because in that case even chunking the array would not make the saving # possible. bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT] # Expecting this to never be true. if bad_attributes: raise RuntimeError( "The following attributes cannot be saved to HDF5 file because they " f"are larger than {HDF5_OBJECT_HEADER_LIMIT} bytes: {bad_attributes}" ) data_npy = np.asarray(data) num_chunks = 1 chunked_data = np.array_split(data_npy, num_chunks) # This will never loop forever thanks to the test above. while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data): num_chunks += 1 chunked_data = np.array_split(data_npy, num_chunks) if num_chunks > 1: for chunk_id, chunk_data in enumerate(chunked_data): group.attrs["%s%d" % (name, chunk_id)] = chunk_data else: group.attrs[name] = data
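The docstring above describes working around the HDF5 object-header size limit by splitting the attribute array into ever more chunks until each one fits. A self-contained NumPy sketch of just that loop; the 64 KB budget mirrors the HDF5 constraint, and the toy attribute names are made up:

import numpy as np

LIMIT = 64 * 1024  # bytes, same order of magnitude as HDF5_OBJECT_HEADER_LIMIT

names = [("layer_%05d" % i).encode("utf8") for i in range(20000)]
data = np.asarray(names)                      # ~220 KB of fixed-width bytes

num_chunks = 1
chunks = np.array_split(data, num_chunks)
while any(c.nbytes > LIMIT for c in chunks):  # keep splitting until every chunk fits
    num_chunks += 1
    chunks = np.array_split(data, num_chunks)

print(num_chunks, max(c.nbytes for c in chunks))  # 4 chunks, each well under LIMIT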
52,639
209,197
742
scapy/layers/tls/record.py
165
34
def dispatch_hook(cls, _pkt=None, *args, **kargs): if _pkt is not None: plen = len(_pkt) if plen >= 2: byte0, byte1 = struct.unpack("BB", _pkt[:2]) s = kargs.get("tls_
Update the TLS13 notebook to spec
dispatch_hook
c96fbb8487051e209dfee788eff857e9ca1fed72
scapy
record.py
19
25
https://github.com/secdev/scapy.git
18
192
0
107
302
Python
{ "docstring": "\n If the TLS class was called on raw SSLv2 data, we want to return an\n SSLv2 record instance. We acknowledge the risk of SSLv2 packets with a\n msglen of 0x1403, 0x1503, 0x1603 or 0x1703 which will never be casted\n as SSLv2 records but TLS ones instead, but hey, we can't be held\n responsible for low-minded extensibility choices.\n ", "language": "en", "n_whitespaces": 100, "n_words": 57, "vocab_size": 48 }
def dispatch_hook(cls, _pkt=None, *args, **kargs): if _pkt is not None: plen = len(_pkt) if plen >= 2: byte0, byte1 = struct.unpack("BB", _pkt[:2]) s = kargs.get("tls_session", None) if byte0 not in _tls_type or byte1 != 3: # Unknown type # Check SSLv2: either the session is already SSLv2, # either the packet looks like one. As said above, this # isn't 100% reliable, but Wireshark does the same if s and (s.tls_version == 0x0002 or s.advertised_tls_version == 0x0002) or \ (_ssl_looks_like_sslv2(_pkt) and (not s or s.tls_version is None)): from scapy.layers.tls.record_sslv2 import SSLv2 return SSLv2 # Not SSLv2: continuation return _TLSEncryptedContent # Check TLS 1.3 if s and _tls_version_check(s.tls_version, 0x0304): _has_cipher = lambda x: ( x and not isinstance(x.cipher, Cipher_NULL) ) if (_has_cipher(s.rcs) or _has_cipher(s.prcs)) and \ byte0 == 0x17: from scapy.layers.tls.record_tls13 import TLS13 return TLS13 if plen < 5: # Layer detected as TLS but too small to be a # parsed. Scapy should not try to decode them return _TLSEncryptedContent return TLS # Parsing methods
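dispatch_hook is Scapy's hook for letting a class peek at the first raw bytes and hand parsing to a different record class. A stripped-down sketch of that idea; the byte checks mirror the TLS content-type/version test, but the classes are toy stand-ins rather than Scapy layers:

import struct

class Record: ...
class LegacyRecord(Record): ...      # e.g. the SSLv2 fallback
class ModernRecord(Record): ...      # e.g. the TLS record

_KNOWN_TYPES = {0x14, 0x15, 0x16, 0x17}   # ChangeCipherSpec/Alert/Handshake/AppData

def dispatch(raw: bytes):
    if len(raw) >= 2:
        byte0, byte1 = struct.unpack("BB", raw[:2])
        if byte0 not in _KNOWN_TYPES or byte1 != 3:
            return LegacyRecord           # does not look like a TLS record
    return ModernRecord                   # default: parse as TLS

print(dispatch(b"\x16\x03\x01"))          # ModernRecord
print(dispatch(b"\x80\x2e\x01"))          # LegacyRecord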
14,164
66,292
12
erpnext/hr/utils.py
20
9
def get_leave_period(from_date, to_date, company): leave_period = frappe.db.sql( , {"from_date": from_date, "to_date": to_date, "company": company}, as_dict=1, )
style: format code with black
get_leave_period
494bd9ef78313436f0424b918f200dab8fc7c20b
erpnext
utils.py
11
15
https://github.com/frappe/erpnext.git
2
43
0
18
69
Python
{ "docstring": "\n\t\tselect name, from_date, to_date\n\t\tfrom `tabLeave Period`\n\t\twhere company=%(company)s and is_active=1\n\t\t\tand (from_date between %(from_date)s and %(to_date)s\n\t\t\t\tor to_date between %(from_date)s and %(to_date)s\n\t\t\t\tor (from_date < %(from_date)s and to_date > %(to_date)s))\n\t", "language": "en", "n_whitespaces": 25, "n_words": 31, "vocab_size": 19 }
def get_leave_period(from_date, to_date, company): leave_period = frappe.db.sql( , {"from_date": from_date, "to_date": to_date, "company": company}, as_dict=1, ) if leave_period: return leave_period
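The three OR branches in the SQL above are the classic interval-overlap test: the period starts inside the window, ends inside it, or encloses it entirely. A minimal datetime sketch of the equivalent single-expression check (the dates are made up):

from datetime import date

def overlaps(period_from, period_to, from_date, to_date):
    # Equivalent to the three SQL branches combined (inclusive endpoints).
    return period_from <= to_date and period_to >= from_date

leave_period = (date(2022, 1, 1), date(2022, 12, 31))
print(overlaps(*leave_period, date(2022, 6, 1), date(2022, 6, 30)))   # True
print(overlaps(*leave_period, date(2023, 2, 1), date(2023, 2, 28)))   # False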
33,065
143,826
136
rllib/policy/sample_batch.py
49
22
def rows(self) -> Iterator[Dict[str, TensorType]]: # Do we add seq_lens=[1] to each row? seq_lens = None if self.get(SampleBatch.SEQ_LENS) is None else np.array([1]) self_as_dict = {k: v for k, v in self.items()} for i in range(self.count): yield tree.map_structure_with_path( lambda p, v: v[i] if p[0] != self.SEQ_LENS else seq_lens, self_as_dict, )
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
rows
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
ray
sample_batch.py
14
27
https://github.com/ray-project/ray.git
5
95
0
42
144
Python
{ "docstring": "Returns an iterator over data rows, i.e. dicts with column values.\n\n Note that if `seq_lens` is set in self, we set it to [1] in the rows.\n\n Yields:\n The column values of the row in this iteration.\n\n Examples:\n >>> batch = SampleBatch({\n ... \"a\": [1, 2, 3],\n ... \"b\": [4, 5, 6],\n ... \"seq_lens\": [1, 2]\n ... })\n >>> for row in batch.rows():\n print(row)\n {\"a\": 1, \"b\": 4, \"seq_lens\": [1]}\n {\"a\": 2, \"b\": 5, \"seq_lens\": [1]}\n {\"a\": 3, \"b\": 6, \"seq_lens\": [1]}\n ", "language": "en", "n_whitespaces": 247, "n_words": 82, "vocab_size": 58 }
def rows(self) -> Iterator[Dict[str, TensorType]]: # Do we add seq_lens=[1] to each row? seq_lens = None if self.get(SampleBatch.SEQ_LENS) is None else np.array([1]) self_as_dict = {k: v for k, v in self.items()} for i in range(self.count): yield tree.map_structure_with_path( lambda p, v: v[i] if p[0] != self.SEQ_LENS else seq_lens, self_as_dict, )
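The docstring above already shows what the row iteration yields; for readers without Ray installed, here is a dependency-free sketch of the core idea — turning a dict of equal-length columns into per-row dicts. SampleBatch additionally special-cases "seq_lens" and uses tree.map_structure_with_path for nested columns, which this sketch deliberately omits:

batch = {"a": [1, 2, 3], "b": [4, 5, 6]}

def rows(columns):
    count = len(next(iter(columns.values())))   # all columns share this length
    for i in range(count):
        yield {key: col[i] for key, col in columns.items()}

for row in rows(batch):
    print(row)   # {'a': 1, 'b': 4}, then {'a': 2, 'b': 5}, then {'a': 3, 'b': 6}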
80,539
270,715
125
keras/engine/base_layer.py
38
10
def _dedup_weights(self, weights): output, seen_ids = [], set() for w in weights: if id(w) not in seen_ids: output.append(w) # Track the Variable's identity to avoid __eq__ issues. seen_ids.add(id(w)) return output # SavedModel properties. Please see keras/saving/saved_model for
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
_dedup_weights
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
base_layer.py
13
7
https://github.com/keras-team/keras.git
3
49
0
35
83
Python
{ "docstring": "Dedupe weights while maintaining order as much as possible.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 8 }
def _dedup_weights(self, weights): output, seen_ids = [], set() for w in weights: if id(w) not in seen_ids: output.append(w) # Track the Variable's identity to avoid __eq__ issues. seen_ids.add(id(w)) return output # SavedModel properties. Please see keras/saving/saved_model for details.
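A small sketch of why the dedupe above keys on id() rather than on equality: weight-like objects with array semantics (NumPy arrays here as a stand-in for backend variables) are unhashable and overload __eq__ elementwise, so a plain set() or `in` check would raise or misbehave, while identity comparison removes duplicates without ever calling __eq__:

import numpy as np

w1 = np.zeros(3)
w2 = w1            # the same object reached twice -> should be deduped
w3 = np.zeros(3)   # equal values but a distinct object -> must be kept

weights = [w1, w2, w3]
output, seen_ids = [], set()
for w in weights:
    if id(w) not in seen_ids:
        output.append(w)
        seen_ids.add(id(w))

print(len(output))  # 2 -- w1 kept once, w3 kept as a separate weight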
45,126
185,717
95
src/textual/dom.py
31
11
def ancestors_with_self(self) -> list[DOMNode]: nodes: list[MessagePump | None] = [] add_node = nodes.append node: MessagePump | None = self while node is not None:
Don't include self in DOMNode.ancestors any more As well as dropping `self` from the list that DOMNode.ancestors provides, this commit also adds DOMNode.ancestors_with_self, which maintains the previous behaviour of DOMNode.ancestors.
ancestors_with_self
e3130f95c69648916f121e779a325b6f6f87e6ba
textual
dom.py
9
12
https://github.com/Textualize/textual.git
2
56
0
26
92
Python
{ "docstring": "list[DOMNode]: A list of Nodes by tracing a path all the way back to App.\n\n Note: This is inclusive of ``self``.\n ", "language": "en", "n_whitespaces": 35, "n_words": 21, "vocab_size": 20 }
def ancestors_with_self(self) -> list[DOMNode]: nodes: list[MessagePump | None] = [] add_node = nodes.append node: MessagePump | None = self while node is not None: add_node(node) node = node._parent return cast("list[DOMNode]", nodes)
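A toy sketch of the parent-chain walk above, without Textual's MessagePump machinery: each node stores its parent and the walk climbs until it falls off the top of the tree. The Node class here is an invented stand-in for DOMNode:

class Node:
    def __init__(self, name, parent=None):
        self.name = name
        self._parent = parent

    def ancestors_with_self(self):
        nodes = []
        node = self
        while node is not None:       # climb towards the App at the root
            nodes.append(node)
            node = node._parent
        return nodes

app = Node("app")
screen = Node("screen", parent=app)
widget = Node("widget", parent=screen)
print([n.name for n in widget.ancestors_with_self()])   # ['widget', 'screen', 'app']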
18,067
85,981
14,028
src/sentry/search/events/datasets/metrics.py
747
53
def function_converter(self) -> Mapping[str, fields.MetricsFunction]: resolve_metric_id = { "name": "metric_id", "fn": lambda args: self.resolve_metric(args["column"]), } function_converter = { function.name: function for function in [ # Note while the discover version of apdex, count_miserable, user_misery # accepts arguments, because this is precomputed with tags no parameters # are available fields.MetricsFunction( "apdex", optional_args=[fields.NullableNumberRange("satisfaction", 0, None)], snql_distribution=self._resolve_apdex_function, default_result_type="number", ), fields.MetricsFunction( "avg", required_args=[ fields.MetricArg( "column", allowed_columns=constants.METRIC_DURATION_COLUMNS, ) ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: Function( "avgIf", [ Column("value"), Function( "equals", [ Column("metric_id"), args["metric_id"], ], ), ], alias, ), result_type_fn=self.reflective_result_type(), default_result_type="integer", ), fields.MetricsFunction( "count_miserable", required_args=[ fields.MetricArg( "column", allowed_columns=["user"], allow_custom_measurements=False ) ], optional_args=[fields.NullableNumberRange("satisfaction", 0, None)], calculated_args=[resolve_metric_id], snql_set=self._resolve_count_miserable_function, default_result_type="integer", ), fields.MetricsFunction( "count_unparameterized_transactions", snql_distribution=lambda args, alias: Function( "countIf", [ Column("value"), Function( "and", [ Function( "equals", [ Column("metric_id"), self.resolve_metric("transaction.duration"), ], ), Function( "equals", [ self.builder.column("transaction"), self.builder.resolve_tag_value("<< unparameterized >>"), ], ), ], ), ], alias, ), # Not yet exposed, need to add far more validation around tag&value private=True, default_result_type="integer", ), fields.MetricsFunction( "count_null_transactions", snql_distribution=lambda args, alias: Function( "countIf", [ Column("value"), Function( "and", [ Function( "equals", [ Column("metric_id"), self.resolve_metric("transaction.duration"), ], ), Function( "equals", [ self.builder.column("transaction"), "" if self.builder.tag_values_are_strings else 0, ], ), ], ), ], alias, ), private=True, ), fields.MetricsFunction( "count_has_transaction_name", snql_distribution=lambda args, alias: Function( "countIf", [ Column("value"), Function( "and", [ Function( "equals", [ Column("metric_id"), self.resolve_metric("transaction.duration"), ], ), Function( "and", [ Function( "notEquals", [ self.builder.column("transaction"), "" if self.builder.tag_values_are_strings else 0, ], ), Function( "notEquals", [ self.builder.column("transaction"), self.builder.resolve_tag_value( "<< unparameterized >>" ), ], ), ], ), ], ), ], alias, ), private=True, default_result_type="integer", ), fields.MetricsFunction( "user_misery", optional_args=[ fields.NullableNumberRange("satisfaction", 0, None), fields.with_default( constants.MISERY_ALPHA, fields.NumberRange("alpha", 0, None) ), fields.with_default( constants.MISERY_BETA, fields.NumberRange("beta", 0, None) ), ], calculated_args=[], snql_set=self._resolve_user_misery_function, default_result_type="number", ), fields.MetricsFunction( "p50", optional_args=[ fields.with_default( "transaction.duration", fields.MetricArg( "column", allowed_columns=constants.METRIC_DURATION_COLUMNS ), ), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: self._resolve_percentile( args, alias, 0.5 ), result_type_fn=self.reflective_result_type(), 
default_result_type="duration", ), fields.MetricsFunction( "p75", optional_args=[ fields.with_default( "transaction.duration", fields.MetricArg( "column", allowed_columns=constants.METRIC_DURATION_COLUMNS ), ), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: self._resolve_percentile( args, alias, 0.75 ), result_type_fn=self.reflective_result_type(), default_result_type="duration", ), fields.MetricsFunction( "p90", optional_args=[ fields.with_default( "transaction.duration", fields.MetricArg( "column", allowed_columns=constants.METRIC_DURATION_COLUMNS ), ), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: self._resolve_percentile( args, alias, 0.90 ), result_type_fn=self.reflective_result_type(), default_result_type="duration", ), fields.MetricsFunction( "p95", optional_args=[ fields.with_default( "transaction.duration", fields.MetricArg( "column", allowed_columns=constants.METRIC_DURATION_COLUMNS ), ), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: self._resolve_percentile( args, alias, 0.95 ), result_type_fn=self.reflective_result_type(), default_result_type="duration", ), fields.MetricsFunction( "p99", optional_args=[ fields.with_default( "transaction.duration", fields.MetricArg( "column", allowed_columns=constants.METRIC_DURATION_COLUMNS ), ), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: self._resolve_percentile( args, alias, 0.99 ), result_type_fn=self.reflective_result_type(), default_result_type="duration", ), fields.MetricsFunction( "p100", optional_args=[ fields.with_default( "transaction.duration", fields.MetricArg( "column", allowed_columns=constants.METRIC_DURATION_COLUMNS ), ), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: self._resolve_percentile(args, alias, 1), result_type_fn=self.reflective_result_type(), default_result_type="duration", ), fields.MetricsFunction( "max", required_args=[ fields.MetricArg("column"), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: Function( "maxIf", [ Column("value"), Function("equals", [Column("metric_id"), args["metric_id"]]), ], alias, ), result_type_fn=self.reflective_result_type(), ), fields.MetricsFunction( "min", required_args=[ fields.MetricArg("column"), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: Function( "minIf", [ Column("value"), Function("equals", [Column("metric_id"), args["metric_id"]]), ], alias, ), result_type_fn=self.reflective_result_type(), ), fields.MetricsFunction( "sum", required_args=[ fields.MetricArg("column"), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: Function( "sumIf", [ Column("value"), Function("equals", [Column("metric_id"), args["metric_id"]]), ], alias,
fix(mep): Include the column so it's countmerge (#39005) - This was causing these results to overcount since we weren't merging rows correctly. For the purposes of the endpoint we just needed >0 so it wasn't as noticeable
function_converter
0099fe517a2044e70567e969f19bcf3fa3b26122
sentry
metrics.py
28
548
https://github.com/getsentry/sentry.git
6
2,133
0
202
3,312
Python
{ "docstring": "While the final functions in clickhouse must have their -Merge combinators in order to function, we don't\n need to add them here since snuba has a FunctionMapper that will add it for us. Basically it turns expressions\n like quantiles(0.9)(value) into quantilesMerge(0.9)(percentiles)\n Make sure to update METRIC_FUNCTION_LIST_BY_TYPE when adding functions here, can't be a dynamic list since the\n Metric Layer will actually handle which dataset each function goes to\n ", "language": "en", "n_whitespaces": 103, "n_words": 68, "vocab_size": 57 }
def function_converter(self) -> Mapping[str, fields.MetricsFunction]: resolve_metric_id = { "name": "metric_id", "fn": lambda args: self.resolve_metric(args["column"]), } function_converter = { function.name: function for function in [ # Note while the discover version of apdex, count_miserable, user_misery # accepts arguments, because this is precomputed with tags no parameters # are available fields.MetricsFunction( "apdex", optional_args=[fields.NullableNumberRange("satisfaction", 0, None)], snql_distribution=self._resolve_apdex_function, default_result_type="number", ), fields.MetricsFunction( "avg", required_args=[ fields.MetricArg( "column", allowed_columns=constants.METRIC_DURATION_COLUMNS, ) ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: Function( "avgIf", [ Column("value"), Function( "equals", [ Column("metric_id"), args["metric_id"], ], ), ], alias, ), result_type_fn=self.reflective_result_type(), default_result_type="integer", ), fields.MetricsFunction( "count_miserable", required_args=[ fields.MetricArg( "column", allowed_columns=["user"], allow_custom_measurements=False ) ], optional_args=[fields.NullableNumberRange("satisfaction", 0, None)], calculated_args=[resolve_metric_id], snql_set=self._resolve_count_miserable_function, default_result_type="integer", ), fields.MetricsFunction( "count_unparameterized_transactions", snql_distribution=lambda args, alias: Function( "countIf", [ Column("value"), Function( "and", [ Function( "equals", [ Column("metric_id"), self.resolve_metric("transaction.duration"), ], ), Function( "equals", [ self.builder.column("transaction"), self.builder.resolve_tag_value("<< unparameterized >>"), ], ), ], ), ], alias, ), # Not yet exposed, need to add far more validation around tag&value private=True, default_result_type="integer", ), fields.MetricsFunction( "count_null_transactions", snql_distribution=lambda args, alias: Function( "countIf", [ Column("value"), Function( "and", [ Function( "equals", [ Column("metric_id"), self.resolve_metric("transaction.duration"), ], ), Function( "equals", [ self.builder.column("transaction"), "" if self.builder.tag_values_are_strings else 0, ], ), ], ), ], alias, ), private=True, ), fields.MetricsFunction( "count_has_transaction_name", snql_distribution=lambda args, alias: Function( "countIf", [ Column("value"), Function( "and", [ Function( "equals", [ Column("metric_id"), self.resolve_metric("transaction.duration"), ], ), Function( "and", [ Function( "notEquals", [ self.builder.column("transaction"), "" if self.builder.tag_values_are_strings else 0, ], ), Function( "notEquals", [ self.builder.column("transaction"), self.builder.resolve_tag_value( "<< unparameterized >>" ), ], ), ], ), ], ), ], alias, ), private=True, default_result_type="integer", ), fields.MetricsFunction( "user_misery", optional_args=[ fields.NullableNumberRange("satisfaction", 0, None), fields.with_default( constants.MISERY_ALPHA, fields.NumberRange("alpha", 0, None) ), fields.with_default( constants.MISERY_BETA, fields.NumberRange("beta", 0, None) ), ], calculated_args=[], snql_set=self._resolve_user_misery_function, default_result_type="number", ), fields.MetricsFunction( "p50", optional_args=[ fields.with_default( "transaction.duration", fields.MetricArg( "column", allowed_columns=constants.METRIC_DURATION_COLUMNS ), ), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: self._resolve_percentile( args, alias, 0.5 ), result_type_fn=self.reflective_result_type(), 
default_result_type="duration", ), fields.MetricsFunction( "p75", optional_args=[ fields.with_default( "transaction.duration", fields.MetricArg( "column", allowed_columns=constants.METRIC_DURATION_COLUMNS ), ), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: self._resolve_percentile( args, alias, 0.75 ), result_type_fn=self.reflective_result_type(), default_result_type="duration", ), fields.MetricsFunction( "p90", optional_args=[ fields.with_default( "transaction.duration", fields.MetricArg( "column", allowed_columns=constants.METRIC_DURATION_COLUMNS ), ), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: self._resolve_percentile( args, alias, 0.90 ), result_type_fn=self.reflective_result_type(), default_result_type="duration", ), fields.MetricsFunction( "p95", optional_args=[ fields.with_default( "transaction.duration", fields.MetricArg( "column", allowed_columns=constants.METRIC_DURATION_COLUMNS ), ), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: self._resolve_percentile( args, alias, 0.95 ), result_type_fn=self.reflective_result_type(), default_result_type="duration", ), fields.MetricsFunction( "p99", optional_args=[ fields.with_default( "transaction.duration", fields.MetricArg( "column", allowed_columns=constants.METRIC_DURATION_COLUMNS ), ), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: self._resolve_percentile( args, alias, 0.99 ), result_type_fn=self.reflective_result_type(), default_result_type="duration", ), fields.MetricsFunction( "p100", optional_args=[ fields.with_default( "transaction.duration", fields.MetricArg( "column", allowed_columns=constants.METRIC_DURATION_COLUMNS ), ), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: self._resolve_percentile(args, alias, 1), result_type_fn=self.reflective_result_type(), default_result_type="duration", ), fields.MetricsFunction( "max", required_args=[ fields.MetricArg("column"), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: Function( "maxIf", [ Column("value"), Function("equals", [Column("metric_id"), args["metric_id"]]), ], alias, ), result_type_fn=self.reflective_result_type(), ), fields.MetricsFunction( "min", required_args=[ fields.MetricArg("column"), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: Function( "minIf", [ Column("value"), Function("equals", [Column("metric_id"), args["metric_id"]]), ], alias, ), result_type_fn=self.reflective_result_type(), ), fields.MetricsFunction( "sum", required_args=[ fields.MetricArg("column"), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: Function( "sumIf", [ Column("value"), Function("equals", [Column("metric_id"), args["metric_id"]]), ], alias, ), result_type_fn=self.reflective_result_type(), ), fields.MetricsFunction( "sumIf", required_args=[ fields.ColumnTagArg("if_col"), fields.FunctionArg("if_val"), ], calculated_args=[ { "name": "resolved_val", "fn": lambda args: self.builder.resolve_tag_value(args["if_val"]), } ], snql_counter=lambda args, alias: Function( "sumIf", [ Column("value"), Function("equals", [args["if_col"], args["resolved_val"]]), ], alias, ), default_result_type="integer", ), fields.MetricsFunction( "percentile", required_args=[ fields.with_default( "transaction.duration", fields.MetricArg( "column", allowed_columns=constants.METRIC_DURATION_COLUMNS ), ), fields.NumberRange("percentile", 0, 1), ], 
calculated_args=[resolve_metric_id], snql_distribution=self._resolve_percentile, result_type_fn=self.reflective_result_type(), default_result_type="duration", ), fields.MetricsFunction( "count_unique", required_args=[ fields.MetricArg( "column", allowed_columns=["user"], allow_custom_measurements=False ) ], calculated_args=[resolve_metric_id], snql_set=lambda args, alias: Function( "uniqIf", [ Column("value"), Function("equals", [Column("metric_id"), args["metric_id"]]), ], alias, ), default_result_type="integer", ), fields.MetricsFunction( "uniq", snql_set=lambda args, alias: Function( "uniq", [Column("value")], alias, ), ), fields.MetricsFunction( "uniqIf", required_args=[ fields.ColumnTagArg("if_col"), fields.FunctionArg("if_val"), ], calculated_args=[ { "name": "resolved_val", "fn": lambda args: self.builder.resolve_tag_value(args["if_val"]), } ], snql_set=lambda args, alias: Function( "uniqIf", [ Column("value"), Function("equals", [args["if_col"], args["resolved_val"]]), ], alias, ), default_result_type="integer", ), fields.MetricsFunction( "count", snql_distribution=lambda args, alias: Function( "countIf", [ Column("value"), Function( "equals", [ Column("metric_id"), self.resolve_metric("transaction.duration"), ], ), ], alias, ), default_result_type="integer", ), fields.MetricsFunction( "count_web_vitals", required_args=[ fields.MetricArg( "column", allowed_columns=[ "measurements.fp", "measurements.fcp", "measurements.lcp", "measurements.fid", "measurements.cls", ], allow_custom_measurements=False, ), fields.SnQLStringArg( "quality", allowed_strings=["good", "meh", "poor", "any"] ), ], calculated_args=[resolve_metric_id], snql_distribution=self._resolve_web_vital_function, default_result_type="integer", ), fields.MetricsFunction( "epm", snql_distribution=lambda args, alias: Function( "divide", [ Function( "countIf", [ Column("value"), Function( "equals", [ Column("metric_id"), self.resolve_metric("transaction.duration"), ], ), ], ), Function("divide", [args["interval"], 60]), ], alias, ), optional_args=[fields.IntervalDefault("interval", 1, None)], default_result_type="number", ), fields.MetricsFunction( "eps", snql_distribution=lambda args, alias: Function( "divide", [ Function( "countIf", [ Column("value"), Function( "equals", [ Column("metric_id"), self.resolve_metric("transaction.duration"), ], ), ], ), args["interval"], ], alias, ), optional_args=[fields.IntervalDefault("interval", 1, None)], default_result_type="number", ), fields.MetricsFunction( "failure_count", snql_distribution=self._resolve_failure_count, default_result_type="integer", ), fields.MetricsFunction( "failure_rate", snql_distribution=lambda args, alias: Function( "divide", [ self._resolve_failure_count(args), Function( "countIf", [ Column("value"), Function( "equals", [ Column("metric_id"), self.resolve_metric("transaction.duration"), ], ), ], ), ], alias, ), default_result_type="percentage", ), fields.MetricsFunction( "histogram", required_args=[fields.MetricArg("column")], calculated_args=[resolve_metric_id], snql_distribution=self._resolve_histogram_function, default_result_type="number", private=True, ), ] } for alias, name in constants.FUNCTION_ALIASES.items(): if name in function_converter: function_converter[alias] = function_converter[name].alias_as(alias) return function_converter # Field Aliases
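The converter above ends by pointing each alias at a copy of its target registered under the alias name. A stripped-down sketch of that registry-plus-aliases pattern; the FunctionDef class and the alias table are invented stand-ins for MetricsFunction and constants.FUNCTION_ALIASES:

from dataclasses import dataclass, replace

@dataclass(frozen=True)
class FunctionDef:
    name: str
    result_type: str

    def alias_as(self, alias):
        return replace(self, name=alias)   # same definition, new public name

FUNCTIONS = [FunctionDef("p50", "duration"), FunctionDef("count_unique", "integer")]
ALIASES = {"tpm": "epm", "percentile_50": "p50"}

converter = {fn.name: fn for fn in FUNCTIONS}
for alias, name in ALIASES.items():
    if name in converter:                  # skip aliases of functions not registered here
        converter[alias] = converter[name].alias_as(alias)

print(sorted(converter))   # ['count_unique', 'p50', 'percentile_50']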
42,682
178,391
2,042
nuitka/freezer/Standalone.py
477
66
def copyUsedDLLs(source_dir, dist_dir, standalone_entry_points): # This is terribly complex, because we check the list of used DLLs # trying to avoid duplicates, and detecting errors with them not # being binary identical, so we can report them. And then of course # we also need to handle OS specifics. # pylint: disable=too-many-branches,too-many-locals,too-many-statements used_dlls = detectUsedDLLs( source_dir=source_dir, standalone_entry_points=standalone_entry_points, use_cache=not Options.shallNotUseDependsExeCachedResults() and not Options.getWindowsDependencyTool() == "depends.exe", update_cache=not Options.shallNotStoreDependsExeCachedResults() and not Options.getWindowsDependencyTool() == "depends.exe", ) removed_dlls = set() warned_about = set() # Fist make checks and remove some. for dll_filename1, sources1 in tuple(iterItems(used_dlls)): if dll_filename1 in removed_dlls: continue for dll_filename2, sources2 in tuple(iterItems(used_dlls)): if dll_fi
UI: In case of PermissionError, allow users to retry * Esp. on Windows it happens a lot that running programs cannot be updated by Nuitka; this avoids the cryptic error somewhere randomly.
copyUsedDLLs
2c20b90946a8aa5ad4ee39ad365ff1b83f182770
Nuitka
Standalone.py
19
125
https://github.com/Nuitka/Nuitka.git
34
660
0
262
1,077
Python
{ "docstring": "Colliding DLL names for %s, checking identity of \\\n'%s' <-> '%s'.\\\nIgnoring non-identical DLLs for '%s'.\n%s used by:\n %s\ndifferent from\n%s used by\n %s", "language": "en", "n_whitespaces": 25, "n_words": 27, "vocab_size": 22 }
def copyUsedDLLs(source_dir, dist_dir, standalone_entry_points): # This is terribly complex, because we check the list of used DLLs # trying to avoid duplicates, and detecting errors with them not # being binary identical, so we can report them. And then of course # we also need to handle OS specifics. # pylint: disable=too-many-branches,too-many-locals,too-many-statements used_dlls = detectUsedDLLs( source_dir=source_dir, standalone_entry_points=standalone_entry_points, use_cache=not Options.shallNotUseDependsExeCachedResults() and not Options.getWindowsDependencyTool() == "depends.exe", update_cache=not Options.shallNotStoreDependsExeCachedResults() and not Options.getWindowsDependencyTool() == "depends.exe", ) removed_dlls = set() warned_about = set() # Fist make checks and remove some. for dll_filename1, sources1 in tuple(iterItems(used_dlls)): if dll_filename1 in removed_dlls: continue for dll_filename2, sources2 in tuple(iterItems(used_dlls)): if dll_filename1 == dll_filename2: continue if dll_filename2 in removed_dlls: continue # Colliding basenames are an issue to us. if os.path.basename(dll_filename1) != os.path.basename(dll_filename2): continue # May already have been removed earlier if dll_filename1 not in used_dlls: continue if dll_filename2 not in used_dlls: continue dll_name = os.path.basename(dll_filename1) if Options.isShowInclusion(): inclusion_logger.info( % (dll_name, dll_filename1, dll_filename2) ) # Check that if a DLL has the same name, if it's identical, then it's easy. if haveSameFileContents(dll_filename1, dll_filename2): del used_dlls[dll_filename2] removed_dlls.add(dll_filename2) continue # For Win32 we can check out file versions. if Utils.isWin32Windows(): dll_version1 = getWindowsDLLVersion(dll_filename1) dll_version2 = getWindowsDLLVersion(dll_filename2) if dll_version2 < dll_version1: del used_dlls[dll_filename2] removed_dlls.add(dll_filename2) solved = True elif dll_version1 < dll_version2: del used_dlls[dll_filename1] removed_dlls.add(dll_filename1) solved = True else: solved = False if solved: if dll_name not in warned_about and dll_name not in ms_runtime_dlls: warned_about.add(dll_name) inclusion_logger.warning( "Conflicting DLLs for '%s' in your installation, newest file version used, hoping for the best." % dll_name ) continue # So we have conflicting DLLs, in which case we do report the fact. inclusion_logger.warning( % ( dll_name, dll_filename1, "\n ".join(sources1), dll_filename2, "\n ".join(sources2), ) ) del used_dlls[dll_filename2] removed_dlls.add(dll_filename2) dll_map = [] for dll_filename, sources in iterItems(used_dlls): dll_name = os.path.basename(dll_filename) target_path = os.path.join(dist_dir, dll_name) # Sometimes DLL dependencies were copied there already. if not os.path.exists(target_path): copyFile(dll_filename, target_path) dll_map.append((dll_filename, dll_name)) if Options.isShowInclusion(): inclusion_logger.info( "Included used shared library '%s' (used by %s)." % (dll_filename, ", ".join(sources)) ) if Utils.isMacOS(): # For macOS, the binary and the DLLs needs to be changed to reflect # the relative DLL location in the ".dist" folder. 
for standalone_entry_point in standalone_entry_points: fixupBinaryDLLPathsMacOS( binary_filename=standalone_entry_point.dest_path, dll_map=dll_map, original_location=standalone_entry_point.source_path, ) for original_path, dll_filename in dll_map: fixupBinaryDLLPathsMacOS( binary_filename=os.path.join(dist_dir, dll_filename), dll_map=dll_map, original_location=original_path, ) # Remove code signature from CPython installed library candidate = os.path.join( dist_dir, "Python", ) if os.path.exists(candidate): removeMacOSCodeSignature(candidate) # Remove or update rpath settings. if Utils.getOS() in ("Linux", "Darwin"): # For Linux, the "rpath" of libraries may be an issue and must be # removed. if Utils.isMacOS(): start = 0 else: start = 1 for standalone_entry_point in standalone_entry_points[start:]: count = relpath( path=standalone_entry_point.dest_path, start=dist_dir ).count(os.path.sep) rpath = os.path.join("$ORIGIN", *([".."] * count)) setSharedLibraryRPATH(standalone_entry_point.dest_path, rpath) for _original_path, dll_filename in dll_map: setSharedLibraryRPATH(os.path.join(dist_dir, dll_filename), "$ORIGIN") if Utils.isWin32Windows(): if python_version < 0x300: # For Win32, we might have to remove SXS paths for standalone_entry_point in standalone_entry_points[1:]: removeSxsFromDLL(standalone_entry_point.dest_path) for _original_path, dll_filename in dll_map: removeSxsFromDLL(os.path.join(dist_dir, dll_filename))
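The first pass of copyUsedDLLs above drops a DLL when another path with the same basename has byte-identical contents, and only warns when the two genuinely differ. A toy sketch of that pass with file contents simulated as in-memory bytes (the real code compares files on disk and, on Windows, falls back to DLL version numbers):

import os

used = {
    "/app/libfoo.so": b"\x7fELF-v1",
    "/vendor/libfoo.so": b"\x7fELF-v1",   # same basename, identical bytes -> dropped
    "/other/libbar.so": b"\x7fELF-v2",
}

removed = set()
for path1, blob1 in list(used.items()):
    for path2, blob2 in list(used.items()):
        if path1 == path2 or path1 in removed or path2 in removed:
            continue
        if os.path.basename(path1) != os.path.basename(path2):
            continue
        if blob1 == blob2:
            used.pop(path2)               # duplicates: keep one copy only
            removed.add(path2)
        else:
            print("conflicting DLLs:", path1, path2)

print(sorted(used))   # ['/app/libfoo.so', '/other/libbar.so']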
48,671
197,718
275
sympy/integrals/transforms.py
81
30
def _laplace_rule_diff(f, t, s, doit=True, **hints): hints.pop('simplify', True) a = Wild('a', exclude=[t]) y = Wild('y') n = Wild('n', exclude=[t]) g = WildFunction('g', nargs=1) ma1 = f.match(a*Derivative(g, (t, n))) if ma1 and ma1[g].args[0] == t and ma1[n].is_integer: debug('_laplace_apply_rules match:') debug(' f: %s'%(f,)) debug(' rule: time derivative (1.11, 1.12)') d = [] for k in range(ma1[n]): if k==0: y = ma1[g].func(t).subs(t, 0) else: y = Derivative(ma1[g].func(t), (t, k)).subs(t, 0) d.append(s**(ma1[n]-k
include the coefficient in L(A*x')
_laplace_rule_diff
392c40aceadd4c7cdeed0fceb93a763927dc0ca1
sympy
transforms.py
20
22
https://github.com/sympy/sympy.git
6
258
0
61
404
Python
{ "docstring": "\n This internal helper function tries to transform an expression containing\n a derivative of an undefined function and returns `None` if it cannot\n do it.\n ", "language": "en", "n_whitespaces": 37, "n_words": 24, "vocab_size": 22 }
def _laplace_rule_diff(f, t, s, doit=True, **hints): hints.pop('simplify', True) a = Wild('a', exclude=[t]) y = Wild('y') n = Wild('n', exclude=[t]) g = WildFunction('g', nargs=1) ma1 = f.match(a*Derivative(g, (t, n))) if ma1 and ma1[g].args[0] == t and ma1[n].is_integer: debug('_laplace_apply_rules match:') debug(' f: %s'%(f,)) debug(' rule: time derivative (1.11, 1.12)') d = [] for k in range(ma1[n]): if k==0: y = ma1[g].func(t).subs(t, 0) else: y = Derivative(ma1[g].func(t), (t, k)).subs(t, 0) d.append(s**(ma1[n]-k-1)*y) r = s**ma1[n]*_laplace_apply_rules(ma1[g].func(t), t, s, doit=doit, **hints) return ma1[a]*(r - Add(*d)) return None
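The helper above applies the time-derivative rule of the Laplace transform, L{f^(n)(t)} = s^n F(s) - sum_{k=0}^{n-1} s^(n-1-k) f^(k)(0). A short SymPy check of the n = 1 case on a concrete function (f(t) = exp(-2t)); the example is purely illustrative:

from sympy import symbols, exp, diff, laplace_transform, simplify

t, s = symbols("t s", positive=True)
f = exp(-2 * t)

F = laplace_transform(f, t, s, noconds=True)              # 1/(s + 2)
lhs = laplace_transform(diff(f, t), t, s, noconds=True)   # transform of f'
rhs = s * F - f.subs(t, 0)                                # s*F(s) - f(0)

print(simplify(lhs - rhs))   # 0 -> the rule holds for this example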
17,193
81,376
752
awx/sso/pipeline.py
374
25
def _check_flag(user, flag, attributes, user_flags_settings): new_flag = False is_role_key = "is_%s_role" % (flag) is_attr_key = "is_%s_attr" % (flag) is_value_key = "is_%s_value" % (flag) remove_setting = "remove_%ss" % (flag) # Check to see if we are respecting a role and, if so, does our user have that role? required_roles = user_flags_settings.get(is_role_key, None) if required_roles: matching_roles = _get_matches(required_roles, attributes.get('Role', [])) # We do a 2 layer check here so that we don't spit out the else message if there is no role defined if matching_roles: logger.debug("User %s has %s role(s) %s" % (user.username, flag, ', '.join(matching_roles))) new_flag = True else: logger.debug("User %s is missing the %s role(s) %s" % (user.username, flag, ', '.join(required_roles))) # Next, check to see if we are respecting an attribute; this will take priority over the role if its defined attr_setting = user_flags_settings.get(is_attr_key, None) if attr_setting and attributes.get(attr_setting, None): # Do we have a required value for the attribute required_value = user_flags_settings.get(is_value_key, None) if required_value:
Allow multiple values in SOCIAL_AUTH_SAML_USER_FLAGS_BY_ATTR.is_*_[value|role] settings (#12558)
_check_flag
782667a34ee45bfe825b29db39c67d4465391bdb
awx
pipeline.py
19
40
https://github.com/ansible/awx.git
12
339
0
171
561
Python
{ "docstring": "\n Helper function to set the is_superuser is_system_auditor flags for the SAML adapter\n Returns the new flag and whether or not it changed the flag\n ", "language": "en", "n_whitespaces": 34, "n_words": 24, "vocab_size": 20 }
def _check_flag(user, flag, attributes, user_flags_settings): new_flag = False is_role_key = "is_%s_role" % (flag) is_attr_key = "is_%s_attr" % (flag) is_value_key = "is_%s_value" % (flag) remove_setting = "remove_%ss" % (flag) # Check to see if we are respecting a role and, if so, does our user have that role? required_roles = user_flags_settings.get(is_role_key, None) if required_roles: matching_roles = _get_matches(required_roles, attributes.get('Role', [])) # We do a 2 layer check here so that we don't spit out the else message if there is no role defined if matching_roles: logger.debug("User %s has %s role(s) %s" % (user.username, flag, ', '.join(matching_roles))) new_flag = True else: logger.debug("User %s is missing the %s role(s) %s" % (user.username, flag, ', '.join(required_roles))) # Next, check to see if we are respecting an attribute; this will take priority over the role if its defined attr_setting = user_flags_settings.get(is_attr_key, None) if attr_setting and attributes.get(attr_setting, None): # Do we have a required value for the attribute required_value = user_flags_settings.get(is_value_key, None) if required_value: # If so, check and see if the value of the attr matches the required value saml_user_attribute_value = attributes.get(attr_setting, None) matching_values = _get_matches(required_value, saml_user_attribute_value) if matching_values: logger.debug("Giving %s %s from attribute %s with matching values %s" % (user.username, flag, attr_setting, ', '.join(matching_values))) new_flag = True # if they don't match make sure that new_flag is false else: logger.debug( "Refusing %s for %s because attr %s (%s) did not match value(s) %s" % (flag, user.username, attr_setting, ", ".join(saml_user_attribute_value), ', '.join(required_value)) ) new_flag = False # If there was no required value then we can just allow them in because of the attribute else: logger.debug("Giving %s %s from attribute %s" % (user.username, flag, attr_setting)) new_flag = True # Get the users old flag old_value = getattr(user, "is_%s" % (flag)) # If we are not removing the flag and they were a system admin and now we don't want them to be just return remove_flag = user_flags_settings.get(remove_setting, True) if not remove_flag and (old_value and not new_flag): logger.debug("Remove flag %s preventing removal of %s for %s" % (remove_flag, flag, user.username)) return old_value, False # If the user was flagged and we are going to make them not flagged make sure there is a message if old_value and not new_flag: logger.debug("Revoking %s from %s" % (flag, user.username)) return new_flag, old_value != new_flag
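The helper above derives a user flag from SAML roles/attributes and respects a remove_* setting before revoking an existing flag. A condensed sketch of that decision for the superuser flag only; the setting keys are simplified and _get_matches-style globbing is reduced to a set intersection:

def check_flag(old_value, user_roles, settings):
    required = settings.get("is_superuser_role") or []
    new_value = bool(set(required) & set(user_roles))        # user holds a required role?
    if not settings.get("remove_superusers", True) and old_value and not new_value:
        return old_value, False                              # removal suppressed by setting
    return new_value, new_value != old_value                 # (flag, did it change?)

print(check_flag(False, ["awx_admins"], {"is_superuser_role": ["awx_admins"]}))
# (True, True)  -> flag granted
print(check_flag(True, [], {"is_superuser_role": ["awx_admins"], "remove_superusers": False}))
# (True, False) -> existing flag kept because removal is disabled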
72,983
249,543
108
tests/storage/test_event_federation.py
30
12
def test_get_backfill_points_in_room(self): setup_info = self._setup_room_for_backfill_tests() room_id = setup_info.room_id backfill_points = self.get_success( self.store.get_backfill_points_in_room(room_id) ) backfill_event_ids = [backfill_point[0] for backfi
Only try to backfill event if we haven't tried before recently (#13635) Only try to backfill event if we haven't tried before recently (exponential backoff). No need to keep trying the same backfill point that fails over and over. Fix https://github.com/matrix-org/synapse/issues/13622 Fix https://github.com/matrix-org/synapse/issues/8451 Follow-up to https://github.com/matrix-org/synapse/pull/13589 Part of https://github.com/matrix-org/synapse/issues/13356
test_get_backfill_points_in_room
ac1a31740b6d0dfda4d57a25762aaddfde981caf
synapse
test_event_federation.py
11
10
https://github.com/matrix-org/synapse.git
2
67
0
26
115
Python
{ "docstring": "\n Test to make sure we get some backfill points\n ", "language": "en", "n_whitespaces": 24, "n_words": 9, "vocab_size": 9 }
def test_get_backfill_points_in_room(self): setup_info = self._setup_room_for_backfill_tests() room_id = setup_info.room_id backfill_points = self.get_success( self.store.get_backfill_points_in_room(room_id) ) backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points] self.assertListEqual( backfill_event_ids, ["b6", "b5", "b4", "2", "b3", "b2", "b1"] )
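The commit referenced by this record gates backfill retries with exponential backoff so the same failing backfill point is not retried over and over. A minimal sketch of such a gate; the base interval and cap are invented numbers, not Synapse's actual settings:

import datetime

BACKOFF_BASE = datetime.timedelta(hours=1)
BACKOFF_CAP = datetime.timedelta(days=7)

def ready_to_retry(last_attempt, num_failed_attempts, now):
    wait = min(BACKOFF_BASE * (2 ** num_failed_attempts), BACKOFF_CAP)
    return now - last_attempt >= wait

now = datetime.datetime(2022, 9, 1, 12, 0)
print(ready_to_retry(now - datetime.timedelta(hours=3), 1, now))   # True  (2h wait has passed)
print(ready_to_retry(now - datetime.timedelta(hours=3), 3, now))   # False (needs 8h between tries)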