Columns (name: dtype, observed range):
- ast_errors: string, lengths 0 to 3.2k
- d_id: int64, 44 to 121k
- id: int64, 70 to 338k
- n_whitespaces: int64, 3 to 14k
- path: string, lengths 8 to 134
- n_words: int64, 4 to 4.82k
- n_identifiers: int64, 1 to 131
- random_cut: string, lengths 16 to 15.8k
- commit_message: string, lengths 2 to 15.3k
- fun_name: string, lengths 1 to 84
- commit_id: string, length exactly 40
- repo: string, lengths 3 to 28
- file_name: string, lengths 5 to 79
- ast_levels: int64, 6 to 31
- nloc: int64, 1 to 548
- url: string, lengths 31 to 59
- complexity: int64, 1 to 66
- token_counts: int64, 6 to 2.13k
- n_ast_errors: int64, 0 to 28
- vocab_size: int64, 4 to 1.11k
- n_ast_nodes: int64, 15 to 19.2k
- language: stringclasses, 1 value
- documentation: dict
- code: string, lengths 101 to 62.2k
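For orientation, a single row under the columns above can be sketched as a Python TypedDict; the sample rows below appear to list their fields in this same order. This is purely illustrative: the class name and the inline comments are assumptions inferred from the column listing and the sample rows, not part of any official schema or loader.

```python
from typing import TypedDict


class CodeRow(TypedDict):
    """Illustrative row layout inferred from the column listing above."""
    ast_errors: str        # may be empty; sample rows show decorator lines here
    d_id: int
    id: int
    n_whitespaces: int
    path: str              # file path within the repository
    n_words: int
    n_identifiers: int
    random_cut: str        # truncated slice of `code` in the samples
    commit_message: str
    fun_name: str
    commit_id: str         # 40-character string in the samples
    repo: str
    file_name: str
    ast_levels: int
    nloc: int
    url: str               # repository URL
    complexity: int
    token_counts: int
    n_ast_errors: int
    vocab_size: int
    n_ast_nodes: int
    language: str          # single class; "Python" in the samples
    documentation: dict    # docstring plus language/whitespace/word/vocab counts
    code: str              # full function source
```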
ast_errors: @sympify_method_args | d_id: 48,788 | id: 198,089 | n_whitespaces: 34 | path: sympy/core/expr.py | n_words: 12 | n_identifiers: 7
def _imaginary_unit_as_coefficient(arg): if getattr(arg, 'is_real', True): return None else: return arg.as_coefficient(
move _imaginary_unit_as_coefficient to sympy.core.expr
fun_name: _imaginary_unit_as_coefficient | commit_id: bad8e3c1d614a05a0b1c6a05c21720f8751f0f2b | repo: sympy | file_name: expr.py | ast_levels: 11 | nloc: 5 | url: https://github.com/sympy/sympy.git | complexity: 2 | token_counts: 29 | n_ast_errors: 1 | vocab_size: 11 | n_ast_nodes: 54 | language: Python
{ "docstring": " Helper to extract symbolic coefficient for imaginary unit ", "language": "en", "n_whitespaces": 9, "n_words": 8, "vocab_size": 8 }
def _imaginary_unit_as_coefficient(arg): if getattr(arg, 'is_real', True): return None else: return arg.as_coefficient(S.ImaginaryUnit) @sympify_method_args
d_id: 83,198 | id: 279,959 | n_whitespaces: 100 | path: keras/optimizers/optimizer_experimental/optimizer.py | n_words: 19 | n_identifiers: 8
def from_config(cls, config, custom_objects=None): if "learning_rate" in config: if isinstance(config["learning_rate"], dict): config["learning_rate"] = learning_rate_schedule.deserialize( config["learning_rate"], custom_objects=custom_objects
Some changes on the new optimizer: 1. Include `custom_objects` in `from_config` for deserializing custom learning rate. 2. Handle the error of seeing unrecognized variable with a better error message. PiperOrigin-RevId: 476505974
fun_name: from_config | commit_id: 51a6050b936ec87cd684fc1a052f79785ec9aaec | repo: keras | file_name: optimizer.py | ast_levels: 14 | nloc: 7 | url: https://github.com/keras-team/keras.git | complexity: 3 | token_counts: 52 | n_ast_errors: 0 | vocab_size: 18 | n_ast_nodes: 88 | language: Python
{ "docstring": "Creates an optimizer from its config.\n\n This method is the reverse of `get_config`, capable of instantiating the\n same optimizer from the config dictionary.\n\n Args:\n config: A Python dictionary, typically the output of get_config.\n custom_objects: A Python dictionary mapping names to additional\n user-defined Python objects needed to recreate this optimizer.\n\n Returns:\n An optimizer instance.\n ", "language": "en", "n_whitespaces": 134, "n_words": 53, "vocab_size": 41 }
def from_config(cls, config, custom_objects=None): if "learning_rate" in config: if isinstance(config["learning_rate"], dict): config["learning_rate"] = learning_rate_schedule.deserialize( config["learning_rate"], custom_objects=custom_objects ) return cls(**config)
d_id: 117,113 | id: 320,283 | n_whitespaces: 363 | path: src/paperless_mail/tests/test_parsers_live.py | n_words: 81 | n_identifiers: 32
def test_generate_pdf_from_mail(self): mail = self.parser.get_parsed(os.path.join(self.SAMPLE_FILES, "html.eml")) pdf_path = os.path.join(self.parser.tempdir, "html.eml.pdf") with open(pdf_path, "wb") as file: file.write(self.parser.generate_pdf_from_mail(mail)) converted = os.path.join( self.parser.tempdir, "html.eml.pdf.webp", ) run_convert( density=300, scale="500x5000>", alpha="remove",
add test comments
fun_name: test_generate_pdf_from_mail | commit_id: 4aa318598fd0dc6c5d4e08dd2a13e7bf614511ec | repo: paperless-ngx | file_name: test_parsers_live.py | ast_levels: 12 | nloc: 30 | url: https://github.com/paperless-ngx/paperless-ngx.git | complexity: 1 | token_counts: 176 | n_ast_errors: 0 | vocab_size: 70 | n_ast_nodes: 291 | language: Python
{ "docstring": "\n GIVEN:\n - Fresh start\n WHEN:\n - pdf generation from simple eml file is requested\n THEN:\n - gotenberg is called and the resulting file is returned and look as expected.\n ", "language": "en", "n_whitespaces": 91, "n_words": 29, "vocab_size": 23 }
def test_generate_pdf_from_mail(self): mail = self.parser.get_parsed(os.path.join(self.SAMPLE_FILES, "html.eml")) pdf_path = os.path.join(self.parser.tempdir, "html.eml.pdf") with open(pdf_path, "wb") as file: file.write(self.parser.generate_pdf_from_mail(mail)) converted = os.path.join( self.parser.tempdir, "html.eml.pdf.webp", ) run_convert( density=300, scale="500x5000>", alpha="remove", strip=True, trim=False, auto_orient=True, input_file=f"{pdf_path}", # Do net define an index to convert all pages. output_file=converted, logging_group=None, ) self.assertTrue(os.path.isfile(converted)) thumb_hash = self.imagehash(converted) # The created pdf is not reproducible. But the converted image should always look the same. expected_hash = self.imagehash( os.path.join(self.SAMPLE_FILES, "html.eml.pdf.webp"), ) self.assertEqual( thumb_hash, expected_hash, f"PDF looks different. Check if {converted} looks weird.", )
d_id: 53,380 | id: 212,750 | n_whitespaces: 460 | path: DemoPrograms/Demo_Desktop_Widget_Drive_Usage_Gauges.py | n_words: 100 | n_identifiers: 33
def new(self, degree=0, color=None): (center_x, center_y, angle, inner_radius, outer_radius, outer_color, pointer_color, origin_color, line_width) = self.all pointer_color = color or pointer_color if self.figure != []: for figure in self.figure: self.graph_elem.DeleteFigure(figure) self.figure = [] d = degree - 90 self.all[2] = degree dx1 = int(2 * inner_radius * math.sin(d / 180 * math.pi)) dy1 = int(2 * inner_radius * math.cos(d / 180 * math.pi)) dx2 = int(outer_radius * math.sin(d / 180 * math.pi)) dy2 = int(outer_radius * math.cos(d / 180 * math.pi)) self.figure.app
More demo programs updates 🤦‍♂️ wow.....I thought for sure these were checked in....
fun_name: new | commit_id: 430d1bc77fcdc0969a66ff370ec5e7e590423c83 | repo: PySimpleGUI | file_name: Demo_Desktop_Widget_Drive_Usage_Gauges.py | ast_levels: 13 | nloc: 19 | url: https://github.com/PySimpleGUI/PySimpleGUI.git | complexity: 4 | token_counts: 238 | n_ast_errors: 0 | vocab_size: 61 | n_ast_nodes: 354 | language: Python
{ "docstring": "\n Draw new pointer by angle, erase old pointer if exist\n degree defined as clockwise from negative x-axis.\n ", "language": "en", "n_whitespaces": 51, "n_words": 17, "vocab_size": 16 }
def new(self, degree=0, color=None): (center_x, center_y, angle, inner_radius, outer_radius, outer_color, pointer_color, origin_color, line_width) = self.all pointer_color = color or pointer_color if self.figure != []: for figure in self.figure: self.graph_elem.DeleteFigure(figure) self.figure = [] d = degree - 90 self.all[2] = degree dx1 = int(2 * inner_radius * math.sin(d / 180 * math.pi)) dy1 = int(2 * inner_radius * math.cos(d / 180 * math.pi)) dx2 = int(outer_radius * math.sin(d / 180 * math.pi)) dy2 = int(outer_radius * math.cos(d / 180 * math.pi)) self.figure.append(self.graph_elem.DrawLine((center_x - dx1, center_y - dy1), (center_x + dx2, center_y + dy2), color=pointer_color, width=line_width)) self.figure.append(self.graph_elem.DrawCircle((center_x, center_y), inner_radius, fill_color=origin_color, line_color=outer_color, line_width=line_width))
d_id: 10,241 | id: 50,918 | n_whitespaces: 610 | path: modules/image/object_detection/yolov3_darknet53_vehicles/processor.py | n_words: 172 | n_identifiers: 48
def postprocess(paths, images, data_out, score_thresh, label_names, output_dir, handle_id, visualization=True): results = data_out.copy_to_cpu() lod = data_out.lod()[0] check_dir(output_dir) if paths: assert type(paths) is list, "type(paths) is not list." if handle_id < len(paths): unhandled_paths = paths[handle_id:] unhandled_paths_num = len(unhandled_paths) else: unhandled_paths_num = 0 if images is not None: if handle_id < len(images): unhandled_paths = None unhandled_paths_num = len(images) - handle_id else: unhandled_paths_num = 0 output = list() for index in range(len(lod) - 1): output_i = {'data': []} if unhandled_paths and index < unhandled_paths_num: org_img_path = unhandled_paths[index] org_img = Image.open(org_img_path) else: org_img = images[index - unhandled_paths_num] org_img = org_img.astype(np.uint8) org_img = Image.fromarray(org_img[:, :, ::-1]) if visualization: org_img_path = get_save_image_name(org_img, output_dir, 'image_numpy_{}'.format((handle_id + index))) org_img.save(org_img_path) org_img_height = org_img.height org_img_width = org_img.width result_i = results[lod[index]:lod[index + 1]] for row in result_i: if len(row) != 6: continue if row[1] < score_thresh: continue category_id = int(row[0]) confidence = row[1] bbox = row[2:] dt = {} dt['label'] = label_names[category_id] dt['confidence'] = float(confidence) dt['left'], dt['top'], dt['right'], dt['bottom'] = clip_bbox(bbox, org_img_width, org_img_height) output_i['data'].append(dt) output.append(output_i) if visualization: output_i['save_path'] = draw
update yolov3_darknet53_vehicles (#1957) * update yolov3_darknet53_vehicles * update gpu config * update * add clean func * update save inference model
fun_name: postprocess | commit_id: 7a847a39b1da6e6867031f52f713d92391b9729d | repo: PaddleHub | file_name: processor.py | ast_levels: 19 | nloc: 50 | url: https://github.com/PaddlePaddle/PaddleHub.git | complexity: 13 | token_counts: 380 | n_ast_errors: 0 | vocab_size: 107 | n_ast_nodes: 611 | language: Python
{ "docstring": "\n postprocess the lod_tensor produced by Executor.run\n\n Args:\n paths (list[str]): The paths of images.\n images (list(numpy.ndarray)): images data, shape of each is [H, W, C]\n data_out (lod_tensor): data output of predictor.\n output_dir (str): The path to store output images.\n visualization (bool): Whether to save image or not.\n score_thresh (float): the low limit of bounding box.\n label_names (list[str]): label names.\n handle_id (int): The number of images that have been handled.\n\n Returns:\n res (list[dict]): The result of vehicles detecion. keys include 'data', 'save_path', the corresponding value is:\n data (dict): the result of object detection, keys include 'left', 'top', 'right', 'bottom', 'label', 'confidence', the corresponding value is:\n left (float): The X coordinate of the upper left corner of the bounding box;\n top (float): The Y coordinate of the upper left corner of the bounding box;\n right (float): The X coordinate of the lower right corner of the bounding box;\n bottom (float): The Y coordinate of the lower right corner of the bounding box;\n label (str): The label of detection result;\n confidence (float): The confidence of detection result.\n save_path (str): The path to save output images.\n ", "language": "en", "n_whitespaces": 369, "n_words": 181, "vocab_size": 92 }
def postprocess(paths, images, data_out, score_thresh, label_names, output_dir, handle_id, visualization=True): results = data_out.copy_to_cpu() lod = data_out.lod()[0] check_dir(output_dir) if paths: assert type(paths) is list, "type(paths) is not list." if handle_id < len(paths): unhandled_paths = paths[handle_id:] unhandled_paths_num = len(unhandled_paths) else: unhandled_paths_num = 0 if images is not None: if handle_id < len(images): unhandled_paths = None unhandled_paths_num = len(images) - handle_id else: unhandled_paths_num = 0 output = list() for index in range(len(lod) - 1): output_i = {'data': []} if unhandled_paths and index < unhandled_paths_num: org_img_path = unhandled_paths[index] org_img = Image.open(org_img_path) else: org_img = images[index - unhandled_paths_num] org_img = org_img.astype(np.uint8) org_img = Image.fromarray(org_img[:, :, ::-1]) if visualization: org_img_path = get_save_image_name(org_img, output_dir, 'image_numpy_{}'.format((handle_id + index))) org_img.save(org_img_path) org_img_height = org_img.height org_img_width = org_img.width result_i = results[lod[index]:lod[index + 1]] for row in result_i: if len(row) != 6: continue if row[1] < score_thresh: continue category_id = int(row[0]) confidence = row[1] bbox = row[2:] dt = {} dt['label'] = label_names[category_id] dt['confidence'] = float(confidence) dt['left'], dt['top'], dt['right'], dt['bottom'] = clip_bbox(bbox, org_img_width, org_img_height) output_i['data'].append(dt) output.append(output_i) if visualization: output_i['save_path'] = draw_bounding_box_on_image(org_img_path, output_i['data'], output_dir) return output
ast_errors: @pytest.fixture(params=_index_or_series_objs.keys()) | d_id: 40,068 | id: 167,616 | n_whitespaces: 102 | path: pandas/conftest.py | n_words: 59 | n_identifiers: 28
def series_with_multilevel_index() -> Series: arrays = [ ["bar", "bar", "baz", "baz", "qux", "qux", "foo", "foo"], ["one", "two", "one", "two", "one", "two", "one", "two"], ] tuples = zip(*arrays) index = MultiIndex.from_tuples(tuples) da
TYP: misc return type annotations (#47558)
fun_name: series_with_multilevel_index | commit_id: f538568afc2c76c2d738d32e3544cf9fe6742960 | repo: pandas | file_name: conftest.py | ast_levels: 10 | nloc: 14 | url: https://github.com/pandas-dev/pandas.git | complexity: 1 | token_counts: 92 | n_ast_errors: 1 | vocab_size: 45 | n_ast_nodes: 249 | language: Python
{ "docstring": "\n Fixture with a Series with a 2-level MultiIndex.\n ", "language": "en", "n_whitespaces": 15, "n_words": 8, "vocab_size": 6 }
def series_with_multilevel_index() -> Series: arrays = [ ["bar", "bar", "baz", "baz", "qux", "qux", "foo", "foo"], ["one", "two", "one", "two", "one", "two", "one", "two"], ] tuples = zip(*arrays) index = MultiIndex.from_tuples(tuples) data = np.random.randn(8) ser = Series(data, index=index) ser[3] = np.NaN return ser _narrow_series = { f"{dtype.__name__}-series": tm.make_rand_series(name="a", dtype=dtype) for dtype in tm.NARROW_NP_DTYPES } _index_or_series_objs = {**indices_dict, **_series, **_narrow_series} @pytest.fixture(params=_index_or_series_objs.keys())
d_id: 106,436 | id: 307,668 | n_whitespaces: 46 | path: homeassistant/components/group/__init__.py | n_words: 10 | n_identifiers: 3
def _async_stop(self) -> None: if self._async_unsub_state_changed: self._async_unsub_state_changed() self._async_unsub_state_ch
Improve type hints in group (#78350)
fun_name: _async_stop | commit_id: 5cccb248307d138a33c353544c57dc997b4fe917 | repo: core | file_name: __init__.py | ast_levels: 9 | nloc: 8 | url: https://github.com/home-assistant/core.git | complexity: 2 | token_counts: 23 | n_ast_errors: 0 | vocab_size: 10 | n_ast_nodes: 41 | language: Python
{ "docstring": "Unregister the group from Home Assistant.\n\n This method must be run in the event loop.\n ", "language": "en", "n_whitespaces": 29, "n_words": 15, "vocab_size": 14 }
def _async_stop(self) -> None: if self._async_unsub_state_changed: self._async_unsub_state_changed() self._async_unsub_state_changed = None
ast_errors: @pytest.fixture(scope="session") | d_id: 11,567 | id: 56,814 | n_whitespaces: 129 | path: tests/conftest.py | n_words: 51 | n_identifiers: 12
def test_database_connection_url(generate_test_database_connection_url): url = generate_test_database_connection_url if url is None: yield None else: # TODO: https://github.com/PrefectHQ/orion/issues/2045 # Also temporarily override the environment variable, so that child # subprocesses that we spin off are correctly configured as well original_envvar = os.environ.get("PREFECT_ORION_DATABASE_CONNECTION_URL") os.environ["PREFECT_ORION_DATABASE_CONNECTION_URL"] = url with temporary_settings({PREFECT_ORION_DATABASE_CONNECTION_URL: url}): yield url os.environ["PREFECT_ORION_DATABASE_CONNECTION_URL"] = original_envvar @pytest.
:facepalm: I got bitten by the async fixture context issue. Fixed and added comments to help future developers.
fun_name: test_database_connection_url | commit_id: ef032ee4a8f5d357a6e8dadf4021177ccc71f767 | repo: prefect | file_name: conftest.py | ast_levels: 14 | nloc: 10 | url: https://github.com/PrefectHQ/prefect.git | complexity: 2 | token_counts: 56 | n_ast_errors: 1 | vocab_size: 39 | n_ast_nodes: 122 | language: Python
{ "docstring": "\n Update the setting for the database connection url to the generated value from\n `generate_test_database_connection_url`\n\n This _must_ be separate from the generation of the test url because async fixtures\n are run in a separate context from the test suite.\n ", "language": "en", "n_whitespaces": 54, "n_words": 38, "vocab_size": 28 }
def test_database_connection_url(generate_test_database_connection_url): url = generate_test_database_connection_url if url is None: yield None else: # TODO: https://github.com/PrefectHQ/orion/issues/2045 # Also temporarily override the environment variable, so that child # subprocesses that we spin off are correctly configured as well original_envvar = os.environ.get("PREFECT_ORION_DATABASE_CONNECTION_URL") os.environ["PREFECT_ORION_DATABASE_CONNECTION_URL"] = url with temporary_settings({PREFECT_ORION_DATABASE_CONNECTION_URL: url}): yield url os.environ["PREFECT_ORION_DATABASE_CONNECTION_URL"] = original_envvar @pytest.fixture(scope="session")
d_id: 27,941 | id: 125,665 | n_whitespaces: 84 | path: python/ray/tune/examples/optuna_define_by_run_example.py | n_words: 50 | n_identifiers: 9
def define_by_run_func(trial) -> Optional[Dict[str, Any]]: # This param is not used in the objective function. activation = trial.suggest_categorical("activation", ["relu", "tanh"]) trial.suggest_float("width", 0, 20) trial.suggest_float("height", -100, 100) # Define-by-run allows for conditional search spaces. if activation == "relu": trial.suggest_float("
[air/tuner/docs] Update docs for Tuner() API 2a: Tune examples (non-docs) (#26931) Splitting up #26884: This PR includes changes to use Tuner() instead of tune.run() for all examples included in python/ray/tune/examples Signed-off-by: xwjiang2010 <[email protected]> Signed-off-by: Kai Fricke <[email protected]> Co-authored-by: xwjiang2010 <[email protected]> Co-authored-by: Richard Liaw <[email protected]>
fun_name: define_by_run_func | commit_id: 8d7b865614f3635def12c42b653f8acd8b4ae56a | repo: ray | file_name: optuna_define_by_run_example.py | ast_levels: 10 | nloc: 18 | url: https://github.com/ray-project/ray.git | complexity: 2 | token_counts: 72 | n_ast_errors: 0 | vocab_size: 46 | n_ast_nodes: 127 | language: Python
{ "docstring": "Define-by-run function to create the search space.\n\n Ensure no actual computation takes place here. That should go into\n the trainable passed to ``Tuner`` (in this example, that's\n ``easy_objective``).\n\n For more information, see https://optuna.readthedocs.io/en/stable\\\n/tutorial/10_key_features/002_configurations.html\n\n This function should either return None or a dict with constant values.\n ", "language": "en", "n_whitespaces": 63, "n_words": 46, "vocab_size": 42 }
def define_by_run_func(trial) -> Optional[Dict[str, Any]]: # This param is not used in the objective function. activation = trial.suggest_categorical("activation", ["relu", "tanh"]) trial.suggest_float("width", 0, 20) trial.suggest_float("height", -100, 100) # Define-by-run allows for conditional search spaces. if activation == "relu": trial.suggest_float("mult", 1, 2) # Return all constants in a dictionary. return {"steps": 100}
d_id: 43,616 | id: 181,843 | n_whitespaces: 142 | path: tpot/base.py | n_words: 34 | n_identifiers: 16
def _compile_to_sklearn(self, expr): sklearn_pipeline_str = generate_pipeline_code( expr_to_tree(expr, self._pset), self.operators ) sklearn_pipeline = eval(sklearn_pipeline_str, self.operators_context) sklearn_pipeline.memory = self._memory if self.random_state: # Fix random sta
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
fun_name: _compile_to_sklearn | commit_id: 388616b6247ca4ea8de4e2f340d6206aee523541 | repo: tpot | file_name: base.py | ast_levels: 11 | nloc: 11 | url: https://github.com/EpistasisLab/tpot.git | complexity: 2 | token_counts: 61 | n_ast_errors: 0 | vocab_size: 30 | n_ast_nodes: 97 | language: Python
{ "docstring": "Compile a DEAP pipeline into a sklearn pipeline.\n\n Parameters\n ----------\n expr: DEAP individual\n The DEAP pipeline to be compiled\n\n Returns\n -------\n sklearn_pipeline: sklearn.pipeline.Pipeline\n ", "language": "en", "n_whitespaces": 83, "n_words": 23, "vocab_size": 19 }
def _compile_to_sklearn(self, expr): sklearn_pipeline_str = generate_pipeline_code( expr_to_tree(expr, self._pset), self.operators ) sklearn_pipeline = eval(sklearn_pipeline_str, self.operators_context) sklearn_pipeline.memory = self._memory if self.random_state: # Fix random state when the operator allows set_param_recursive( sklearn_pipeline.steps, "random_state", self.random_state ) return sklearn_pipeline
d_id: 47,902 | id: 196,402 | n_whitespaces: 189 | path: sympy/matrices/repmatrix.py | n_words: 49 | n_identifiers: 13
def equals(self, other, failing_expression=False): if self.shape != getattr(other, 'shape', None): retu
Moved imports to higher level
fun_name: equals | commit_id: 59d22b6bb7287613d598611027f640d068ca5748 | repo: sympy | file_name: repmatrix.py | ast_levels: 13 | nloc: 12 | url: https://github.com/sympy/sympy.git | complexity: 7 | token_counts: 93 | n_ast_errors: 0 | vocab_size: 32 | n_ast_nodes: 141 | language: Python
{ "docstring": "Applies ``equals`` to corresponding elements of the matrices,\n trying to prove that the elements are equivalent, returning True\n if they are, False if any pair is not, and None (or the first\n failing expression if failing_expression is True) if it cannot\n be decided if the expressions are equivalent or not. This is, in\n general, an expensive operation.\n\n Examples\n ========\n\n >>> from sympy import Matrix\n >>> from sympy.abc import x\n >>> A = Matrix([x*(x - 1), 0])\n >>> B = Matrix([x**2 - x, 0])\n >>> A == B\n False\n >>> A.simplify() == B.simplify()\n True\n >>> A.equals(B)\n True\n >>> A.equals(2)\n False\n\n See Also\n ========\n sympy.core.expr.Expr.equals\n ", "language": "en", "n_whitespaces": 264, "n_words": 103, "vocab_size": 72 }
def equals(self, other, failing_expression=False): if self.shape != getattr(other, 'shape', None): return False rv = True for i in range(self.rows): for j in range(self.cols): ans = self[i, j].equals(other[i, j], failing_expression) if ans is False: return False elif ans is not True and rv is True: rv = ans return rv
d_id: 73,623 | id: 251,177 | n_whitespaces: 44 | path: mitmproxy/addons/blocklist.py | n_words: 8 | n_identifiers: 6
def load(self, loader): loader.add_option( "b
use Python 3.9+ typing
fun_name: load | commit_id: fdde9ba3b3caaa2654048cec0af07bfcc3a6a3f8 | repo: mitmproxy | file_name: blocklist.py | ast_levels: 8 | nloc: 11 | url: https://github.com/mitmproxy/mitmproxy.git | complexity: 1 | token_counts: 23 | n_ast_errors: 0 | vocab_size: 8 | n_ast_nodes: 37 | language: Python
{ "docstring": "\n Block matching requests and return an empty response with the specified HTTP status.\n Option syntax is \"/flow-filter/status-code\", where flow-filter describes\n which requests this rule should be applied to and status-code is the HTTP status code to return for\n blocked requests. The separator (\"/\" in the example) can be any character.\n Setting a non-standard status code of 444 will close the connection without sending a response.\n ", "language": "en", "n_whitespaces": 132, "n_words": 65, "vocab_size": 52 }
def load(self, loader): loader.add_option( "block_list", Sequence[str], [], )
d_id: 1,691 | id: 9,778 | n_whitespaces: 59 | path: gensim/models/translation_matrix.py | n_words: 24 | n_identifiers: 15
def train(self, tagged_docs): m1 = [self.source_lang_vec.dv[item.tags].flatten() for item in tagged_docs] m2 = [self.ta
Replace np.multiply with np.square and copyedit in translation_matrix.py (#3374) * Replace np.multiply with np.square and copyedit * Copyedit translation_matrix.py Co-authored-by: Michael Penkov <[email protected]>
fun_name: train | commit_id: 77c3a7ff5254346146d0e9eedf8e84fb6d577878 | repo: gensim | file_name: translation_matrix.py | ast_levels: 12 | nloc: 5 | url: https://github.com/RaRe-Technologies/gensim.git | complexity: 3 | token_counts: 76 | n_ast_errors: 0 | vocab_size: 17 | n_ast_nodes: 116 | language: Python
{ "docstring": "Build the translation matrix to map from the source model's vectors to target model's vectors\n\n Parameters\n ----------\n tagged_docs : list of :class:`~gensim.models.doc2vec.TaggedDocument`, Documents\n that will be used for training, both the source language document vector and\n target language document vector trained on those tagged documents.\n\n Returns\n -------\n numpy.ndarray\n Translation matrix that maps from the source model's vectors to target model's vectors.\n\n ", "language": "en", "n_whitespaces": 143, "n_words": 61, "vocab_size": 41 }
def train(self, tagged_docs): m1 = [self.source_lang_vec.dv[item.tags].flatten() for item in tagged_docs] m2 = [self.target_lang_vec.dv[item.tags].flatten() for item in tagged_docs] self.translation_matrix = np.linalg.lstsq(m2, m1, -1)[0] return self.translation_matrix
d_id: 12,993 | id: 62,568 | n_whitespaces: 35 | path: .venv/lib/python3.8/site-packages/pip/_vendor/html5lib/serializer.py | n_words: 20 | n_identifiers: 11
def serialize(input, tree="etree", encoding=None, **serializer_opts): # XXX: Should we cache this? walker = treewalkers.getTreeWalker(tree) s = HTMLSerializer(**serializer_opts) return s.render(walker(input), encoding)
upd; format
fun_name: serialize | commit_id: f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | repo: transferlearning | file_name: serializer.py | ast_levels: 9 | nloc: 4 | url: https://github.com/jindongwang/transferlearning.git | complexity: 1 | token_counts: 44 | n_ast_errors: 0 | vocab_size: 19 | n_ast_nodes: 73 | language: Python
{ "docstring": "Serializes the input token stream using the specified treewalker\n\n :arg input: the token stream to serialize\n\n :arg tree: the treewalker to use\n\n :arg encoding: the encoding to use\n\n :arg serializer_opts: any options to pass to the\n :py:class:`html5lib.serializer.HTMLSerializer` that gets created\n\n :returns: the tree serialized as a string\n\n Example:\n\n >>> from html5lib.html5parser import parse\n >>> from html5lib.serializer import serialize\n >>> token_stream = parse('<html><body><p>Hi!</p></body></html>')\n >>> serialize(token_stream, omit_optional_tags=False)\n '<html><head></head><body><p>Hi!</p></body></html>'\n\n ", "language": "en", "n_whitespaces": 109, "n_words": 66, "vocab_size": 43 }
def serialize(input, tree="etree", encoding=None, **serializer_opts): # XXX: Should we cache this? walker = treewalkers.getTreeWalker(tree) s = HTMLSerializer(**serializer_opts) return s.render(walker(input), encoding)
d_id: 1,853 | id: 10,563 | n_whitespaces: 83 | path: jina/parsers/__init__.py | n_words: 28 | n_identifiers: 13
def set_client_cli_parser(parser=None): if not parser: from jina.parsers.base import set_base_parser parser = set_base_parser() from jina.parsers.peapods.runtimes.remote import mixin_client_gateway_parser from jina.parsers.client import ( mixin_client_features_parser, mixin_comm_protocol_parser, ) mixin_client_gateway_parser(parser) mixin_client_features_parser(parser) mixin_comm_protocol_parser(parser)
refactor: use absolute imports (#4167)
fun_name: set_client_cli_parser | commit_id: cea300655ed8be70d74c390ca12e8b09fb741665 | repo: jina | file_name: __init__.py | ast_levels: 10 | nloc: 13 | url: https://github.com/jina-ai/jina.git | complexity: 2 | token_counts: 64 | n_ast_errors: 0 | vocab_size: 23 | n_ast_nodes: 99 | language: Python
{ "docstring": "Set the parser for the cli client\n\n :param parser: an optional existing parser to build upon\n :return: the parser\n ", "language": "en", "n_whitespaces": 28, "n_words": 19, "vocab_size": 15 }
def set_client_cli_parser(parser=None): if not parser: from jina.parsers.base import set_base_parser parser = set_base_parser() from jina.parsers.peapods.runtimes.remote import mixin_client_gateway_parser from jina.parsers.client import ( mixin_client_features_parser, mixin_comm_protocol_parser, ) mixin_client_gateway_parser(parser) mixin_client_features_parser(parser) mixin_comm_protocol_parser(parser) return parser
d_id: 5,141 | id: 27,924 | n_whitespaces: 111 | path: saleor/graphql/discount/mutations/sale_create.py | n_words: 36 | n_identifiers: 17
def send_sale_toggle_notification(info, instance, catalogue): manager = info.context.plugins now = datetime.now(pytz.utc) start_date = instance.start_date end_date = instance.end_date if (start_date and start_date <= now) and (not end_date or not end_date <= now): manager.sale_toggle(instance, catalogue)
New event for starting and ending sales (#10110) * Add sale started and sale ended webhooks * Add started_notification_sent and ended_notification_sent flags to Sale model * Add sale_webhook_schedule * Add send_sale_started_and_sale_ended_notifications discount task * Add tests for discount tasks * Move sale task celery beat schedule to settings * Add tests for sale_webhook_schedule * Add sale_started and sale_ended methods to PluginSample * Update send_sale_started_and_sale_ended_notifications logging * Update SaleUpdate mutation - ensure the notification is sent and the flag is changed if needed * Update SaleCreate mutation - send sale_creatd and sale_ended notifications * Optimize fetch_catalogue_info * Clean up * Apply code review suggestions * Add SALE_TOGGLE webhook * Use sale_toggle webhook instead of sale_started and sale_ended * Delete sale_started and sale_eded wbhooks * Drop notification flags from Sale model * Add missing docstrings and comments * Fix failing tests * Update changelog * Add description for SaleToggle event type * Update discount task and webhook schedule * Set notification_sent_datetime to current date by default * Fix typo in comment
fun_name: send_sale_toggle_notification | commit_id: 67492396aa41d068cac82e8fa328f218b5951d13 | repo: saleor | file_name: sale_create.py | ast_levels: 12 | nloc: 9 | url: https://github.com/saleor/saleor.git | complexity: 5 | token_counts: 79 | n_ast_errors: 0 | vocab_size: 26 | n_ast_nodes: 128 | language: Python
{ "docstring": "Send a notification about starting or ending sale if it hasn't been sent yet.\n\n Send the notification when the start date is before the current date and the\n sale is not already finished.\n ", "language": "en", "n_whitespaces": 54, "n_words": 33, "vocab_size": 25 }
def send_sale_toggle_notification(info, instance, catalogue): manager = info.context.plugins now = datetime.now(pytz.utc) start_date = instance.start_date end_date = instance.end_date if (start_date and start_date <= now) and (not end_date or not end_date <= now): manager.sale_toggle(instance, catalogue) instance.notification_sent_datetime = now instance.save(update_fields=["notification_sent_datetime"])
ast_errors: @bcoo_todense_p.def_impl | d_id: 26,536 | id: 119,028 | n_whitespaces: 11 | path: jax/experimental/sparse/bcoo.py | n_words: 10 | n_identifiers: 9
def bcoo_todense(data, indices, *, spinfo): return bcoo_todense_p.bind(jnp.asarray(data), jnp.asarray(indices), spinfo=spin
[sparse] generalize metadata argument in BCOO primitives
fun_name: bcoo_todense | commit_id: 2c20d82776fea482aaf52e18ebad4f7fce5c3a81 | repo: jax | file_name: bcoo.py | ast_levels: 9 | nloc: 2 | url: https://github.com/google/jax.git | complexity: 1 | token_counts: 35 | n_ast_errors: 1 | vocab_size: 10 | n_ast_nodes: 61 | language: Python
{ "docstring": "Convert batched sparse matrix to a dense matrix.\n\n Args:\n data : array of shape ``batch_dims + (nse,) + block_dims``.\n indices : array of shape ``batch_dims + (n_sparse, nse)``\n spinfo : BCOOInfo. In particular, this includes the shape\n of the matrix, which is equal to ``batch_dims + sparse_dims + block_dims``\n where ``len(sparse_dims) == n_sparse``\n\n Returns:\n mat : array with specified shape and dtype matching ``data``\n ", "language": "en", "n_whitespaces": 89, "n_words": 64, "vocab_size": 46 }
def bcoo_todense(data, indices, *, spinfo): return bcoo_todense_p.bind(jnp.asarray(data), jnp.asarray(indices), spinfo=spinfo) @bcoo_todense_p.def_impl
d_id: 30,685 | id: 135,648 | n_whitespaces: 48 | path: rllib/utils/actor_manager.py | n_words: 16 | n_identifiers: 10
def ignore_ray_errors(self) -> Iterator[ResultOrError]: return self._Iterator( [r for r in self.result_or_errors if not isinstance(r.ge
Refactor ActorManager to store underlying remote actors in dict. (#29953) Signed-off-by: Jun Gong <[email protected]>
fun_name: ignore_ray_errors | commit_id: b84dac2609bd587c43ed17bb6fa18fb7241a41de | repo: ray | file_name: actor_manager.py | ast_levels: 14 | nloc: 10 | url: https://github.com/ray-project/ray.git | complexity: 3 | token_counts: 38 | n_ast_errors: 0 | vocab_size: 16 | n_ast_nodes: 61 | language: Python
{ "docstring": "Return an iterator over the results, skipping only Ray errors.\n\n Similar to ignore_errors, but only skips Errors raised from the\n Ray framework. This is useful for application that wants to handle\n errors from user code differently.\n ", "language": "en", "n_whitespaces": 64, "n_words": 36, "vocab_size": 31 }
def ignore_ray_errors(self) -> Iterator[ResultOrError]: return self._Iterator( [r for r in self.result_or_errors if not isinstance(r.get(), RayError)] )
ast_errors: @dataclasses.dataclass | d_id: 117,906 | id: 321,782 | n_whitespaces: 70 | path: qutebrowser/utils/usertypes.py | n_words: 21 | n_identifiers: 7
def certificate_was_accepted(self) -> None: if not self.is_overridable(): return False if self._certificate_accepted is None: raise ValueError("No decision taken yet") return self._certificate_accepted
lint: Fix remaining pylint issues
fun_name: certificate_was_accepted | commit_id: ec8eebf99640d5a73072d05e73c6ea9b2ebea556 | repo: qutebrowser | file_name: usertypes.py | ast_levels: 10 | nloc: 7 | url: https://github.com/qutebrowser/qutebrowser.git | complexity: 3 | token_counts: 34 | n_ast_errors: 1 | vocab_size: 17 | n_ast_nodes: 67 | language: Python
{ "docstring": "Check whether the certificate was accepted by the user.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 8 }
def certificate_was_accepted(self) -> None: if not self.is_overridable(): return False if self._certificate_accepted is None: raise ValueError("No decision taken yet") return self._certificate_accepted @dataclasses.dataclass
d_id: 35,292 | id: 153,191 | n_whitespaces: 65 | path: modin/core/execution/ray/implementations/pandas_on_ray/partitioning/virtual_partition.py | n_words: 11 | n_identifiers: 6
def mask(self, row_indices, col_indices):
FIX-#3675: Expand virtual partitioning utility (#3886) Co-authored-by: mvashishtha <[email protected]> Co-authored-by: jeffreykennethli <[email protected]> Co-authored-by: Anatoly Myachev <[email protected]> Co-authored-by: Vasily Litvinov <[email protected]> Co-authored-by: Alexey Prutskov <[email protected]> Co-authored-by: Mahesh Vashishtha <[email protected]> Co-authored-by: Naren Krishna <[email protected]> Co-authored-by: Yaroslav Igoshev <[email protected]> Co-authored-by: Dmitry Chigarev <[email protected]> Co-authored-by: Yaroslav Igoshev <[email protected]> Co-authored-by: Doris Lee <[email protected]> Co-authored-by: Aditya Parameswaran <[email protected]> Co-authored-by: Rehan Sohail Durrani <[email protected]> Co-authored-by: Susmit Vengurlekar <[email protected]> Signed-off-by: Devin Petersohn <[email protected]>
fun_name: mask | commit_id: 8d1004fdbdaa05700613c8e6287641a732acf606 | repo: modin | file_name: virtual_partition.py | ast_levels: 12 | nloc: 6 | url: https://github.com/modin-project/modin.git | complexity: 1 | token_counts: 30 | n_ast_errors: 0 | vocab_size: 11 | n_ast_nodes: 47 | language: Python
{ "docstring": "\n Create (synchronously) a mask that extracts the indices provided.\n\n Parameters\n ----------\n row_indices : list-like, slice or label\n The row labels for the rows to extract.\n col_indices : list-like, slice or label\n The column labels for the columns to extract.\n\n Returns\n -------\n PandasOnRayDataframeVirtualPartition\n A new ``PandasOnRayDataframeVirtualPartition`` object,\n materialized.\n ", "language": "en", "n_whitespaces": 155, "n_words": 47, "vocab_size": 35 }
def mask(self, row_indices, col_indices): return ( self.force_materialization() .list_of_partitions_to_combine[0] .mask(row_indices, col_indices) )
d_id: 48,898 | id: 198,386 | n_whitespaces: 92 | path: sympy/integrals/intpoly.py | n_words: 46 | n_identifiers: 20
def left_integral3D(facets, index, expr, vertices, hp_param, degree): value = S.Zero facet = facets[index] x0 = vertices[fac
Cleanup loops and ranges
fun_name: left_integral3D | commit_id: 7d773eb18daaef3c54f34d1ac6cbc5b83a5bb16c | repo: sympy | file_name: intpoly.py | ast_levels: 14 | nloc: 10 | url: https://github.com/sympy/sympy.git | complexity: 2 | token_counts: 103 | n_ast_errors: 0 | vocab_size: 37 | n_ast_nodes: 149 | language: Python
{ "docstring": "Computes the left integral of Eq 10 in Chin et al.\n\n Explanation\n ===========\n\n For the 3D case, this is the sum of the integral values over constituting\n line segments of the face (which is accessed by facets[index]) multiplied\n by the distance between the first point of facet and that line segment.\n\n Parameters\n ==========\n\n facets :\n List of faces of the 3-Polytope.\n index :\n Index of face over which integral is to be calculated.\n expr :\n Input polynomial.\n vertices :\n List of vertices that constitute the 3-Polytope.\n hp_param :\n The hyperplane parameters of the face.\n degree :\n Degree of the ``expr``.\n\n Examples\n ========\n\n >>> from sympy.integrals.intpoly import left_integral3D\n >>> cube = [[(0, 0, 0), (0, 0, 5), (0, 5, 0), (0, 5, 5), (5, 0, 0),\\\n (5, 0, 5), (5, 5, 0), (5, 5, 5)],\\\n [2, 6, 7, 3], [3, 7, 5, 1], [7, 6, 4, 5], [1, 5, 4, 0],\\\n [3, 1, 0, 2], [0, 4, 6, 2]]\n >>> facets = cube[1:]\n >>> vertices = cube[0]\n >>> left_integral3D(facets, 3, 1, vertices, ([0, -1, 0], -5), 0)\n -50\n ", "language": "en", "n_whitespaces": 333, "n_words": 177, "vocab_size": 108 }
def left_integral3D(facets, index, expr, vertices, hp_param, degree): value = S.Zero facet = facets[index] x0 = vertices[facet[0]] facet_len = len(facet) for i, fac in enumerate(facet): side = (vertices[fac], vertices[facet[(i + 1) % facet_len]]) value += distance_to_side(x0, side, hp_param[0]) * \ lineseg_integrate(facet, i, side, expr, degree) return value
d_id: 6,017 | id: 32,887 | n_whitespaces: 111 | path: src/transformers/models/deberta/modeling_tf_deberta.py | n_words: 32 | n_identifiers: 21
def xdropout(self, inputs): mask = tf.cast( 1 - tf.compat.v1.distributions.Bernoulli(probs=1.0 - self.drop_prob).sample(sample_shape=shape_list(inputs)), tf.bool,
TF: XLA-trainable DeBERTa v2 (#18546) * fix deberta issues * add different code paths for gpu and tpu * shorter gpu take along axis * Stable Dropout without tf cond * variable must be float
fun_name: xdropout | commit_id: 34aad0dac000508015d09ed7cf7c88adb5a0e308 | repo: transformers | file_name: modeling_tf_deberta.py | ast_levels: 16 | nloc: 11 | url: https://github.com/huggingface/transformers.git | complexity: 2 | token_counts: 105 | n_ast_errors: 0 | vocab_size: 27 | n_ast_nodes: 143 | language: Python
{ "docstring": "\n Applies dropout to the inputs, as vanilla dropout, but also scales the remaining elements up by 1/drop_prob.\n ", "language": "en", "n_whitespaces": 32, "n_words": 17, "vocab_size": 16 }
def xdropout(self, inputs): mask = tf.cast( 1 - tf.compat.v1.distributions.Bernoulli(probs=1.0 - self.drop_prob).sample(sample_shape=shape_list(inputs)), tf.bool, ) scale = tf.convert_to_tensor(1.0 / (1 - self.drop_prob), dtype=tf.float32) if self.drop_prob > 0: inputs = tf.where(mask, 0.0, inputs) * scale
d_id: 26,500 | id: 118,955 | n_whitespaces: 122 | path: lib/tests/streamlit/legacy_add_rows_test.py | n_words: 35 | n_identifiers: 17
def test_deltas_that_melt_dataframes(self): deltas = self._get_deltas_that_melt_dataframe
Remove legacy "`add_rows` coalescing" from ForwardMsgQueue (#4485) Removes the `add_rows` legacy DataFrame concatenation implementation _from Python_. (To be clear: `add_rows` still works for legacy DataFrames, but the logic is frontend-only now. This is already how Arrow DataFrame concatenation is implemented.) ### Background - An app's script sends `Delta` messages to the frontend by sticking them in the `ForwardMsgQueue`. - Generally, `ForwardMsgQueue` has either 0 or 1 messages in it, because the main thread is constantly emptying the queue and sending its contents to the frontend, so messages don't have time to back up. However, sometimes 2+ messages will accumulate before the queue is emptied. - `ForwardMsgQueue` has a couple optimizations where it tries to avoid sending out redundant messages to the frontend - One of those optimizations relates to `add_rows`: for legacy DataFrame serialization *only*, if the queue has a DataFrame message *and* an add_rows message that appends to that same DataFrame, it will perform the add_rows *operation* on the original message and skip sending the add_rows message. (Again, this only applies to legacy DataFrame serialization - by default, apps use Arrow for DataFrame serialization, and this add_rows optimization doesn't exist for that code path: add_rows is *always* applied on the frontend for Arrow.) - `add_rows` will raise an error if the two DataFrames being concatenated are incompatible (for example, if they have different shapes). - Because `ForwardMsgQueue` currently does its enqueuing on the script thread, this isn't an issue: the script will catch the `add_rows` error and show an exception message on the frontend. ### The issue - We're moving to a world where `ScriptRunner` and the main thread are kept as separate as possible. - This means that `ScriptRunner` will no longer enqueue directly into the `ForwardMsgQueue`. Instead, the queue (which is owned by `AppSession`) will be filled only by `AppSession`, and only on the main thread. - This means that **`add_rows` errors will no longer be caught by the script thread**, and so the script won't be able to generate an exception message to send to the frontend. - As things currently stands, this would mean that add_rows errors will just be silently swallowed by the main thread, which we don't want. ### The solution + PR notes - We're just ripping out `add_rows` server-side handling for legacy DataFrames. This brings the `legacy` code path a bit closer to the `Arrow` code path and eliminates a lot of code. - The bulk of this PR is concerned with updating legacy DataFrame tests, many of which relied on `add_rows` happening on the server. - Notably, our e2e tests (with one exception) do *not* change, because there's no observable behavior changes on the frontend. - The one exception is that we did not (and still don't) have e2e tests for handling `add_rows` errors that happen on the frontend. And in fact, we do a terrible job with these errors - we just show an exception modal overlay with a bad error message and no indication that it's related to an add_rows operation. I'll create a ticket to address this (it'll require some product input.)
fun_name: test_deltas_that_melt_dataframes | commit_id: 0f76064dbb9b9405173effe7f872aa8a8dba69cc | repo: streamlit | file_name: legacy_add_rows_test.py | ast_levels: 13 | nloc: 8 | url: https://github.com/streamlit/streamlit.git | complexity: 2 | token_counts: 74 | n_ast_errors: 0 | vocab_size: 30 | n_ast_nodes: 116 | language: Python
{ "docstring": "Some element types require that their dataframes are\n 'melted' (https://pandas.pydata.org/docs/reference/api/pandas.melt.html)\n before being sent to the frontend. Test that the melting occurs.\n ", "language": "en", "n_whitespaces": 43, "n_words": 21, "vocab_size": 19 }
def test_deltas_that_melt_dataframes(self): deltas = self._get_deltas_that_melt_dataframes() for delta in deltas: el = delta(DATAFRAME) el._legacy_add_rows(NEW_ROWS) df_proto = _get_data_frame(self.get_delta_from_queue()) # Test that the add_rows delta is properly melted rows = df_proto.data.cols[0].int64s.data self.assertEqual([2, 3, 4, 2, 3, 4], rows)
d_id: 45,948 | id: 188,973 | n_whitespaces: 1,294 | path: psutil/_pslinux.py | n_words: 326 | n_identifiers: 47
def sensors_temperatures(): ret = collections.defaultdict(list) basenames = glob.glob('/sys/class/hwmon/hwmon*/temp*_*') # CentOS has an intermediate /device directory: # https://github.com/giampaolo/psutil/issues/971 # https://github.com/nicolargo/glances/issues/1060 basenames.extend(glob.glob('/sys/class/hwmon/hwmon*/device/temp*_*')) basenames = sorted(set([x.split('_')[0] for x in basenames])) # Only add the coretemp hwmon entries if they're not already in # /sys/class/hwmon/ # https://github.com/giampaolo/psutil/issues/1708 # https://github.com/giampaolo/psutil/pull/1648 basenames2 = glob.glob( '/sys/devices/platform/coretemp.*/hwmon/hwmon*/temp*_*') repl = re.compile('/sys/devices/platform/coretemp.*/hwmon/') for name in basenames2: altname = repl.sub('/sys/class/hwmon/', name) if altname not in basenames: basenames.append(name) for base in basenames: try: path = base + '_input' current = float(bcat(path)) / 1000.0 path = os.path.join(os.path.dirname(base), 'name') unit_name = cat(path).strip() except (IOError, OSError, ValueError): # A lot of things can go wrong here, so let's just skip the # whole entry. Sure thing is Linux's /sys/class/hwmon really # is a stinky broken mess. # https://github.com/giampaolo/psutil/issues/1009 # https://github.com/giampaolo/psutil/issues/1101 # https://github.com/giampaolo/psutil/issues/1129 # https://github.com/giampaolo/psutil/issues/1245 # https://github.com/giampaolo/psutil/issues/1323 continue high = bcat(base + '_max', fallback=None) critical = bcat(base + '_crit', fallback=None) label = cat(base + '_label', fallback='').strip() if high is not None: try: high = float(high) / 1000.0 except ValueError: high = None if critical is not None: try: critical = float(critical) / 1000.0 except ValueError: critical = None ret[unit_name].append((label, current, high, critical)) # Indication that no sensors were detected in /sys/class/hwmon/ if not basenames: basenames = glob.glob('/sys/class/thermal/thermal_zone*') basenames = sorted(set(basenames)) for base in basenames: try: path = os.path.join(base, 'temp') current = float(bcat(path)) / 1000.0 path = os.path.join(base, 'type') unit_name = cat(path).strip() except (IOError, OSError, ValueError) as err: debug(err) continue trip_paths = glob.glob(base + '/trip_point*') trip_points = set(['_'.join( os.path.basename(p).split('_')[0:3]) for p in trip_paths]) critical = None hi
[Linux] cat/bcat utils refactoring (#2053)
fun_name: sensors_temperatures | commit_id: 46cb6c212a870b36bd0af17c48dd29f53468734b | repo: psutil | file_name: _pslinux.py | ast_levels: 21 | nloc: 72 | url: https://github.com/giampaolo/psutil.git | complexity: 21 | token_counts: 563 | n_ast_errors: 0 | vocab_size: 150 | n_ast_nodes: 928 | language: Python
{ "docstring": "Return hardware (CPU and others) temperatures as a dict\n including hardware name, label, current, max and critical\n temperatures.\n\n Implementation notes:\n - /sys/class/hwmon looks like the most recent interface to\n retrieve this info, and this implementation relies on it\n only (old distros will probably use something else)\n - lm-sensors on Ubuntu 16.04 relies on /sys/class/hwmon\n - /sys/class/thermal/thermal_zone* is another one but it's more\n difficult to parse\n ", "language": "en", "n_whitespaces": 101, "n_words": 65, "vocab_size": 54 }
def sensors_temperatures(): ret = collections.defaultdict(list) basenames = glob.glob('/sys/class/hwmon/hwmon*/temp*_*') # CentOS has an intermediate /device directory: # https://github.com/giampaolo/psutil/issues/971 # https://github.com/nicolargo/glances/issues/1060 basenames.extend(glob.glob('/sys/class/hwmon/hwmon*/device/temp*_*')) basenames = sorted(set([x.split('_')[0] for x in basenames])) # Only add the coretemp hwmon entries if they're not already in # /sys/class/hwmon/ # https://github.com/giampaolo/psutil/issues/1708 # https://github.com/giampaolo/psutil/pull/1648 basenames2 = glob.glob( '/sys/devices/platform/coretemp.*/hwmon/hwmon*/temp*_*') repl = re.compile('/sys/devices/platform/coretemp.*/hwmon/') for name in basenames2: altname = repl.sub('/sys/class/hwmon/', name) if altname not in basenames: basenames.append(name) for base in basenames: try: path = base + '_input' current = float(bcat(path)) / 1000.0 path = os.path.join(os.path.dirname(base), 'name') unit_name = cat(path).strip() except (IOError, OSError, ValueError): # A lot of things can go wrong here, so let's just skip the # whole entry. Sure thing is Linux's /sys/class/hwmon really # is a stinky broken mess. # https://github.com/giampaolo/psutil/issues/1009 # https://github.com/giampaolo/psutil/issues/1101 # https://github.com/giampaolo/psutil/issues/1129 # https://github.com/giampaolo/psutil/issues/1245 # https://github.com/giampaolo/psutil/issues/1323 continue high = bcat(base + '_max', fallback=None) critical = bcat(base + '_crit', fallback=None) label = cat(base + '_label', fallback='').strip() if high is not None: try: high = float(high) / 1000.0 except ValueError: high = None if critical is not None: try: critical = float(critical) / 1000.0 except ValueError: critical = None ret[unit_name].append((label, current, high, critical)) # Indication that no sensors were detected in /sys/class/hwmon/ if not basenames: basenames = glob.glob('/sys/class/thermal/thermal_zone*') basenames = sorted(set(basenames)) for base in basenames: try: path = os.path.join(base, 'temp') current = float(bcat(path)) / 1000.0 path = os.path.join(base, 'type') unit_name = cat(path).strip() except (IOError, OSError, ValueError) as err: debug(err) continue trip_paths = glob.glob(base + '/trip_point*') trip_points = set(['_'.join( os.path.basename(p).split('_')[0:3]) for p in trip_paths]) critical = None high = None for trip_point in trip_points: path = os.path.join(base, trip_point + "_type") trip_type = cat(path, fallback='').strip() if trip_type == 'critical': critical = bcat(os.path.join(base, trip_point + "_temp"), fallback=None) elif trip_type == 'high': high = bcat(os.path.join(base, trip_point + "_temp"), fallback=None) if high is not None: try: high = float(high) / 1000.0 except ValueError: high = None if critical is not None: try: critical = float(critical) / 1000.0 except ValueError: critical = None ret[unit_name].append(('', current, high, critical)) return dict(ret)
d_id: 15,901 | id: 72,541 | n_whitespaces: 223 | path: wagtail/admin/views/pages/workflow.py | n_words: 68 | n_identifiers: 36
def preview_revision_for_task(request, page_id, task_id):
Reformat with black
fun_name: preview_revision_for_task | commit_id: d10f15e55806c6944827d801cd9c2d53f5da4186 | repo: wagtail | file_name: workflow.py | ast_levels: 15 | nloc: 24 | url: https://github.com/wagtail/wagtail.git | complexity: 3 | token_counts: 140 | n_ast_errors: 0 | vocab_size: 57 | n_ast_nodes: 221 | language: Python
{ "docstring": "Preview the revision linked to the in-progress TaskState of a specified Task. This enables pages in moderation\n to be edited and new TaskStates linked to the new revisions created, with preview links remaining valid", "language": "en", "n_whitespaces": 36, "n_words": 34, "vocab_size": 28 }
def preview_revision_for_task(request, page_id, task_id): page = get_object_or_404(Page, id=page_id) task = get_object_or_404(Task, id=task_id).specific try: task_state = TaskState.objects.get( page_revision__page=page, task=task, status=TaskState.STATUS_IN_PROGRESS ) except TaskState.DoesNotExist: messages.error( request, _( "The page '{0}' is not currently awaiting moderation in task '{1}'." ).format(page.get_admin_display_title(), task.name), ) return redirect("wagtailadmin_home") revision = task_state.page_revision if not task.get_actions(page, request.user): raise PermissionDenied page_to_view = revision.as_page_object() # TODO: provide workflow actions within this view return page_to_view.make_preview_request( request, page.default_preview_mode, extra_request_attrs={"revision_id": revision.id}, )
d_id: 56,492 | id: 221,726 | n_whitespaces: 94 | path: python3.10.4/Lib/contextlib.py | n_words: 44 | n_identifiers: 9
def push_async_callback(self, callback, /, *args, **kwds): _exit_wrapper = self._create_async_cb_wrapper(callback, *args, **kwds) # We changed the signature, so using @wraps is not appropriate, but # setti
add python 3.10.4 for windows
fun_name: push_async_callback | commit_id: 8198943edd73a363c266633e1aa5b2a9e9c9f526 | repo: XX-Net | file_name: contextlib.py | ast_levels: 9 | nloc: 5 | url: https://github.com/XX-net/XX-Net.git | complexity: 1 | token_counts: 45 | n_ast_errors: 0 | vocab_size: 39 | n_ast_nodes: 73 | language: Python
{ "docstring": "Registers an arbitrary coroutine function and arguments.\n\n Cannot suppress exceptions.\n ", "language": "en", "n_whitespaces": 24, "n_words": 10, "vocab_size": 10 }
def push_async_callback(self, callback, /, *args, **kwds): _exit_wrapper = self._create_async_cb_wrapper(callback, *args, **kwds) # We changed the signature, so using @wraps is not appropriate, but # setting __wrapped__ may still help with introspection. _exit_wrapper.__wrapped__ = callback self._push_exit_callback(_exit_wrapper, False) return callback # Allow use as a decorator
d_id: 13,285 | id: 63,394 | n_whitespaces: 789 | path: .venv/lib/python3.8/site-packages/pip/_vendor/pyparsing.py | n_words: 135 | n_identifiers: 35
def scanString(self, instring, maxMatches=_MAX_INT, overlap=False): if not self.streamlined: self.streamline() for e in self.ignoreExprs: e.streamline() if not self.keepTabs: instring = _ustr(instring).expandtabs() instrlen = len(instring) loc = 0 preparseFn = self.preParse parseFn = self._parse ParserEleme
upd; format
fun_name: scanString | commit_id: f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | repo: transferlearning | file_name: pyparsing.py | ast_levels: 20 | nloc: 41 | url: https://github.com/jindongwang/transferlearning.git | complexity: 13 | token_counts: 217 | n_ast_errors: 0 | vocab_size: 80 | n_ast_nodes: 354 | language: Python
{ "docstring": "\n Scan the input string for expression matches. Each match will return the\n matching tokens, start location, and end location. May be called with optional\n ``maxMatches`` argument, to clip scanning after 'n' matches are found. If\n ``overlap`` is specified, then overlapping matches will be reported.\n\n Note that the start and end locations are reported relative to the string\n being parsed. See :class:`parseString` for more information on parsing\n strings with embedded tabs.\n\n Example::\n\n source = \"sldjf123lsdjjkf345sldkjf879lkjsfd987\"\n print(source)\n for tokens, start, end in Word(alphas).scanString(source):\n print(' '*start + '^'*(end-start))\n print(' '*start + tokens[0])\n\n prints::\n\n sldjf123lsdjjkf345sldkjf879lkjsfd987\n ^^^^^\n sldjf\n ^^^^^^^\n lsdjjkf\n ^^^^^^\n sldkjf\n ^^^^^^\n lkjsfd\n ", "language": "en", "n_whitespaces": 442, "n_words": 99, "vocab_size": 78 }
def scanString(self, instring, maxMatches=_MAX_INT, overlap=False): if not self.streamlined: self.streamline() for e in self.ignoreExprs: e.streamline() if not self.keepTabs: instring = _ustr(instring).expandtabs() instrlen = len(instring) loc = 0 preparseFn = self.preParse parseFn = self._parse ParserElement.resetCache() matches = 0 try: while loc <= instrlen and matches < maxMatches: try: preloc = preparseFn(instring, loc) nextLoc, tokens = parseFn(instring, preloc, callPreParse=False) except ParseException: loc = preloc + 1 else: if nextLoc > loc: matches += 1 yield tokens, preloc, nextLoc if overlap: nextloc = preparseFn(instring, loc) if nextloc > loc: loc = nextLoc else: loc += 1 else: loc = nextLoc else: loc = preloc + 1 except ParseBaseException as exc: if ParserElement.verbose_stacktrace: raise else: # catch and re-raise exception from here, clearing out pyparsing internal stack trace if getattr(exc, '__traceback__', None) is not None: exc.__traceback__ = self._trim_traceback(exc.__traceback__) raise exc
d_id: 35,932 | id: 154,339 | n_whitespaces: 83 | path: modin/core/execution/dask/implementations/pandas_on_dask/partitioning/partition.py | n_words: 18 | n_identifiers: 10
def add_to_apply_calls(self, func, *args, length=None, width=None, **kwargs): return PandasOnDaskDataframePartition( self._data, call_queue=self.call_queue + [[func, args, kwargs]], length=length, width=width, )
PERF-#4794: Compute caches in `_propagate_index_objs` (#4888) Co-authored-by: Mahesh Vashishtha <[email protected]> Signed-off-by: Myachev <[email protected]>
fun_name: add_to_apply_calls | commit_id: 39b36eb2a2e3bf3d612933e1c78545a8bb28cde4 | repo: modin | file_name: partition.py | ast_levels: 11 | nloc: 7 | url: https://github.com/modin-project/modin.git | complexity: 1 | token_counts: 54 | n_ast_errors: 0 | vocab_size: 18 | n_ast_nodes: 76 | language: Python
{ "docstring": "\n Add a function to the call queue.\n\n Parameters\n ----------\n func : callable\n Function to be added to the call queue.\n *args : iterable\n Additional positional arguments to be passed in `func`.\n length : distributed.Future or int, optional\n Length, or reference to length, of wrapped ``pandas.DataFrame``.\n width : distributed.Future or int, optional\n Width, or reference to width, of wrapped ``pandas.DataFrame``.\n **kwargs : dict\n Additional keyword arguments to be passed in `func`.\n\n Returns\n -------\n PandasOnDaskDataframePartition\n A new ``PandasOnDaskDataframePartition`` object.\n\n Notes\n -----\n The keyword arguments are sent as a dictionary.\n ", "language": "en", "n_whitespaces": 259, "n_words": 87, "vocab_size": 54 }
def add_to_apply_calls(self, func, *args, length=None, width=None, **kwargs): return PandasOnDaskDataframePartition( self._data, call_queue=self.call_queue + [[func, args, kwargs]], length=length, width=width, )
d_id: 6,208 | id: 34,178 | n_whitespaces: 141 | path: src/transformers/feature_extraction_utils.py | n_words: 48 | n_identifiers: 18
def to_json_string(self) -> str: dictionary = self.to_dict() for key, value in dictionary.items(): if isinstance(value, np.ndarray): dictionary[key] = value.tolist() # make sure private name "_processor_class" is correctly # saved as "processor_class" _processor_class = dictionary.pop("_processor_class", None) if _processor_class is not None: dictionary["processor_class"] = _processor_class return json.dumps(
[ASR pipeline] correct with lm pipeline (#15200) * [ASR pipeline] correct with lm pipeline * improve error
fun_name: to_json_string | commit_id: 497346d07ec39da3a7f38a7e0a67a4906c141ea3 | repo: transformers | file_name: feature_extraction_utils.py | ast_levels: 12 | nloc: 15 | url: https://github.com/huggingface/transformers.git | complexity: 4 | token_counts: 85 | n_ast_errors: 0 | vocab_size: 40 | n_ast_nodes: 142 | language: Python
{ "docstring": "\n Serializes this instance to a JSON string.\n\n Returns:\n `str`: String containing all the attributes that make up this feature_extractor instance in JSON format.\n ", "language": "en", "n_whitespaces": 56, "n_words": 23, "vocab_size": 20 }
def to_json_string(self) -> str: dictionary = self.to_dict() for key, value in dictionary.items(): if isinstance(value, np.ndarray): dictionary[key] = value.tolist() # make sure private name "_processor_class" is correctly # saved as "processor_class" _processor_class = dictionary.pop("_processor_class", None) if _processor_class is not None: dictionary["processor_class"] = _processor_class return json.dumps(dictionary, indent=2, sort_keys=True) + "\n"
d_id: 5,070 | id: 26,810 | n_whitespaces: 357 | path: saleor/core/permissions/__init__.py | n_words: 125 | n_identifiers: 28
def one_of_permissions_or_auth_filter_required(context, permissions): if not permissions: return True authorization_filters = [ p for p in permissions if isinstance(p, AuthorizationFilters) ] permissions = [p for p in permissions if not isinstance(p, AuthorizationFilters)] granted_by_permissions = False granted_by_authorization_filters = False # TODO: move this function from graphql to core from saleor.graphql.utils import get_user_or_app_from_context is_app = bool(getattr(context, "app", None)) requestor = get_user_or_app_from_context(context) if permissions: perm_checks_results = [] for permission in permissions: if is_app and permission == AccountPermissions.MANAGE_STAFF: # `MANAGE_STAFF` permission for apps is not supported, as apps using it # could create a staff user with full access. perm_ch
Include permissions in schema descriptions of protected fields (#9428) * POC Generate permission description for queries * Use PermissionField for app queries * Rename functions checking object ownership * Switch to PermissionsField in more fields * CR fixes * Add missing descriptions
fun_name: one_of_permissions_or_auth_filter_required | commit_id: f0a988e798dd86befbbf7a0eda1bc1a8cc94dda2 | repo: saleor | file_name: __init__.py | ast_levels: 16 | nloc: 29 | url: https://github.com/saleor/saleor.git | complexity: 14 | token_counts: 172 | n_ast_errors: 0 | vocab_size: 76 | n_ast_nodes: 278 | language: Python
{ "docstring": "Determine whether user or app has rights to perform an action.\n\n The `context` parameter is the Context instance associated with the request.\n ", "language": "en", "n_whitespaces": 28, "n_words": 22, "vocab_size": 21 }
def one_of_permissions_or_auth_filter_required(context, permissions): if not permissions: return True authorization_filters = [ p for p in permissions if isinstance(p, AuthorizationFilters) ] permissions = [p for p in permissions if not isinstance(p, AuthorizationFilters)] granted_by_permissions = False granted_by_authorization_filters = False # TODO: move this function from graphql to core from saleor.graphql.utils import get_user_or_app_from_context is_app = bool(getattr(context, "app", None)) requestor = get_user_or_app_from_context(context) if permissions: perm_checks_results = [] for permission in permissions: if is_app and permission == AccountPermissions.MANAGE_STAFF: # `MANAGE_STAFF` permission for apps is not supported, as apps using it # could create a staff user with full access. perm_checks_results.append(False) else: perm_checks_results.append(requestor.has_perm(permission)) granted_by_permissions = any(perm_checks_results) if authorization_filters: auth_filters_results = [] for p in authorization_filters: perm_fn = resolve_authorization_filter_fn(p) if perm_fn: res = perm_fn(context) auth_filters_results.append(bool(res)) granted_by_authorization_filters = any(auth_filters_results) return granted_by_permissions or granted_by_authorization_filters
d_id: 33,344 | id: 144,928 | n_whitespaces: 45 | path: python/ray/_private/runtime_env/_clonevirtualenv.py | n_words: 19 | n_identifiers: 8
def _dirmatch(path, matchwith): matchlen = len(matchwith) if (path.startswith(matchwith)
[runtime env] Support clone `virtualenv` from an existing `virtualenv` (#22309) Before this PR, we can't run ray in virtualenv, cause `runtime_env` does not support create a new virtualenv from an existing virtualenv. More details:https://github.com/ray-project/ray/pull/21801#discussion_r796848499 Co-authored-by: 捕牛 <[email protected]>
fun_name: _dirmatch | commit_id: 4c73560b313821fbfbb8c943e02c8b298b7c1731 | repo: ray | file_name: _clonevirtualenv.py | ast_levels: 11 | nloc: 6 | url: https://github.com/ray-project/ray.git | complexity: 3 | token_counts: 45 | n_ast_errors: 0 | vocab_size: 18 | n_ast_nodes: 73 | language: Python
{ "docstring": "Check if path is within matchwith's tree.\n >>> _dirmatch('/home/foo/bar', '/home/foo/bar')\n True\n >>> _dirmatch('/home/foo/bar/', '/home/foo/bar')\n True\n >>> _dirmatch('/home/foo/bar/etc', '/home/foo/bar')\n True\n >>> _dirmatch('/home/foo/bar2', '/home/foo/bar')\n False\n >>> _dirmatch('/home/foo/bar2/etc', '/home/foo/bar')\n False\n ", "language": "en", "n_whitespaces": 60, "n_words": 27, "vocab_size": 16 }
def _dirmatch(path, matchwith): matchlen = len(matchwith) if (path.startswith(matchwith) and path[matchlen:matchlen + 1] in [os.sep, '']): return True return False
70,270
244,173
79
mmdet/models/dense_heads/tood_head.py
37
13
def deform_sampling(self, feat, offset): # it is an equivalent implementation of bilinear interpolation b, c, h, w = feat.shape weight = feat.new_ones(c, 1, 1, 1) y = deform_conv2d(feat, offset, weight, 1, 0, 1, c, c) return y
[Fix] Avoid invalid bbox after deform_sampling (#7567) * Avoid invalid bbox after deform_sampling * replace in-place opertion with torch.where, update docstring * Update
deform_sampling
b403751bd409795cf63fcc6aa7ee280326358bac
mmdetection
tood_head.py
8
5
https://github.com/open-mmlab/mmdetection.git
1
57
0
30
79
Python
{ "docstring": "Sampling the feature x according to offset.\n\n Args:\n feat (Tensor): Feature\n offset (Tensor): Spatial offset for feature sampling\n ", "language": "en", "n_whitespaces": 54, "n_words": 18, "vocab_size": 15 }
def deform_sampling(self, feat, offset): # it is an equivalent implementation of bilinear interpolation b, c, h, w = feat.shape weight = feat.new_ones(c, 1, 1, 1) y = deform_conv2d(feat, offset, weight, 1, 0, 1, c, c) return y
17,285
81,965
694
awxkit/awxkit/api/pages/page.py
171
47
def page_identity(self, response, request_json=None): request_path = response.request.path_url if request_path == '/migrations_notran/': raise exc.IsMigrating('You have been redirected to the migration-in-progress page.') request_method = response.request.method.lower() self.last_elapsed = response.elapsed if isinstance(request_json, dict) and 'ds' in request_json: ds = request_json.ds else: ds = None data = self.extract_data(response) exc_str = "%s (%s) received" % (http.responses[response.status_code], response.status_code) exception = exception_from_status_code(response.status_code) if exception: raise exception(exc_str, data) if response.status_code in (http.OK, http.CREATED, http.ACCEPTED): # Not all JSON responses include a URL. Grab it from the request # object, if needed. if 'url' in data: endpoint = data['url']
Register pages for the Instance peers and install bundle endpoints This includes exposing a new interface for Page objects, Page.bytes, to return the full bytestring contents of the response.
page_identity
68a44529b6b77d2d43d7099b654560bfd8bbf518
awx
page.py
13
44
https://github.com/ansible/awx.git
15
337
0
104
536
Python
{ "docstring": "Takes a `requests.Response` and\n returns a new __item_class__ instance if the request method is not a get, or returns\n a __class__ instance if the request path is different than the caller's `endpoint`.\n ", "language": "en", "n_whitespaces": 56, "n_words": 32, "vocab_size": 22 }
def page_identity(self, response, request_json=None): request_path = response.request.path_url if request_path == '/migrations_notran/': raise exc.IsMigrating('You have been redirected to the migration-in-progress page.') request_method = response.request.method.lower() self.last_elapsed = response.elapsed if isinstance(request_json, dict) and 'ds' in request_json: ds = request_json.ds else: ds = None data = self.extract_data(response) exc_str = "%s (%s) received" % (http.responses[response.status_code], response.status_code) exception = exception_from_status_code(response.status_code) if exception: raise exception(exc_str, data) if response.status_code in (http.OK, http.CREATED, http.ACCEPTED): # Not all JSON responses include a URL. Grab it from the request # object, if needed. if 'url' in data: endpoint = data['url'] else: endpoint = request_path data = objectify_response_json(response) if request_method in ('get', 'patch', 'put'): # Update existing resource and return it if are_same_endpoint(self.endpoint, request_path): self.json = data self.r = response return self registered_type = get_registered_page(request_path, request_method) return registered_type(self.connection, endpoint=endpoint, json=data, last_elapsed=response.elapsed, r=response, ds=ds) elif response.status_code == http.FORBIDDEN: if is_license_invalid(response): raise exc.LicenseInvalid(exc_str, data) elif is_license_exceeded(response): raise exc.LicenseExceeded(exc_str, data) else: raise exc.Forbidden(exc_str, data) elif response.status_code == http.BAD_REQUEST: if is_license_invalid(response): raise exc.LicenseInvalid(exc_str, data) if is_duplicate_error(response): raise exc.Duplicate(exc_str, data) else: raise exc.BadRequest(exc_str, data) else: raise exc.Unknown(exc_str, data)
21,442
102,077
108
lib/sysinfo.py
33
16
def _installed_conda(self): if not self._is_conda: return None with Popen("conda list", shell=True, stdout=PIPE, stderr=PIPE) as conda: stdout, stderr = conda.communicate() if stderr: return "Could not get package list" installed = stdout.decode(self._encoding, errors="replace").splitlines() return "\n".join(installed)
Allow decoding errors
_installed_conda
48c886b3dce3d3117ad16edaf35c8abd28dc51f5
faceswap
sysinfo.py
12
9
https://github.com/deepfakes/faceswap.git
3
73
0
28
128
Python
{ "docstring": " str: The list of installed Conda packages within Faceswap's scope. ", "language": "en", "n_whitespaces": 11, "n_words": 10, "vocab_size": 10 }
def _installed_conda(self): if not self._is_conda: return None with Popen("conda list", shell=True, stdout=PIPE, stderr=PIPE) as conda: stdout, stderr = conda.communicate() if stderr: return "Could not get package list" installed = stdout.decode(self._encoding, errors="replace").splitlines() return "\n".join(installed)
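A minimal stand-alone sketch of the same Popen pattern for capturing a shell command's package listing; `list_packages` is a hypothetical helper name and it assumes `conda` is available on the PATH.

from subprocess import PIPE, Popen

def list_packages(command="conda list", encoding="utf-8"):
    # Run the command through a shell and capture both output streams.
    with Popen(command, shell=True, stdout=PIPE, stderr=PIPE) as proc:
        stdout, stderr = proc.communicate()
    if stderr:
        return "Could not get package list"
    # Decode with errors="replace" so unexpected bytes never raise.
    return stdout.decode(encoding, errors="replace").splitlines()

print(list_packages())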
7,193
39,296
86
recommenders/models/sasrec/model.py
30
14
def embedding(self, input_seq): seq_embeddings = self.item_embedding_layer(input_seq) seq_embeddings = seq_embeddings * (self.embedding_dim ** 0.5) # FIXME positional_seq = tf.expand_dims(tf.range(tf.shape(input_seq)[1]), 0)
doc
embedding
d38dffc30c18e9e3280863b32dcc71d01757b181
recommenders
model.py
13
7
https://github.com/microsoft/recommenders.git
1
86
0
22
132
Python
{ "docstring": "Compute the sequence and positional embeddings.\n\n Args:\n input_seq (tf.Tensor): Input sequence\n \n Returns:\n tf.Tensor, tf.Tensor:\n - Sequence embeddings.\n - Positional embeddings.\n \n ", "language": "en", "n_whitespaces": 101, "n_words": 20, "vocab_size": 16 }
def embedding(self, input_seq): seq_embeddings = self.item_embedding_layer(input_seq) seq_embeddings = seq_embeddings * (self.embedding_dim ** 0.5) # FIXME positional_seq = tf.expand_dims(tf.range(tf.shape(input_seq)[1]), 0) positional_seq = tf.tile(positional_seq, [tf.shape(input_seq)[0], 1]) positional_embeddings = self.positional_embedding_layer(positional_seq) return seq_embeddings, positional_embeddings
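A small NumPy sketch of the positional-index construction and the sqrt(embedding_dim) scaling described above; NumPy stands in for the TensorFlow ops and the shapes are illustrative.

import numpy as np

batch_size, seq_len, embedding_dim = 2, 5, 8

# tf.expand_dims(tf.range(seq_len), 0) followed by tf.tile, written with NumPy:
positional_seq = np.tile(np.arange(seq_len)[None, :], (batch_size, 1))
print(positional_seq)        # each row is [0, 1, 2, 3, 4]

# Item embeddings are multiplied by sqrt(embedding_dim) before the positions are looked up.
scale = embedding_dim ** 0.5
print(scale)                 # 2.828...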
78,648
266,902
808
lib/ansible/utils/display.py
223
36
def display(self, msg, color=None, stderr=False, screen_only=False, log_only=False, newline=True): nocolor = msg if not log_only: has_newline = msg.endswith(u'\n') if has_newline: msg2 = msg[:-1] else: msg2 = msg if color: msg2 = stringc(msg2, color) if has_newline or newline: msg2 = msg2 + u'\n' msg2 = to_bytes(msg2, encoding=self._output_encoding(stderr=stderr)) # Convert back to text string # We first convert to a byte string so that we get rid of # characters that are invalid in the user's locale msg2 = to_text(msg2, self._output_encoding(stderr=stderr), errors='replace') # Note: After Display() class is refactored need to update the log capture # code in 'bin/ansible-connection' (and other relevant places). if not stderr: fileobj = sys.stdout else: fileobj = sys.stderr fileobj.write(msg2) try: fileobj.flush() except IOError as e: # Ignore EPIPE in case fileobj has been prematurely closed, eg. # when piping to "head -n1" if e.errno != errno.EPIPE: raise if logger and not screen_only: # We first convert to a byte string so that we get rid of # color and characters that are invalid in the u
Remove obsolete Python 2.x controller code.
display
6f445ca6e5c9c8b85ccc5062e00508c69ca26fde
ansible
display.py
16
34
https://github.com/ansible/ansible.git
13
229
0
128
385
Python
{ "docstring": " Display a message to the user\n\n Note: msg *must* be a unicode string to prevent UnicodeError tracebacks.\n ", "language": "en", "n_whitespaces": 32, "n_words": 17, "vocab_size": 15 }
def display(self, msg, color=None, stderr=False, screen_only=False, log_only=False, newline=True): nocolor = msg if not log_only: has_newline = msg.endswith(u'\n') if has_newline: msg2 = msg[:-1] else: msg2 = msg if color: msg2 = stringc(msg2, color) if has_newline or newline: msg2 = msg2 + u'\n' msg2 = to_bytes(msg2, encoding=self._output_encoding(stderr=stderr)) # Convert back to text string # We first convert to a byte string so that we get rid of # characters that are invalid in the user's locale msg2 = to_text(msg2, self._output_encoding(stderr=stderr), errors='replace') # Note: After Display() class is refactored need to update the log capture # code in 'bin/ansible-connection' (and other relevant places). if not stderr: fileobj = sys.stdout else: fileobj = sys.stderr fileobj.write(msg2) try: fileobj.flush() except IOError as e: # Ignore EPIPE in case fileobj has been prematurely closed, eg. # when piping to "head -n1" if e.errno != errno.EPIPE: raise if logger and not screen_only: # We first convert to a byte string so that we get rid of # color and characters that are invalid in the user's locale msg2 = to_bytes(nocolor.lstrip(u'\n')) # Convert back to text string msg2 = to_text(msg2, self._output_encoding(stderr=stderr)) lvl = logging.INFO if color: # set logger level based on color (not great) try: lvl = color_to_log_level[color] except KeyError: # this should not happen, but JIC raise AnsibleAssertionError('Invalid color supplied to display: %s' % color) # actually log logger.log(lvl, msg2)
78,254
265,983
105
netbox/extras/views.py
23
13
def get_queryset(self, request): queryset = SavedFilter.objects.all() user = request.user if user.is_superuser: return queryset if user.is_anonymous: return queryset.filter(shared=True)
Closes #9623: Implement saved filters (#10801) * Initial work on saved filters * Return only enabled/shared filters * Add tests * Clean up filtering of usable SavedFilters
get_queryset
484efdaf75f267a43f9321b938fda1bc967b9e53
netbox
views.py
11
10
https://github.com/netbox-community/netbox.git
3
62
0
18
101
Python
{ "docstring": "\n Return only shared SavedFilters, or those owned by the current user, unless\n this is a superuser.\n ", "language": "en", "n_whitespaces": 38, "n_words": 16, "vocab_size": 16 }
def get_queryset(self, request): queryset = SavedFilter.objects.all() user = request.user if user.is_superuser: return queryset if user.is_anonymous: return queryset.filter(shared=True) return queryset.filter( Q(shared=True) | Q(user=user) )
77,779
264,666
264
netbox/extras/api/views.py
64
36
def list(self, request):
Save old JobResults
list
f13a00b2dd33bffc3048c861b494096df457f212
netbox
views.py
18
18
https://github.com/netbox-community/netbox.git
4
135
0
54
222
Python
{ "docstring": "\n Compile all reports and their related results (if any). Result data is deferred in the list view.\n ", "language": "en", "n_whitespaces": 32, "n_words": 17, "vocab_size": 17 }
def list(self, request): report_list = [] report_content_type = ContentType.objects.get(app_label='extras', model='report') results = { r.name: r for r in JobResult.objects.filter( obj_type=report_content_type, status__in=JobResultStatusChoices.TERMINAL_STATE_CHOICES ).order_by('name', '-created').distinct('name').defer('data') } # Iterate through all available Reports. for module_name, reports in get_reports(): for report in reports: # Attach the relevant JobResult (if any) to each Report. report.result = results.get(report.full_name, None) report_list.append(report) serializer = serializers.ReportSerializer(report_list, many=True, context={ 'request': request, }) return Response(serializer.data)
78,253
265,967
121
netbox/extras/filtersets.py
38
12
def _usable(self, queryset, name, value): user = self.request.user if self.request else None if not user or user.is_anonymous: if value: return queryset.filter(enabled=True, shared=True) return queryset.filter(Q(enabled=False) | Q(shared=False))
Closes #9623: Implement saved filters (#10801) * Initial work on saved filters * Return only enabled/shared filters * Add tests * Clean up filtering of usable SavedFilters
_usable
484efdaf75f267a43f9321b938fda1bc967b9e53
netbox
filtersets.py
15
9
https://github.com/netbox-community/netbox.git
6
127
0
27
199
Python
{ "docstring": "\n Return only SavedFilters that are both enabled and are shared (or belong to the current user).\n ", "language": "en", "n_whitespaces": 31, "n_words": 16, "vocab_size": 15 }
def _usable(self, queryset, name, value): user = self.request.user if self.request else None if not user or user.is_anonymous: if value: return queryset.filter(enabled=True, shared=True) return queryset.filter(Q(enabled=False) | Q(shared=False)) if value: return queryset.filter(enabled=True).filter(Q(shared=True) | Q(user=user)) return queryset.filter(Q(enabled=False) | Q(Q(shared=False) & ~Q(user=user)))
14,040
65,853
12
erpnext/education/api.py
23
11
def get_current_enrollment(student, academic_year=None): current_academic_year = academic_year or frappe.defaul
style: format code with black
get_current_enrollment
494bd9ef78313436f0424b918f200dab8fc7c20b
erpnext
api.py
11
19
https://github.com/frappe/erpnext.git
3
55
0
21
85
Python
{ "docstring": "\n\t\tselect\n\t\t\tname as program_enrollment, student_name, program, student_batch_name as student_batch,\n\t\t\tstudent_category, academic_term, academic_year\n\t\tfrom\n\t\t\t`tabProgram Enrollment`\n\t\twhere\n\t\t\tstudent = %s and academic_year = %s\n\t\torder by creation", "language": "en", "n_whitespaces": 18, "n_words": 26, "vocab_size": 22 }
def get_current_enrollment(student, academic_year=None): current_academic_year = academic_year or frappe.defaults.get_defaults().academic_year program_enrollment_list = frappe.db.sql( , (student, current_academic_year), as_dict=1, ) if program_enrollment_list: return program_enrollment_list[0] else: return None
@keras_export("keras.applications.inception_resnet_v2.preprocess_input")
83,430
280,749
443
keras/applications/inception_resnet_v2.py
180
28
def inception_resnet_block(x, scale, block_type, block_idx, activation="relu"): if block_type == "block35": branch_0 = conv2d_bn(x, 32, 1) branch_1 = conv2d_bn(x, 32, 1) branch_1 = conv2d_bn(branch_1, 32, 3) branch_2 = conv2d_bn(x, 32, 1) branch_2 = conv2d_bn(branch_2, 48, 3) branch_2 = conv2d_bn(branch_2, 64, 3) branches = [branch_0, branch_1, branch_2] elif block_type == "block17": branch_0 = conv2d_bn(x, 192, 1) branch_1 = conv2d_bn(x, 128, 1) branch_1 = conv2d_bn(branch_1, 160, [1, 7]) branch_1 = conv2d_bn(branch_1, 192, [7, 1]) branches = [branch_0, branch_1] elif block_type == "block8": branch_0 = conv2d_bn(x, 192, 1) branch_1 = conv2d_bn(x, 192, 1) branch_1 = conv2d_bn(branch_1, 224, [1, 3]) branch_1 = conv2d_bn(branch_1, 256, [3, 1]) branches = [branch_0, branch_1] else: raise ValueError( "Unknown Inception-ResNet block type. " 'Expects "block35", "block17" or "block8", ' "but got: " + str(block_type) ) block_name = block_type + "_" + str(block_idx) channel_axis = 1 if backend.image_data_format() == "channels_first" else 3 mixed = layers.Concatenate(axis=channel_axis, name=block_name + "_mixed")( branch
Removes the serialization of lambdas Keras tests where necessary and adds SafeModeScope all other lambda-based serialization tests. PiperOrigin-RevId: 495432774
inception_resnet_block
e52c89c7d1bd52d1f0db0da86a72322ba72c1dc1
keras
inception_resnet_v2.py
14
44
https://github.com/keras-team/keras.git
6
336
1
93
520
Python
{ "docstring": "Adds an Inception-ResNet block.\n\n This function builds 3 types of Inception-ResNet blocks mentioned\n in the paper, controlled by the `block_type` argument (which is the\n block name used in the official TF-slim implementation):\n - Inception-ResNet-A: `block_type='block35'`\n - Inception-ResNet-B: `block_type='block17'`\n - Inception-ResNet-C: `block_type='block8'`\n\n Args:\n x: input tensor.\n scale: scaling factor to scale the residuals (i.e., the output of passing\n `x` through an inception module) before adding them to the shortcut\n branch. Let `r` be the output from the residual branch, the output of\n this block will be `x + scale * r`.\n block_type: `'block35'`, `'block17'` or `'block8'`, determines the network\n structure in the residual branch.\n block_idx: an `int` used for generating layer names. The Inception-ResNet\n blocks are repeated many times in this network. We use `block_idx` to\n identify each of the repetitions. For example, the first\n Inception-ResNet-A block will have `block_type='block35', block_idx=0`,\n and the layer names will have a common prefix `'block35_0'`.\n activation: activation function to use at the end of the block (see\n [activations](../activations.md)). When `activation=None`, no activation\n is applied\n (i.e., \"linear\" activation: `a(x) = x`).\n\n Returns:\n Output tensor for the block.\n\n Raises:\n ValueError: if `block_type` is not one of `'block35'`,\n `'block17'` or `'block8'`.\n ", "language": "en", "n_whitespaces": 344, "n_words": 193, "vocab_size": 130 }
def inception_resnet_block(x, scale, block_type, block_idx, activation="relu"): if block_type == "block35": branch_0 = conv2d_bn(x, 32, 1) branch_1 = conv2d_bn(x, 32, 1) branch_1 = conv2d_bn(branch_1, 32, 3) branch_2 = conv2d_bn(x, 32, 1) branch_2 = conv2d_bn(branch_2, 48, 3) branch_2 = conv2d_bn(branch_2, 64, 3) branches = [branch_0, branch_1, branch_2] elif block_type == "block17": branch_0 = conv2d_bn(x, 192, 1) branch_1 = conv2d_bn(x, 128, 1) branch_1 = conv2d_bn(branch_1, 160, [1, 7]) branch_1 = conv2d_bn(branch_1, 192, [7, 1]) branches = [branch_0, branch_1] elif block_type == "block8": branch_0 = conv2d_bn(x, 192, 1) branch_1 = conv2d_bn(x, 192, 1) branch_1 = conv2d_bn(branch_1, 224, [1, 3]) branch_1 = conv2d_bn(branch_1, 256, [3, 1]) branches = [branch_0, branch_1] else: raise ValueError( "Unknown Inception-ResNet block type. " 'Expects "block35", "block17" or "block8", ' "but got: " + str(block_type) ) block_name = block_type + "_" + str(block_idx) channel_axis = 1 if backend.image_data_format() == "channels_first" else 3 mixed = layers.Concatenate(axis=channel_axis, name=block_name + "_mixed")( branches ) up = conv2d_bn( mixed, backend.int_shape(x)[channel_axis], 1, activation=None, use_bias=True, name=block_name + "_conv", ) x = CustomScaleLayer()(x, up, scale) if activation is not None: x = layers.Activation(activation, name=block_name + "_ac")(x) return x @keras_export("keras.applications.inception_resnet_v2.preprocess_input")
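A toy NumPy illustration of the residual scaling the docstring describes, output = x + scale * r; the tensor shapes and the scale value are illustrative only.

import numpy as np

x = np.random.rand(1, 8, 8, 320)   # block input
r = np.random.rand(1, 8, 8, 320)   # residual branch output after the 1x1 projection
scale = 0.17                       # example scaling factor

out = x + scale * r                # the residual connection the docstring describes
print(out.shape)                   # (1, 8, 8, 320)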
19,814
100,317
152
lib/gui/analysis/stats.py
38
14
def _get_calculations(self): for selection in self._selections: if selection == "raw": continue logger.debug("Calculating: %s", selection) method = getattr(self, f"_calc_{selection}") raw_keys = [key for key in self._stats if key.startswith("raw_")] for key in raw_keys: selected_key = f"{selection}_{key.replace('raw_', '')}" self._stats[selected_key] = method(self._sta
Update code to support Tensorflow versions up to 2.8 (#1213) * Update maximum tf version in setup + requirements * - bump max version of tf version in launcher - standardise tf version check * update keras get_custom_objects for tf>2.6 * bugfix: force black text in GUI file dialogs (linux) * dssim loss - Move to stock tf.ssim function * Update optimizer imports for compatibility * fix logging for tf2.8 * Fix GUI graphing for TF2.8 * update tests * bump requirements.txt versions * Remove limit on nvidia-ml-py * Graphing bugfixes - Prevent live graph from displaying if data not yet available * bugfix: Live graph. Collect loss labels correctly * fix: live graph - swallow inconsistent loss errors * Bugfix: Prevent live graph from clearing during training * Fix graphing for AMD
_get_calculations
c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf
faceswap
stats.py
15
10
https://github.com/deepfakes/faceswap.git
6
79
0
28
156
Python
{ "docstring": " Perform the required calculations and populate :attr:`stats`. ", "language": "en", "n_whitespaces": 8, "n_words": 7, "vocab_size": 7 }
def _get_calculations(self): for selection in self._selections: if selection == "raw": continue logger.debug("Calculating: %s", selection) method = getattr(self, f"_calc_{selection}") raw_keys = [key for key in self._stats if key.startswith("raw_")] for key in raw_keys: selected_key = f"{selection}_{key.replace('raw_', '')}" self._stats[selected_key] = method(self._stats[key])
36,917
157,377
809
ldm/models/diffusion/dpm_solver/dpm_solver.py
228
37
def multistep_dpm_solver_second_update(self, x, model_prev_list, t_prev_list, t, solver_type="dpm_solver"): if solver_type not in ['dpm_solver', 'taylor']: raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type)) ns = self.noise_schedule dims = x.dim() model_prev_1, model_prev_0 = model_prev_list t_prev_1, t_prev_0 = t_prev_list lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_1), ns.marginal_lambda( t_prev_0), ns.marginal_lambda(t) log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t) sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t) alpha_t = torch.exp(log_alpha_t) h_0 = lambda_prev_0 - lambda_prev_1 h = lambda_t - lambda_prev_0 r0 = h_0 / h D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1) if self.predict_x0: if solver_type == 'dpm_solver': x_t = ( expand_dims(sigma_t / sigma_prev_0, dims) * x - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0 - 0.5 * expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * D1_0 ) elif solver_type == 'taylor': x_t = ( expand_dims(sigma_t / sigma_prev_0, dims) * x - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0 + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1_0 ) else: if solver_type == 'dpm_solver': x_t = ( expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * mo
release more models
multistep_dpm_solver_second_update
ca86da3a30c4e080d4db8c25fca73de843663cb4
stablediffusion
dpm_solver.py
25
43
https://github.com/Stability-AI/stablediffusion.git
7
449
0
88
681
Python
{ "docstring": "\n Multistep solver DPM-Solver-2 from time `t_prev_list[-1]` to time `t`.\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n model_prev_list: A list of pytorch tensor. The previous computed model values.\n t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],)\n t: A pytorch tensor. The ending time, with the shape (x.shape[0],).\n solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.\n The type slightly impacts the performance. We recommend to use 'dpm_solver' type.\n Returns:\n x_t: A pytorch tensor. The approximated solution at time `t`.\n ", "language": "en", "n_whitespaces": 201, "n_words": 91, "vocab_size": 57 }
def multistep_dpm_solver_second_update(self, x, model_prev_list, t_prev_list, t, solver_type="dpm_solver"): if solver_type not in ['dpm_solver', 'taylor']: raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type)) ns = self.noise_schedule dims = x.dim() model_prev_1, model_prev_0 = model_prev_list t_prev_1, t_prev_0 = t_prev_list lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_1), ns.marginal_lambda( t_prev_0), ns.marginal_lambda(t) log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t) sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t) alpha_t = torch.exp(log_alpha_t) h_0 = lambda_prev_0 - lambda_prev_1 h = lambda_t - lambda_prev_0 r0 = h_0 / h D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1) if self.predict_x0: if solver_type == 'dpm_solver': x_t = ( expand_dims(sigma_t / sigma_prev_0, dims) * x - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0 - 0.5 * expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * D1_0 ) elif solver_type == 'taylor': x_t = ( expand_dims(sigma_t / sigma_prev_0, dims) * x - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0 + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1_0 ) else: if solver_type == 'dpm_solver': x_t = ( expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0 - 0.5 * expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * D1_0 ) elif solver_type == 'taylor': x_t = ( expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0 - expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1_0 ) return x_t
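A scalar NumPy walk-through of the second-order update in the predict_x0 / 'dpm_solver' branch above; the schedule values below are toy numbers, not a real noise schedule.

import numpy as np

lambda_prev_1, lambda_prev_0, lambda_t = -2.0, -1.0, 0.0   # toy schedule values
sigma_prev_0, sigma_t, alpha_t = 0.8, 0.5, 0.9             # toy marginal std / alpha
x, model_prev_1, model_prev_0 = 1.0, 0.3, 0.4              # current state and two model outputs

h_0 = lambda_prev_0 - lambda_prev_1
h = lambda_t - lambda_prev_0
r0 = h_0 / h
D1_0 = (model_prev_0 - model_prev_1) / r0   # finite difference of the two model outputs

x_t = (sigma_t / sigma_prev_0) * x \
    - alpha_t * (np.exp(-h) - 1.0) * model_prev_0 \
    - 0.5 * alpha_t * (np.exp(-h) - 1.0) * D1_0
print(x_t)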
41,599
175,317
64
Lib/enum.py
25
17
def global_enum(cls, update_str=False): if issubclass(cls, Flag): cls.__repr__ = global_flag_repr else: cls.__repr__ = global_enum_repr if not issubclass(cls, ReprEnum) or update_str: cls.__str__ = global_str sys.modules[cls.__module__].__dict__.updat
bpo-40066: [Enum] update str() and format() output (GH-30582) Undo rejected PEP-663 changes: - restore `repr()` to its 3.10 status - restore `str()` to its 3.10 status New changes: - `IntEnum` and `IntFlag` now leave `__str__` as the original `int.__str__` so that str() and format() return the same result - zero-valued flags without a name have a slightly changed repr(), e.g. `repr(Color(0)) == '<Color: 0>'` - update `dir()` for mixed-in types to return all the methods and attributes of the mixed-in type - added `_numeric_repr_` to `Flag` to control display of unnamed values - enums without doc strings have a more comprehensive doc string added - `ReprEnum` added -- inheriting from this makes it so only `__repr__` is replaced, not `__str__` nor `__format__`; `IntEnum`, `IntFlag`, and `StrEnum` all inherit from `ReprEnum`
global_enum
acf7403f9baea3ae1119fc6b4a3298522188bf96
cpython
enum.py
10
9
https://github.com/python/cpython.git
4
65
0
20
104
Python
{ "docstring": "\n decorator that makes the repr() of an enum member reference its module\n instead of its class; also exports all members to the enum's module's\n global namespace\n ", "language": "en", "n_whitespaces": 39, "n_words": 26, "vocab_size": 23 }
def global_enum(cls, update_str=False): if issubclass(cls, Flag): cls.__repr__ = global_flag_repr else: cls.__repr__ = global_enum_repr if not issubclass(cls, ReprEnum) or update_str: cls.__str__ = global_str sys.modules[cls.__module__].__dict__.update(cls.__members__) return cls
80,939
272,022
584
keras/engine/training_v1.py
101
28
def create_training_target(self, target, run_eagerly=False): if self.has_training_target(): raise ValueError( "The training_target field for the _TrainingEndpoint " "instance has already been populated" ) if run_eagerly: # When run_eagerly, the target tensor is ignored, and the None placeholder # is created instead. self.training_target =
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
create_training_target
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
training_v1.py
17
35
https://github.com/keras-team/keras.git
7
172
0
64
276
Python
{ "docstring": "Create training_target instance and update the self.training_target.\n\n Note that the input target should just be a tensor or None, and\n corresponding training target will be created based on the output and\n loss_fn.\n\n Args:\n target: the target tensor for the current output. Could be None.\n run_eagerly: boolean, whether the model is in run_eagerly mode.\n\n Raises:\n ValueError if the training_target field for the current instance has\n already been populated.\n ", "language": "en", "n_whitespaces": 145, "n_words": 67, "vocab_size": 49 }
def create_training_target(self, target, run_eagerly=False): if self.has_training_target(): raise ValueError( "The training_target field for the _TrainingEndpoint " "instance has already been populated" ) if run_eagerly: # When run_eagerly, the target tensor is ignored, and the None placeholder # is created instead. self.training_target = _TrainingTarget( None, feedable=True, skip_target_weights=False ) return if self.should_skip_target(): self.training_target = _TrainingTarget(None) else: if target is not None and not backend.is_placeholder(target): feedable = False skip_target_weights = True else: feedable = True skip_target_weights = False if target is None: target_dtype = losses.LABEL_DTYPES_FOR_LOSSES.get( self.loss_fn, backend.dtype(self.output) ) target = backend.placeholder( ndim=len(self.shape), name=self.output_name + "_target", sparse=backend.is_sparse(self.output), dtype=target_dtype, ) self.training_target = _TrainingTarget( target, feedable=feedable, skip_target_weights=skip_target_weights, )
8,117
43,999
130
tests/models/test_dag.py
45
23
def test_set_task_instance_state(run_id, execution_date, session, dag_maker): start_date = datetime_tz(2020, 1, 1) with dag_maker("test_set_task_instance_state", start_date=start_date, session=session) as dag: task_1 = DummyOperator(task_id="task_1") task_2 = DummyOperator(task_id="ta
Use `DagRun.run_id` instead of `execution_date` when updating state of TIs(UI & REST API) (#18724) We can now use run_id as well as execution_date to update states of task instances Co-authored-by: Tzu-ping Chung <[email protected]> Co-authored-by: Ash Berlin-Taylor <[email protected]>
test_set_task_instance_state
2b4bf7fe67fc656ceb7bdaad36453b7a5b83ef04
airflow
test_dag.py
12
39
https://github.com/apache/airflow.git
2
321
0
38
188
Python
{ "docstring": "Test that set_task_instance_state updates the TaskInstance state and clear downstream failed", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
def test_set_task_instance_state(run_id, execution_date, session, dag_maker): start_date = datetime_tz(2020, 1, 1) with dag_maker("test_set_task_instance_state", start_date=start_date, session=session) as dag: task_1 = DummyOperator(task_id="task_1") task_2 = DummyOperator(task_id="task_2") task_3 = DummyOperator(task_id="task_3") task_4 = DummyOperator(task_id="task_4") task_5 = DummyOperator(task_id="task_5") task_1 >> [task_2, task_3, task_4, task_5] dagrun = dag_maker.create_dagrun( run_id=run_id, execution_date=execution_date, state=State.FAILED, run_type=DagRunType.SCHEDULED, )
95,433
296,453
260
homeassistant/components/roon/config_flow.py
56
22
async def async_step_link(self, user_input=None): errors = {} if user_input is not None: # Do not authenticate if the host is already configured self._async_abort_entries_match({CONF_HOST: self._host}) try: info = await authenticate( self.hass, self._host, self._port, self._servers ) except InvalidAuth: errors["base"] = "invalid_auth" except Exception: # pylint: dis
Improve roon integraton (#66000) * Update to new library, revise discovery to work with new library, specify port to work with new library. * Move user gui to fallback. * Revise tests. * Handle old config. * Improve debugging, refresh faster on load. * Remove duplicate. * Bump library version. * Fix docstring per review. * Review suggestion Co-authored-by: Martin Hjelmare <[email protected]> * Review suggestion Co-authored-by: Martin Hjelmare <[email protected]> * Add check for duplicate host. * Add error message to strings. * Tidy. * Review changes. * Remove default. Co-authored-by: Martin Hjelmare <[email protected]>
async_step_link
23264c8fd4a3f8bcff5961ed11cab6388d3c67a4
core
config_flow.py
14
16
https://github.com/home-assistant/core.git
4
107
0
46
182
Python
{ "docstring": "Handle linking and authenticting with the roon server.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
async def async_step_link(self, user_input=None): errors = {} if user_input is not None: # Do not authenticate if the host is already configured self._async_abort_entries_match({CONF_HOST: self._host}) try: info = await authenticate( self.hass, self._host, self._port, self._servers ) except InvalidAuth: errors["base"] = "invalid_auth" except Exception: # pylint: disable=broad-except _LOGGER.exception("Unexpected exception") errors["base"] = "unknown" else: return self.async_create_entry(title=DEFAULT_NAME, data=info) return self.async_show_form(step_id="link", errors=errors)
54,138
215,744
24
tests/pytests/unit/utils/win_dacl/test_get_sid_string.py
12
10
def test_get_sid_string_none(): sid_obj = sa
Add tests, migrate some tests to pytest
test_get_sid_string_none
3bb43882e727b1d36abe2e501759c9c5e9048ecf
salt
test_get_sid_string.py
10
4
https://github.com/saltstack/salt.git
1
39
0
11
66
Python
{ "docstring": "\n Validate getting a null sid (S-1-0-0) when a null sid is passed\n ", "language": "en", "n_whitespaces": 19, "n_words": 12, "vocab_size": 9 }
def test_get_sid_string_none(): sid_obj = salt.utils.win_dacl.get_sid(None) assert isinstance(sid_obj, pywintypes.SIDType) assert salt.utils.win_dacl.get_sid_string(sid_obj) == "S-1-0-0"
118,175
322,461
31
paddlenlp/datasets/dataset.py
10
8
def read(self, filename, split='train'): label_list = self.get_labels() vocab_info = self.get_vocab()
[cblue] support converting labels of multi-tasks
read
ba3ea1cffa14d8fddb4d61239d691eba1d711a1d
PaddleNLP
dataset.py
8
40
https://github.com/PaddlePaddle/PaddleNLP.git
12
260
0
9
46
Python
{ "docstring": "\n Returns a dataset containing all the examples that can be read from the file path.\n\n If `self.lazy` is False, this eagerly reads all instances from `self._read()`\n and returns a `MapDataset`.\n\n If `self.lazy` is True, this returns an `IterDataset`, which internally\n relies on the generator created from `self._read()` to lazily produce examples.\n In this case your implementation of `_read()` must also be lazy\n (that is, not load all examples into memory at once).\n\n Args:\n filename (str): Path of data file to read, usually provided by `_get_data` \n function.\n split (str, optional): The split name of selected dataset. This only makes\n a different when data files of different splits have different structures.\n \n Returns:\n A `MapDataset|IterDataset`.\n ", "language": "en", "n_whitespaces": 255, "n_words": 112, "vocab_size": 86 }
def read(self, filename, split='train'): label_list = self.get_labels() vocab_info = self.get_vocab()
16,279
74,629
47
wagtail/core/tests/test_whitelist.py
12
9
def test_no_rule_for_attr(self): tag = self.soup.b fn = attribute_rule({"snowman": "barbec
Reformat with black
test_no_rule_for_attr
d10f15e55806c6944827d801cd9c2d53f5da4186
wagtail
test_whitelist.py
11
5
https://github.com/wagtail/wagtail.git
1
38
0
11
70
Python
{ "docstring": "\n Test that attribute_rule() drops attributes for\n which no rule has been defined.\n ", "language": "en", "n_whitespaces": 34, "n_words": 12, "vocab_size": 12 }
def test_no_rule_for_attr(self): tag = self.soup.b fn = attribute_rule({"snowman": "barbecue"}) fn(tag) self.assertEqual(str(tag), "<b>baz</b>")
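A self-contained sketch of the attribute-rule pattern this test exercises, using BeautifulSoup (assumed installed); the rule handling is simplified and is not necessarily Wagtail's exact implementation.

from bs4 import BeautifulSoup

def attribute_rule(allowed):
    # Return a callable that keeps, rewrites, or drops attributes on a tag.
    def fn(tag):
        for name in list(tag.attrs):
            rule = allowed.get(name)
            if callable(rule):
                tag.attrs[name] = rule(tag.attrs[name])
            elif not rule:
                del tag.attrs[name]   # no rule defined for this attribute -> drop it
        return tag
    return fn

soup = BeautifulSoup('<b foo="bar">baz</b>', "html.parser")
fn = attribute_rule({"snowman": "barbecue"})
fn(soup.b)
print(str(soup.b))   # <b>baz</b> - "foo" had no rule, so it was removed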
@DeveloperAPI
29,861
132,902
477
python/ray/util/check_serialize.py
103
26
def _inspect_generic_serialization(base_obj, depth, parent, failure_set): assert not inspect.isfunction(base_obj) functions = inspect.getmembers(base_obj, predicate=inspect.isfunction) found = False with _printer.indent(): for name, obj in functions: serializable, _ = inspect_serializability( obj, name=name, depth=depth - 1, _parent=parent, _failure_set=failure_set, ) found = found or not serializable
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
_inspect_generic_serialization
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
ray
check_serialize.py
14
37
https://github.com/ray-project/ray.git
11
184
1
60
302
Python
{ "docstring": "Adds the first-found non-serializable element to the failure_set.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 7 }
def _inspect_generic_serialization(base_obj, depth, parent, failure_set): assert not inspect.isfunction(base_obj) functions = inspect.getmembers(base_obj, predicate=inspect.isfunction) found = False with _printer.indent(): for name, obj in functions: serializable, _ = inspect_serializability( obj, name=name, depth=depth - 1, _parent=parent, _failure_set=failure_set, ) found = found or not serializable if found: break with _printer.indent(): members = inspect.getmembers(base_obj) for name, obj in members: if name.startswith("__") and name.endswith("__") or inspect.isbuiltin(obj): continue serializable, _ = inspect_serializability( obj, name=name, depth=depth - 1, _parent=parent, _failure_set=failure_set, ) found = found or not serializable if found: break if not found: _printer.print( f"WARNING: Did not find non-serializable object in {base_obj}. " "This may be an oversight." ) return found @DeveloperAPI
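A stdlib-only sketch of the inspect.getmembers traversal used above, including the dunder/builtin filter; the Example class is purely illustrative.

import inspect

class Example:
    data = {"key": "value"}

    def method(self):
        return 42

obj = Example()

# Bound methods are not plain functions, so this first query is empty for an instance...
print(inspect.getmembers(obj, predicate=inspect.isfunction))   # []
# ...but they are picked up as methods.
print(inspect.getmembers(obj, predicate=inspect.ismethod))

# Generic member walk with the same dunder/builtin filter as the code above.
members = [
    (name, value)
    for name, value in inspect.getmembers(obj)
    if not (name.startswith("__") and name.endswith("__")) and not inspect.isbuiltin(value)
]
print([name for name, _ in members])   # ['data', 'method']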
49,752
200,643
863
sympy/combinatorics/perm_groups.py
287
20
def is_dihedral(self): r if self._is_dihedral is not None: return self._is_dihedral order = self.order() if order % 2 == 1: self._is_dihedral = False return False if or
Add a `PermutationGroup.is_dihedral` property
is_dihedral
624e6f073d5d20e78484f5a0b477469f83678b88
sympy
perm_groups.py
12
75
https://github.com/sympy/sympy.git
18
314
0
131
508
Python
{ "docstring": "\n Return ``True`` if the group is dihedral.\n\n Examples\n ========\n\n >>> from sympy.combinatorics.perm_groups import PermutationGroup\n >>> from sympy.combinatorics.permutations import Permutation\n >>> from sympy.combinatorics.named_groups import SymmetricGroup, CyclicGroup\n >>> G = PermutationGroup(Permutation(1, 6)(2, 5)(3, 4), Permutation(0, 1, 2, 3, 4, 5, 6))\n >>> G.is_dihedral\n True\n >>> G = SymmetricGroup(3)\n >>> G.is_dihedral\n True\n >>> G = CyclicGroup(6)\n >>> G.is_dihedral\n False\n\n References\n ==========\n\n .. [1] https://math.stackexchange.com/a/827273\n .. [2] https://kconrad.math.uconn.edu/blurbs/grouptheory/dihedral.pdf\n .. [3] https://kconrad.math.uconn.edu/blurbs/grouptheory/dihedral2.pdf\n .. [4] https://en.wikipedia.org/wiki/Dihedral_group\n ", "language": "en", "n_whitespaces": 225, "n_words": 70, "vocab_size": 48 }
def is_dihedral(self): r if self._is_dihedral is not None: return self._is_dihedral order = self.order() if order % 2 == 1: self._is_dihedral = False return False if order == 2: self._is_dihedral = True return True if order == 4: # The dihedral group of order 4 is the Klein 4-group. self._is_dihedral = not self.is_cyclic return self._is_dihedral if self.is_abelian: # The only abelian dihedral groups are the ones of orders 2 and 4. self._is_dihedral = False return False # Now we know the group is of even order >= 6, and nonabelian. n = order // 2 # Handle special cases where there are exactly two generators. gens = self.generators if len(gens) == 2: x, y = gens a, b = x.order(), y.order() # Make a >= b if a < b: x, y, a, b = y, x, b, a # Using Theorem 2.1 of [3]: if {a, b} == {2}: self._is_dihedral = True return True # Using Theorem 1.1 of [3]: if (a, b) == (n, 2) and y*x*y == ~x: self._is_dihedral = True return True # Procede with algorithm of [1] # Find elements of orders 2 and n order_2, order_n = [], [] for p in self.elements: k = p.order() if k == 2: order_2.append(p) elif k == n: order_n.append(p) if len(order_2) != n + 1 - (n % 2): self._is_dihedral = False return False if not order_n: self._is_dihedral = False return False x = order_n[0] # Want an element y of order 2 that is not a power of x # (i.e. that is not the 180-deg rotation, when n is even). y = order_2[0] if n % 2 == 0 and y == x**(n//2): y = order_2[1] self._is_dihedral = (y*x*y == ~x) return self._is_dihedral
78,343
266,230
45
netbox/dcim/signals.py
14
12
def extend_rearport_cable_paths(instance, created, **kwargs): if created: rearport = instance.rear_port for cablepath in CablePath.objects.filter(_nodes__contains=rearport):
Fixes #10969: Update cable paths ending at associated rear port when creating new front ports
extend_rearport_cable_paths
4e27e8d3dd2cbfe3279bda3631ca92a7facdd334
netbox
signals.py
11
5
https://github.com/netbox-community/netbox.git
3
38
0
14
62
Python
{ "docstring": "\n When a new FrontPort is created, add it to any CablePaths which end at its corresponding RearPort.\n ", "language": "en", "n_whitespaces": 24, "n_words": 17, "vocab_size": 17 }
def extend_rearport_cable_paths(instance, created, **kwargs): if created: rearport = instance.rear_port for cablepath in CablePath.objects.filter(_nodes__contains=rearport): cablepath.retrace()
19,840
100,345
1,104
lib/gui/utils.py
281
50
def _load_images_to_cache(self, image_files, frame_dims, thumbnail_size): logger.debug("Number image_files: %s, frame_dims: %s, thumbnail_size: %s", len(image_files), frame_dims, thumbnail_size) num_images = (frame_dims[0] // thumbnail_size) * (frame_dims[1] // thumbnail_size) logger.debug("num_images: %s", num_images) if num_images == 0: return False samples = [] start_idx = len(image_files) - num_images if len(image_files) > num_images else 0 show_files = sorted(image_files, key=os.path.getctime)[start_idx:] dropped_files = [] for fname in show_files: try: img = Image.open(fname) except PermissionError as err: logger.debug("Permission error opening preview file: '%s'. Original error: %s", fname, str(err)) dropped_files.append(fname) continue except Exception as err: # pylint:disable=broad-except
Update code to support Tensorflow versions up to 2.8 (#1213) * Update maximum tf version in setup + requirements * - bump max version of tf version in launcher - standardise tf version check * update keras get_custom_objects for tf>2.6 * bugfix: force black text in GUI file dialogs (linux) * dssim loss - Move to stock tf.ssim function * Update optimizer imports for compatibility * fix logging for tf2.8 * Fix GUI graphing for TF2.8 * update tests * bump requirements.txt versions * Remove limit on nvidia-ml-py * Graphing bugfixes - Prevent live graph from displaying if data not yet available * bugfix: Live graph. Collect loss labels correctly * fix: live graph - swallow inconsistent loss errors * Bugfix: Prevent live graph from clearing during training * Fix graphing for AMD
_load_images_to_cache
c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf
faceswap
utils.py
17
61
https://github.com/deepfakes/faceswap.git
13
488
0
176
811
Python
{ "docstring": " Load preview images to the image cache.\n\n Load new images and append to cache, filtering the cache the number of thumbnails that will\n fit inside the display panel.\n\n Parameters\n ----------\n image_files: list\n A list of new image files that have been modified since the last check\n frame_dims: tuple\n The (width (`int`), height (`int`)) of the display panel that will display the preview\n thumbnail_size: int\n The size of each thumbnail that should be created\n\n Returns\n -------\n bool\n ``True`` if images were successfully loaded to cache otherwise ``False``\n ", "language": "en", "n_whitespaces": 209, "n_words": 86, "vocab_size": 60 }
def _load_images_to_cache(self, image_files, frame_dims, thumbnail_size): logger.debug("Number image_files: %s, frame_dims: %s, thumbnail_size: %s", len(image_files), frame_dims, thumbnail_size) num_images = (frame_dims[0] // thumbnail_size) * (frame_dims[1] // thumbnail_size) logger.debug("num_images: %s", num_images) if num_images == 0: return False samples = [] start_idx = len(image_files) - num_images if len(image_files) > num_images else 0 show_files = sorted(image_files, key=os.path.getctime)[start_idx:] dropped_files = [] for fname in show_files: try: img = Image.open(fname) except PermissionError as err: logger.debug("Permission error opening preview file: '%s'. Original error: %s", fname, str(err)) dropped_files.append(fname) continue except Exception as err: # pylint:disable=broad-except # Swallow any issues with opening an image rather than spamming console # Can happen when trying to read partially saved images logger.debug("Error opening preview file: '%s'. Original error: %s", fname, str(err)) dropped_files.append(fname) continue width, height = img.size scaling = thumbnail_size / max(width, height) logger.debug("image width: %s, height: %s, scaling: %s", width, height, scaling) try: img = img.resize((int(width * scaling), int(height * scaling))) except OSError as err: # Image only gets loaded when we call a method, so may error on partial loads logger.debug("OS Error resizing preview image: '%s'. Original error: %s", fname, err) dropped_files.append(fname) continue if img.size[0] != img.size[1]: # Pad to square new_img = Image.new("RGB", (thumbnail_size, thumbnail_size)) new_img.paste(img, ((thumbnail_size - img.size[0])//2, (thumbnail_size - img.size[1])//2)) img = new_img draw = ImageDraw.Draw(img) draw.rectangle(((0, 0), (thumbnail_size, thumbnail_size)), outline="#E5E5E5", width=1) samples.append(np.array(img)) samples = np.array(samples) if not np.any(samples): logger.debug("No preview images collected.") return False if dropped_files: logger.debug("Removing dropped files: %s", dropped_files) show_files = [fname for fname in show_files if fname not in dropped_files] self._previewcache["filenames"] = (self._previewcache["filenames"] + show_files)[-num_images:] cache = self._previewcache["images"] if cache is None: logger.debug("Creating new cache") cache = samples[-num_images:] else: logger.debug("Appending to existing cache") cache = np.concatenate((cache, samples))[-num_images:] self._previewcache["images"] = cache logger.debug("Cache shape: %s", self._previewcache["images"].shape) return True
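The resize-and-pad-to-square step from the cache loader as a stand-alone Pillow sketch; Pillow is assumed to be installed and the sizes are illustrative.

from PIL import Image

thumbnail_size = 128
img = Image.new("RGB", (96, 64), (128, 128, 128))   # stand-in for a loaded preview frame

# Scale the longest side down to the thumbnail size, keeping the aspect ratio.
scaling = thumbnail_size / max(img.size)
img = img.resize((int(img.size[0] * scaling), int(img.size[1] * scaling)))

# Pad to a square canvas, centring the resized image.
canvas = Image.new("RGB", (thumbnail_size, thumbnail_size))
canvas.paste(img, ((thumbnail_size - img.size[0]) // 2,
                   (thumbnail_size - img.size[1]) // 2))
print(canvas.size)   # (128, 128)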
79,173
267,897
25
test/lib/ansible_test/_internal/commands/integration/coverage.py
11
7
def target_profile(self) -> t.Optional[PosixProfile]: retur
ansible-test - Use more native type hints. (#78435) * ansible-test - Use more native type hints. Simple search and replace to switch from comments to native type hints for return types of functions with no arguments. * ansible-test - Use more native type hints. Conversion of simple single-line function annotation type comments to native type hints. * ansible-test - Use more native type hints. Conversion of single-line function annotation type comments with default values to native type hints. * ansible-test - Use more native type hints. Manual conversion of type annotation comments for functions which have pylint directives.
target_profile
3eb0485dd92c88cc92152d3656d94492db44b183
ansible
coverage.py
10
3
https://github.com/ansible/ansible.git
2
33
0
11
51
Python
{ "docstring": "The POSIX target profile, if it uses a different Python interpreter than the controller, otherwise None.", "language": "en", "n_whitespaces": 15, "n_words": 16, "vocab_size": 16 }
def target_profile(self) -> t.Optional[PosixProfile]: return t.cast(PosixProfile, self.profiles[0]) if self.profiles else None
85,817
286,444
2,754
openbb_terminal/portfolio/portfolio_model.py
512
71
def preprocess_transactions(self): p_bar = tqdm(range(14), desc="Preprocessing transactions") try: # 0. If optional fields not in the transactions add missing optional_fields = [ "Sector", "Industry", "Country", "Region", "Fees", "Premium", "ISIN", ] if not set(optional_fields).issubset(set(self.__transactions.columns)): for field in optional_fields: if field not in self.__transactions.columns: self.__transactions[field] = np.nan p_bar.n += 1 p_bar.refresh() # 1. Convert Date to datetime self.__transactions["Date"] = pd.to_datetime(self.__transactions["Date"]) p_bar.n += 1 p_bar.refresh() # 2. Sort transactions by date self.__transactions = self.__transactions.sort_values(by="Date") p_bar.n += 1 p_bar.refre
Portfolio menu bug fixes (#3204) * normalized way moving average is treated and prevent huge exception prompt * changed descriptions on docs * change to check_positive_float * add integration tests * fix linting * add more integration tests * add more integration tests * fix linting * add some po integration tests * fix flag without prompt * change orderbook to transactions * limit warning to portfolio * change print help * format portfolio var * reformat controller * reformat es * remove autocompletion * change print help * add percentage symbol to summary * fix holp holv * fix scripts * update website doc * add tqdm progress bars * fix table spacing * identify mret tables * remove positive float from rfr * flake8 * pylint * fix reports * revert to old menu help * revert to old menu help * Update test_portfolio.openbb * quick change on empty lines Co-authored-by: hjoaquim <[email protected]> Co-authored-by: James Maslek <[email protected]>
preprocess_transactions
f9086d6f38cf5de4bf3e44be0b4ccd332dbaca46
OpenBBTerminal
portfolio_model.py
23
137
https://github.com/OpenBB-finance/OpenBBTerminal.git
14
838
0
267
1,446
Python
{ "docstring": "Method to preprocess, format and compute auxiliary fields.\n\n Preprocessing steps:\n 0. If optional fields not in the transactions add missing\n 1. Convert Date to datetime\n 2. Sort transactions by date\n 3. Capitalize Ticker and Type [of instrument...]\n 4. Translate side: [\"deposit\", \"buy\"] -> 1 and [\"withdrawal\", \"sell\"] -> -1\n 5. Convert quantity to signed integer\n 6. Determining the investment/divestment value\n 7. Reformat crypto tickers to yfinance format (e.g. BTC -> BTC-USD)\n 8. Reformat STOCK/ETF tickers to yfinance format if ISIN provided\n 9. Remove unsupported ISINs that came out empty\n 10. Create tickers dictionary with structure {'Type': [Ticker]}\n 11. Create list with tickers except cash\n 12. Save transactions inception date\n 13. Populate fields Sector, Industry and Country\n ", "language": "en", "n_whitespaces": 284, "n_words": 116, "vocab_size": 92 }
def preprocess_transactions(self): p_bar = tqdm(range(14), desc="Preprocessing transactions") try: # 0. If optional fields not in the transactions add missing optional_fields = [ "Sector", "Industry", "Country", "Region", "Fees", "Premium", "ISIN", ] if not set(optional_fields).issubset(set(self.__transactions.columns)): for field in optional_fields: if field not in self.__transactions.columns: self.__transactions[field] = np.nan p_bar.n += 1 p_bar.refresh() # 1. Convert Date to datetime self.__transactions["Date"] = pd.to_datetime(self.__transactions["Date"]) p_bar.n += 1 p_bar.refresh() # 2. Sort transactions by date self.__transactions = self.__transactions.sort_values(by="Date") p_bar.n += 1 p_bar.refresh() # 3. Capitalize Ticker and Type [of instrument...] self.__transactions["Ticker"] = self.__transactions["Ticker"].map( lambda x: x.upper() ) self.__transactions["Type"] = self.__transactions["Type"].map( lambda x: x.upper() ) p_bar.n += 1 p_bar.refresh() # 4. Translate side: ["deposit", "buy"] -> 1 and ["withdrawal", "sell"] -> -1 self.__transactions["Signal"] = self.__transactions["Side"].map( lambda x: 1 if x.lower() in ["deposit", "buy"] else (-1 if x.lower() in ["withdrawal", "sell"] else 0) ) p_bar.n += 1 p_bar.refresh() # 5. Convert quantity to signed integer self.__transactions["Quantity"] = ( abs(self.__transactions["Quantity"]) * self.__transactions["Signal"] ) p_bar.n += 1 p_bar.refresh() # 6. Determining the investment/divestment value self.__transactions["Investment"] = ( self.__transactions["Quantity"] * self.__transactions["Price"] + self.__transactions["Fees"] ) p_bar.n += 1 p_bar.refresh() # 7. Reformat crypto tickers to yfinance format (e.g. BTC -> BTC-USD) crypto_trades = self.__transactions[self.__transactions.Type == "CRYPTO"] self.__transactions.loc[ (self.__transactions.Type == "CRYPTO"), "Ticker" ] = [ f"{crypto}-{currency}" for crypto, currency in zip( crypto_trades.Ticker, crypto_trades.Currency ) ] p_bar.n += 1 p_bar.refresh() # 8. Reformat STOCK/ETF tickers to yfinance format if ISIN provided. # If isin not valid ticker is empty self.__transactions["yf_Ticker"] = self.__transactions["ISIN"].apply( lambda x: yf.utils.get_ticker_by_isin(x) if not pd.isna(x) else np.nan ) empty_tickers = list( self.__transactions[ (self.__transactions["yf_Ticker"] == "") | (self.__transactions["yf_Ticker"].isna()) ]["Ticker"].unique() ) # If ticker from isin is empty it is not valid in yfinance, so check if user provided ticker is supported removed_tickers = [] for item in empty_tickers: with contextlib.redirect_stdout(None): # Suppress yfinance failed download message if occurs valid_ticker = not ( yf.download( item, start=datetime.datetime.now() + datetime.timedelta(days=-5), progress=False, ).empty ) if valid_ticker: # Invalid ISIN but valid ticker self.__transactions.loc[ self.__transactions["Ticker"] == item, "yf_Ticker" ] = np.nan else: self.__transactions.loc[ self.__transactions["Ticker"] == item, "yf_Ticker" ] = "" removed_tickers.append(item) # Merge reformated tickers into Ticker self.__transactions["Ticker"] = self.__transactions["yf_Ticker"].fillna( self.__transactions["Ticker"] ) p_bar.n += 1 p_bar.refresh() # 9. Remove unsupported ISINs that came out empty self.__transactions.drop( self.__transactions[self.__transactions["Ticker"] == ""].index, inplace=True, ) p_bar.n += 1 p_bar.refresh() # 10. 
Create tickers dictionary with structure {'Type': [Ticker]} for ticker_type in set(self.__transactions["Type"]): self.tickers[ticker_type] = list( set( self.__transactions[ self.__transactions["Type"].isin([ticker_type]) ]["Ticker"] ) ) p_bar.n += 1 p_bar.refresh() # 11. Create list with tickers except cash self.tickers_list = list(set(self.__transactions["Ticker"])) p_bar.n += 1 p_bar.refresh() # 12. Save transactions inception date self.inception_date = self.__transactions["Date"][0] p_bar.n += 1 p_bar.refresh() # 13. Populate fields Sector, Industry and Country if ( self.__transactions.loc[ self.__transactions["Type"] == "STOCK", optional_fields, ] .isnull() .values.any() ): # If any fields is empty for stocks (overwrites any info there) self.load_company_data() p_bar.n += 1 p_bar.refresh() # Warn user of removed ISINs if removed_tickers: console.print( f"\n\n[red]The following tickers are not supported and were removed: {removed_tickers}." f"\nManually edit the 'Ticker' field with the proper Yahoo Finance suffix or provide a valid ISIN." f"\nSuffix info on 'Yahoo Finance market coverage':" " https://help.yahoo.com/kb/exchanges-data-providers-yahoo-finance-sln2310.html" f"\nE.g. IWDA -> IWDA.AS[/red]" ) except Exception: console.print("\nCould not preprocess transactions.")
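A small pandas sketch of steps 4-6 of the preprocessing above (side-to-signal mapping, signed quantities, investment value); the sample transactions are made up.

import pandas as pd

transactions = pd.DataFrame({
    "Side": ["Buy", "deposit", "Sell", "withdrawal"],
    "Quantity": [10, 100, 5, 50],
    "Price": [2.0, 1.0, 3.0, 1.0],
    "Fees": [1.0, 0.0, 1.0, 0.0],
})

transactions["Signal"] = transactions["Side"].map(
    lambda x: 1 if x.lower() in ["deposit", "buy"]
    else (-1 if x.lower() in ["withdrawal", "sell"] else 0)
)
transactions["Quantity"] = transactions["Quantity"].abs() * transactions["Signal"]
transactions["Investment"] = transactions["Quantity"] * transactions["Price"] + transactions["Fees"]
print(transactions)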
79,831
269,013
39
keras/optimizers/optimizer_v2/optimizer_v2.py
27
7
def _var_key(var): # pylint: disable=protected-access # Get the distributed variable if it exists. if hasattr(var, "_distributed_container"): var = var._distributed_container() if getattr(var, "_in_g
Support checkpointing ShardedVariables in optimizer slot variables. PiperOrigin-RevId: 429577423
_var_key
75d70a610dffe927d89ceb400d79bb7f9027b26e
keras
optimizer_v2.py
10
6
https://github.com/keras-team/keras.git
3
39
0
23
69
Python
{ "docstring": "Key for representing a primary variable, for looking up slots.\n\n In graph mode the name is derived from the var shared name.\n In eager mode the name is derived from the var unique id.\n If distribution strategy exists, get the primary variable first.\n\n Args:\n var: the variable.\n\n Returns:\n the unique name of the variable.\n ", "language": "en", "n_whitespaces": 66, "n_words": 54, "vocab_size": 35 }
def _var_key(var): # pylint: disable=protected-access # Get the distributed variable if it exists. if hasattr(var, "_distributed_container"): var = var._distributed_container() if getattr(var, "_in_graph_mode", False): return var._shared_name return var._unique_id
76,836
261,492
57
sklearn/ensemble/tests/test_stacking.py
26
19
def test_stacking_classifier_base_regressor(): X_train, X_test, y_train, y_test = train_test_split( scale(X_iris), y_iris, stratify=y_iris, random_state=42 ) clf = StackingClassifier(estimators=[("ridge", Ridge())]) clf.fit(X_train, y_train) clf.predict(X_test) clf.predict_proba(X_test) asser
ENH StackingClassifier allows regressors in its first layer (#24538) Co-authored-by: Tom Dupré la Tour <[email protected]> Co-authored-by: Guillaume Lemaitre <[email protected]>
test_stacking_classifier_base_regressor
b1807ff8ead319a08294beeaae90c3f03b2bb8ac
scikit-learn
test_stacking.py
13
9
https://github.com/scikit-learn/scikit-learn.git
1
79
0
25
121
Python
{ "docstring": "Check that a regressor can be used as the first layer in `StackingClassifier`.", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 13 }
def test_stacking_classifier_base_regressor(): X_train, X_test, y_train, y_test = train_test_split( scale(X_iris), y_iris, stratify=y_iris, random_state=42 ) clf = StackingClassifier(estimators=[("ridge", Ridge())]) clf.fit(X_train, y_train) clf.predict(X_test) clf.predict_proba(X_test) assert clf.score(X_test, y_test) > 0.8
23,079
108,151
580
lib/matplotlib/backends/backend_svg.py
145
27
def _get_style_dict(self, gc, rgbFace): attrib = {} forced_alpha = gc.get_forced_alpha() if gc.get_hatch() is not None: attrib['fill'] = "url(#%s)" % self._get_hatch(gc, rgbFace) if (rgbFace is not None and len(rgbFace) == 4 and rgbFace[3] != 1.0 and not forced_alpha): attrib['fill-opacity'] = _short_float_fmt(rgbFace[3]) else: if rgbFace is None: attrib['fill'] = 'none' else: if tuple(rgbFace[:3]) != (0, 0, 0): attrib['fill'] = rgb2hex(rgbFace) if (len(rgbFace) == 4 and rgbFace[3] != 1.0 and not forced_alpha): attrib['fill-opacity'] = _short_float_fmt(rgbFace[3]) if forced_alpha and gc.get_alpha() != 1.0: attrib['opacity'] = _short_float_fmt(gc.get_alpha()) offset, seq = gc.get_dashes() if seq is not None: attrib['stroke-dasharray'] = ','.join( _short_float_fmt(val) for val in seq) attrib['stroke-dashoffset'] = _short_float_fmt(float(offset)) linewidth = gc.get_linewidth() if linewidth:
Deprecate functions in backends
_get_style_dict
ec410abbb3a721e31f3aaa61e9e4f941467e35e1
matplotlib
backend_svg.py
17
37
https://github.com/matplotlib/matplotlib.git
21
342
0
76
558
Python
{ "docstring": "Generate a style string from the GraphicsContext and rgbFace.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
def _get_style_dict(self, gc, rgbFace): attrib = {} forced_alpha = gc.get_forced_alpha() if gc.get_hatch() is not None: attrib['fill'] = "url(#%s)" % self._get_hatch(gc, rgbFace) if (rgbFace is not None and len(rgbFace) == 4 and rgbFace[3] != 1.0 and not forced_alpha): attrib['fill-opacity'] = _short_float_fmt(rgbFace[3]) else: if rgbFace is None: attrib['fill'] = 'none' else: if tuple(rgbFace[:3]) != (0, 0, 0): attrib['fill'] = rgb2hex(rgbFace) if (len(rgbFace) == 4 and rgbFace[3] != 1.0 and not forced_alpha): attrib['fill-opacity'] = _short_float_fmt(rgbFace[3]) if forced_alpha and gc.get_alpha() != 1.0: attrib['opacity'] = _short_float_fmt(gc.get_alpha()) offset, seq = gc.get_dashes() if seq is not None: attrib['stroke-dasharray'] = ','.join( _short_float_fmt(val) for val in seq) attrib['stroke-dashoffset'] = _short_float_fmt(float(offset)) linewidth = gc.get_linewidth() if linewidth: rgb = gc.get_rgb() attrib['stroke'] = rgb2hex(rgb) if not forced_alpha and rgb[3] != 1.0: attrib['stroke-opacity'] = _short_float_fmt(rgb[3]) if linewidth != 1.0: attrib['stroke-width'] = _short_float_fmt(linewidth) if gc.get_joinstyle() != 'round': attrib['stroke-linejoin'] = gc.get_joinstyle() if gc.get_capstyle() != 'butt': attrib['stroke-linecap'] = _capstyle_d[gc.get_capstyle()] return attrib
25,734
116,362
348
tests/unit/test_executor.py
85
28
def test_union(self, mock_handler): self.set_handler(mock_handler, name='pg', tables={'tasks': self.df}) # --- use predictor --- predictor = { 'name': 'task_model', 'predict': 'p', 'dtypes': { 'p': dtype.float, 'a': dtype.integer, 'b': dtype.categorical, 'c': dtype.datetime }, 'predicted_value': 'ccc' } self.set_predictor(predictor) sql = # union all ret = self.command_executor.execute_command( parse_sql(sql.format(union='ALL'), dialect='mindsdb')) assert ret.error_code is None ret_df = self.ret_to_df(ret) assert list(ret_df.columns) =
union command #2852
test_union
61f6f6c3c8154fa0629df8a016d449ceded99879
mindsdb
test_executor.py
14
35
https://github.com/mindsdb/mindsdb.git
1
201
0
53
346
Python
{ "docstring": "\n SELECT a as a1, b as target\n FROM pg.tasks\n UNION {union}\n SELECT model.a as a2, model.p as target2\n FROM pg.tasks as t\n JOIN mindsdb.task_model as model\n WHERE t.a=1 \n ", "language": "en", "n_whitespaces": 131, "n_words": 28, "vocab_size": 20 }
def test_union(self, mock_handler): self.set_handler(mock_handler, name='pg', tables={'tasks': self.df}) # --- use predictor --- predictor = { 'name': 'task_model', 'predict': 'p', 'dtypes': { 'p': dtype.float, 'a': dtype.integer, 'b': dtype.categorical, 'c': dtype.datetime }, 'predicted_value': 'ccc' } self.set_predictor(predictor) sql = # union all ret = self.command_executor.execute_command( parse_sql(sql.format(union='ALL'), dialect='mindsdb')) assert ret.error_code is None ret_df = self.ret_to_df(ret) assert list(ret_df.columns) == ['a1', 'target'] assert ret_df.shape[0] == 3 + 2 # union ret = self.command_executor.execute_command( parse_sql(sql.format(union=''), dialect='mindsdb')) assert ret.error_code is None ret_df = self.ret_to_df(ret) assert list(ret_df.columns) == ['a1', 'target'] assert ret_df.shape[0] == 3
8,522
45,252
89
tests/utils/test_db_cleanup.py
21
13
def test_run_cleanup_tables(self, clean_table_mock, table_names): base_kwargs = dict(
Add `db clean` CLI command for purging old data (#20838) CLI command to delete old rows from airflow metadata database. Notes: * Must supply "purge before date". * Can optionally provide table list. * Dry run will only print the number of rows meeting criteria. * If not dry run, will require the user to confirm before deleting.
test_run_cleanup_tables
c75774d3a31efe749f55ba16e782737df9f53af4
airflow
test_db_cleanup.py
9
8
https://github.com/apache/airflow.git
2
52
0
21
79
Python
{ "docstring": "\n ``_cleanup_table`` should be called for each table in subset if one\n is provided else should be called for all tables.\n ", "language": "en", "n_whitespaces": 42, "n_words": 20, "vocab_size": 16 }
def test_run_cleanup_tables(self, clean_table_mock, table_names): base_kwargs = dict( clean_before_timestamp=None, dry_run=None, verbose=None, ) run_cleanup(**base_kwargs, table_names=table_names) assert clean_table_mock.call_count == len(table_names) if table_names else len(config_dict)
8,455
45,040
21
tests/models/test_xcom.py
7
6
def test_set_serialize_call_old_signature(self, get_import, session): serialize_watcher =
Add params dag_id, task_id etc to XCom.serialize_value (#19505) When implementing a custom XCom backend, in order to store XCom objects organized by dag_id, run_id etc, we need to pass those params to `serialize_value`.
test_set_serialize_call_old_signature
56285eee04285d8b6fac90911248d7e9dd5504d8
airflow
test_xcom.py
8
16
https://github.com/apache/airflow.git
1
82
0
7
26
Python
{ "docstring": "\n When XCom.serialize_value takes only param ``value``, other kwargs should be ignored.\n ", "language": "en", "n_whitespaces": 26, "n_words": 11, "vocab_size": 11 }
def test_set_serialize_call_old_signature(self, get_import, session): serialize_watcher = MagicMock()
69,715
241,856
152
scipy/stats/_stats_py.py
63
17
def gmean(a, axis=0, dtype=None, weights=None): if not isinstance(a, np.ndarray): # if not an ndarray object attempt to convert it log_a = np.log(np.array(a, dtype=dtype)) elif dtype: # Must change the default dtype allowing array type if isinstance(a, np.ma.MaskedArray):
ENH: stats: add `axis` tuple and `nan_policy` to `gmean` (#14657) * ENH: stats: add `axis` tuple and `nan_policy` to `gmean` Co-authored-by: Pamphile ROY <[email protected]>
gmean
465da5496a8dda099646e9d5947f24dfc0ec44e9
scipy
_stats_py.py
17
13
https://github.com/scipy/scipy.git
5
147
0
45
228
Python
{ "docstring": "Compute the geometric mean along the specified axis.\n\n Return the geometric average of the array elements.\n That is: n-th root of (x1 * x2 * ... * xn)\n\n Parameters\n ----------\n a : array_like\n Input array or object that can be converted to an array.\n axis : int or None, optional\n Axis along which the geometric mean is computed. Default is 0.\n If None, compute over the whole array `a`.\n dtype : dtype, optional\n Type of the returned array and of the accumulator in which the\n elements are summed. If dtype is not specified, it defaults to the\n dtype of a, unless a has an integer dtype with a precision less than\n that of the default platform integer. In that case, the default\n platform integer is used.\n weights : array_like, optional\n The `weights` array must be broadcastable to the same shape as `a`.\n Default is None, which gives each value a weight of 1.0.\n\n Returns\n -------\n gmean : ndarray\n See `dtype` parameter above.\n\n See Also\n --------\n numpy.mean : Arithmetic average\n numpy.average : Weighted average\n hmean : Harmonic mean\n\n Notes\n -----\n The geometric average is computed over a single dimension of the input\n array, axis=0 by default, or all values in the array if axis=None.\n float64 intermediate and return values are used for integer inputs.\n Beginning in SciPy 1.9, ``np.matrix`` inputs are converted to\n ``np.ndarray``s before the calculation is performed. In this case, the\n output will be a scalar or ``np.ndarray`` of appropriate shape rather than\n a 2D ``np.matrix``. Similarly, while masked elements of masked arrays\n are still ignored, the output will be a scalar or ``np.ndarray`` rather\n than a masked array with ``mask=False``.\n\n References\n ----------\n .. [1] \"Weighted Geometric Mean\", *Wikipedia*, https://en.wikipedia.org/wiki/Weighted_geometric_mean.\n\n Examples\n --------\n >>> from scipy.stats import gmean\n >>> gmean([1, 4])\n 2.0\n >>> gmean([1, 2, 3, 4, 5, 6, 7])\n 3.3800151591412964\n\n ", "language": "en", "n_whitespaces": 493, "n_words": 301, "vocab_size": 177 }
def gmean(a, axis=0, dtype=None, weights=None): if not isinstance(a, np.ndarray): # if not an ndarray object attempt to convert it log_a = np.log(np.array(a, dtype=dtype)) elif dtype: # Must change the default dtype allowing array type if isinstance(a, np.ma.MaskedArray): log_a = np.log(np.ma.asarray(a, dtype=dtype)) else: log_a = np.log(np.asarray(a, dtype=dtype)) else: log_a = np.log(a) if weights is not None: weights = np.asanyarray(weights, dtype=dtype) return np.exp(np.average(log_a, axis=axis, weights=weights))
50,868
204,741
47
django/core/serializers/base.py
15
5
def handle_m2m_field(self, obj, field): raise NotImplementedError( "subclasses of Serializer must provide a handle_m2m_field() method" )
Refs #33476 -- Reformatted code with Black.
handle_m2m_field
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
base.py
8
4
https://github.com/django/django.git
1
15
0
15
27
Python
{ "docstring": "\n Called to handle a ManyToManyField.\n ", "language": "en", "n_whitespaces": 20, "n_words": 5, "vocab_size": 5 }
def handle_m2m_field(self, obj, field): raise NotImplementedError( "subclasses of Serializer must provide a handle_m2m_field() method" )
25,818
116,724
209
mindsdb/integrations/handlers/hana_handler/hana_handler.py
61
20
def check_connection(self) -> StatusResponse: response = StatusResponse(False) need_to_close = self.is_connected is False try: connection = self.connect() with connection.cursor() as cur: cur.execute('SELECT * FROM SYS.M_DATABASE') response.success = True except dbapi.Error as e: log.error(f'Error connecting to SAP HANA {self.address}, {e}!') response.error_message = e if response.su
feat: add sap hana integration
check_connection
db6291bc6a2cbea0154bd41c3abff3f6cfb7bc8a
mindsdb
hana_handler.py
13
20
https://github.com/mindsdb/mindsdb.git
6
103
0
42
188
Python
{ "docstring": "\n Check the connection of the SAP HANA database\n :return: success status and error message if error occurs\n ", "language": "en", "n_whitespaces": 39, "n_words": 17, "vocab_size": 15 }
def check_connection(self) -> StatusResponse: response = StatusResponse(False) need_to_close = self.is_connected is False try: connection = self.connect() with connection.cursor() as cur: cur.execute('SELECT * FROM SYS.M_DATABASE') response.success = True except dbapi.Error as e: log.error(f'Error connecting to SAP HANA {self.address}, {e}!') response.error_message = e if response.success is True and need_to_close: self.disconnect() if response.success is False and self.is_connected is True: self.is_connected = False return response
47,454
195,867
185
sympy/matrices/common.py
90
18
def extract(self, rowsList, colsList): r if not is_sequence(rowsList) or not is_sequence(colsList): rais
Improved documentation formatting
extract
cda8dfe6f45dc5ed394c2f5cda706cd6c729f713
sympy
common.py
12
56
https://github.com/sympy/sympy.git
15
136
0
49
208
Python
{ "docstring": "Return a submatrix by specifying a list of rows and columns.\n Negative indices can be given. All indices must be in the range\n $-n \\le i < n$ where $n$ is the number of rows or columns.\n\n Examples\n ========\n\n >>> from sympy import Matrix\n >>> m = Matrix(4, 3, range(12))\n >>> m\n Matrix([\n [0, 1, 2],\n [3, 4, 5],\n [6, 7, 8],\n [9, 10, 11]])\n >>> m.extract([0, 1, 3], [0, 1])\n Matrix([\n [0, 1],\n [3, 4],\n [9, 10]])\n\n Rows or columns can be repeated:\n\n >>> m.extract([0, 0, 1], [-1])\n Matrix([\n [2],\n [2],\n [5]])\n\n Every other row can be taken by using range to provide the indices:\n\n >>> m.extract(range(0, m.rows, 2), [-1])\n Matrix([\n [2],\n [8]])\n\n RowsList or colsList can also be a list of booleans, in which case\n the rows or columns corresponding to the True values will be selected:\n\n >>> m.extract([0, 1, 2, 3], [True, False, True])\n Matrix([\n [0, 2],\n [3, 5],\n [6, 8],\n [9, 11]])\n ", "language": "en", "n_whitespaces": 426, "n_words": 156, "vocab_size": 95 }
def extract(self, rowsList, colsList): r if not is_sequence(rowsList) or not is_sequence(colsList): raise TypeError("rowsList and colsList must be iterable") # ensure rowsList and colsList are lists of integers if rowsList and all(isinstance(i, bool) for i in rowsList): rowsList = [index for index, item in enumerate(rowsList) if item] if colsList and all(isinstance(i, bool) for i in colsList): colsList = [index for index, item in enumerate(colsList) if item] # ensure everything is in range rowsList = [a2idx(k, self.rows) for k in rowsList] colsList = [a2idx(k, self.cols) for k in colsList] return self._eval_extract(rowsList, colsList)
48,217
196,850
144
sympy/integrals/integrals.py
48
16
def integrate(*args, meijerg=None, conds='piecewise', risch=None, heurisch=None, manual=None, **kwargs): doit_flags = { 'deep': False, 'meijerg': meijerg, 'conds': conds, 'risch': risch, 'heurisch': heurisch, 'manual': manual } integral = Integral(*args, **kwargs) if isinstance(integral, Integral): return integral.doit(**doit_flags) else: new_args = [a.doit(**doit_flags) if isinstance(a, Integral) else a for a in integral.args] return integral.func(*new_args)
Fix a few docstring formatting issues
integrate
1eeb01e15f06c6692a5bfd6fd2d2a3002d864a07
sympy
integrals.py
14
16
https://github.com/sympy/sympy.git
4
119
0
43
190
Python
{ "docstring": "integrate(f, var, ...)\n\n Explanation\n ===========\n\n Compute definite or indefinite integral of one or more variables\n using Risch-Norman algorithm and table lookup. This procedure is\n able to handle elementary algebraic and transcendental functions\n and also a huge class of special functions, including Airy,\n Bessel, Whittaker and Lambert.\n\n var can be:\n\n - a symbol -- indefinite integration\n - a tuple (symbol, a) -- indefinite integration with result\n given with ``a`` replacing ``symbol``\n - a tuple (symbol, a, b) -- definite integration\n\n Several variables can be specified, in which case the result is\n multiple integration. (If var is omitted and the integrand is\n univariate, the indefinite integral in that variable will be performed.)\n\n Indefinite integrals are returned without terms that are independent\n of the integration variables. (see examples)\n\n Definite improper integrals often entail delicate convergence\n conditions. Pass conds='piecewise', 'separate' or 'none' to have\n these returned, respectively, as a Piecewise function, as a separate\n result (i.e. result will be a tuple), or not at all (default is\n 'piecewise').\n\n **Strategy**\n\n SymPy uses various approaches to definite integration. One method is to\n find an antiderivative for the integrand, and then use the fundamental\n theorem of calculus. Various functions are implemented to integrate\n polynomial, rational and trigonometric functions, and integrands\n containing DiracDelta terms.\n\n SymPy also implements the part of the Risch algorithm, which is a decision\n procedure for integrating elementary functions, i.e., the algorithm can\n either find an elementary antiderivative, or prove that one does not\n exist. There is also a (very successful, albeit somewhat slow) general\n implementation of the heuristic Risch algorithm. This algorithm will\n eventually be phased out as more of the full Risch algorithm is\n implemented. See the docstring of Integral._eval_integral() for more\n details on computing the antiderivative using algebraic methods.\n\n The option risch=True can be used to use only the (full) Risch algorithm.\n This is useful if you want to know if an elementary function has an\n elementary antiderivative. If the indefinite Integral returned by this\n function is an instance of NonElementaryIntegral, that means that the\n Risch algorithm has proven that integral to be non-elementary. Note that\n by default, additional methods (such as the Meijer G method outlined\n below) are tried on these integrals, as they may be expressible in terms\n of special functions, so if you only care about elementary answers, use\n risch=True. Also note that an unevaluated Integral returned by this\n function is not necessarily a NonElementaryIntegral, even with risch=True,\n as it may just be an indication that the particular part of the Risch\n algorithm needed to integrate that function is not yet implemented.\n\n Another family of strategies comes from re-writing the integrand in\n terms of so-called Meijer G-functions. Indefinite integrals of a\n single G-function can always be computed, and the definite integral\n of a product of two G-functions can be computed from zero to\n infinity. Various strategies are implemented to rewrite integrands\n as G-functions, and use this information to compute integrals (see\n the ``meijerint`` module).\n\n The option manual=True can be used to use only an algorithm that tries\n to mimic integration by hand. 
This algorithm does not handle as many\n integrands as the other algorithms implemented but may return results in\n a more familiar form. The ``manualintegrate`` module has functions that\n return the steps used (see the module docstring for more information).\n\n In general, the algebraic methods work best for computing\n antiderivatives of (possibly complicated) combinations of elementary\n functions. The G-function methods work best for computing definite\n integrals from zero to infinity of moderately complicated\n combinations of special functions, or indefinite integrals of very\n simple combinations of special functions.\n\n The strategy employed by the integration code is as follows:\n\n - If computing a definite integral, and both limits are real,\n and at least one limit is +- oo, try the G-function method of\n definite integration first.\n\n - Try to find an antiderivative, using all available methods, ordered\n by performance (that is try fastest method first, slowest last; in\n particular polynomial integration is tried first, Meijer\n G-functions second to last, and heuristic Risch last).\n\n - If still not successful, try G-functions irrespective of the\n limits.\n\n The option meijerg=True, False, None can be used to, respectively:\n always use G-function methods and no others, never use G-function\n methods, or use all available methods (in order as described above).\n It defaults to None.\n\n Examples\n ========\n\n >>> from sympy import integrate, log, exp, oo\n >>> from sympy.abc import a, x, y\n\n >>> integrate(x*y, x)\n x**2*y/2\n\n >>> integrate(log(x), x)\n x*log(x) - x\n\n >>> integrate(log(x), (x, 1, a))\n a*log(a) - a + 1\n\n >>> integrate(x)\n x**2/2\n\n Terms that are independent of x are dropped by indefinite integration:\n\n >>> from sympy import sqrt\n >>> integrate(sqrt(1 + x), (x, 0, x))\n 2*(x + 1)**(3/2)/3 - 2/3\n >>> integrate(sqrt(1 + x), x)\n 2*(x + 1)**(3/2)/3\n\n >>> integrate(x*y)\n Traceback (most recent call last):\n ...\n ValueError: specify integration variables to integrate x*y\n\n Note that ``integrate(x)`` syntax is meant only for convenience\n in interactive sessions and should be avoided in library code.\n\n >>> integrate(x**a*exp(-x), (x, 0, oo)) # same as conds='piecewise'\n Piecewise((gamma(a + 1), re(a) > -1),\n (Integral(x**a*exp(-x), (x, 0, oo)), True))\n\n >>> integrate(x**a*exp(-x), (x, 0, oo), conds='none')\n gamma(a + 1)\n\n >>> integrate(x**a*exp(-x), (x, 0, oo), conds='separate')\n (gamma(a + 1), re(a) > -1)\n\n See Also\n ========\n\n Integral, Integral.doit\n\n ", "language": "en", "n_whitespaces": 1292, "n_words": 865, "vocab_size": 406 }
def integrate(*args, meijerg=None, conds='piecewise', risch=None, heurisch=None, manual=None, **kwargs): doit_flags = { 'deep': False, 'meijerg': meijerg, 'conds': conds, 'risch': risch, 'heurisch': heurisch, 'manual': manual } integral = Integral(*args, **kwargs) if isinstance(integral, Integral): return integral.doit(**doit_flags) else: new_args = [a.doit(**doit_flags) if isinstance(a, Integral) else a for a in integral.args] return integral.func(*new_args)
45,535
186,624
84
certbot-apache/certbot_apache/_internal/augeasparser.py
23
14
def parsed_paths(self) -> List[str]: res_paths: List[str] = [] paths = self.parser.existing_paths for directory
Add typing to certbot.apache (#9071) * Add typing to certbot.apache Co-authored-by: Adrien Ferrand <[email protected]>
parsed_paths
7d9e9a49005de7961e84d2a7c608db57dbab3046
certbot
augeasparser.py
14
16
https://github.com/certbot/certbot.git
3
57
0
20
89
Python
{ "docstring": "\n Returns a list of file paths that have currently been parsed into the parser\n tree. The returned list may include paths with wildcard characters, for\n example: ['/etc/apache2/conf.d/*.load']\n\n This is typically called on the root node of the ParserNode tree.\n\n :returns: list of file paths of files that have been parsed\n ", "language": "en", "n_whitespaces": 93, "n_words": 50, "vocab_size": 35 }
def parsed_paths(self) -> List[str]: res_paths: List[str] = [] paths = self.parser.existing_paths for directory in paths: for filename in paths[directory]: res_paths.append(os.path.join(directory, filename)) return res_paths
77,592
264,082
142
PyInstaller/building/utils.py
45
14
def _check_guts_toc_mtime(attr_name, old_toc, new_toc, last_build, pyc=False): for dest_name, src_name, typecode in old_toc: if misc.mtime(src_name) > last_build:
building: clean up the _check_guts_* helpers
_check_guts_toc_mtime
21655572a6af55cefb05d0b0afbeb0b0db39ea19
pyinstaller
utils.py
15
11
https://github.com/pyinstaller/pyinstaller.git
6
82
0
34
131
Python
{ "docstring": "\n Rebuild is required if mtimes of files listed in old TOC are newer than last_build.\n\n If pyc=True, check for .py files as well.\n\n Use this for calculated/analysed values read from cache.\n ", "language": "en", "n_whitespaces": 44, "n_words": 31, "vocab_size": 29 }
def _check_guts_toc_mtime(attr_name, old_toc, new_toc, last_build, pyc=False): for dest_name, src_name, typecode in old_toc: if misc.mtime(src_name) > last_build: logger.info("Building because %s changed", src_name) return True elif pyc and typecode == 'PYMODULE': py_filename = src_name[:-1] if misc.mtime(py_filename) > last_build: logger.info("Building because %s changed", py_filename) return True return False
25,826
116,753
213
mindsdb/integrations/handlers/teradata_handler/teradata_handler.py
65
20
def check_connection(self) -> StatusResponse: response = StatusResponse(False) need_to_close = self.is_co
feat: add teradata integration
check_connection
47c5e0ac2d89807f8ff7239d423a3d346bd39a1e
mindsdb
teradata_handler.py
13
20
https://github.com/mindsdb/mindsdb.git
6
103
0
44
188
Python
{ "docstring": "\n Check the connection of the Teradata database\n :return: success status and error message if error occurs\n ", "language": "en", "n_whitespaces": 38, "n_words": 16, "vocab_size": 14 }
def check_connection(self) -> StatusResponse: response = StatusResponse(False) need_to_close = self.is_connected is False try: connection = self.connect() with connection.cursor() as cur: cur.execute('SELECT 1 FROM (SELECT 1 AS "dual") AS "dual"') response.success = True except teradatasql.Error as e: log.error(f'Error connecting to Teradata {self.host}, {e}!') response.error_message = e if response.success is True and need_to_close: self.disconnect() if response.success is False and self.is_connected is True: self.is_connected = False return response
5,969
32,666
148
utils/prepare_for_doc_test.py
79
22
def process_doc_file(code_file, add_new_line=True): with open(code_file, "r", encoding="utf-8", newline="\n") as f: code = f.read() # fmt: off splits = code.split("```") if len(splits) % 2 != 1: raise ValueError("The number of occurrences of ``` should be an even number.") splits = [s if i % 2 == 0 else process_code_block(s, add_new_line=add_new_line) for i, s in enumerate(splits)] clean_code = "```".join(splits) # fmt:
Add a check regarding the number of occurrences of ``` (#18389) Co-authored-by: ydshieh <[email protected]>
process_doc_file
bd6d1b430080aaf7d9a15f908b95631242da3fb0
transformers
prepare_for_doc_test.py
14
13
https://github.com/huggingface/transformers.git
5
132
0
57
236
Python
{ "docstring": "\n Process given file.\n\n Args:\n code_file (`str` or `os.PathLike`): The file in which we want to style the docstring.\n ", "language": "en", "n_whitespaces": 35, "n_words": 18, "vocab_size": 18 }
def process_doc_file(code_file, add_new_line=True): with open(code_file, "r", encoding="utf-8", newline="\n") as f: code = f.read() # fmt: off splits = code.split("```") if len(splits) % 2 != 1: raise ValueError("The number of occurrences of ``` should be an even number.") splits = [s if i % 2 == 0 else process_code_block(s, add_new_line=add_new_line) for i, s in enumerate(splits)] clean_code = "```".join(splits) # fmt: on diff = clean_code != code if diff: print(f"Overwriting content of {code_file}.") with open(code_file, "w", encoding="utf-8", newline="\n") as f: f.write(clean_code)
20,661
101,241
330
plugins/extract/align/_base.py
82
28
def finalize(self, batch): for face, landmarks in zip(batch["detected_faces"], batch["landmarks"]): if not isinstance(landmarks, np.ndarray): landmarks = np.array(landmarks) face._landmarks_xy = landmarks logger.trace("Item out: %s", {key: val.shape if isinstance(val, np.ndarray) else val for key, val in batch.items()}) for filename, face in zip(batch["filename"], batch["detected_faces"]): self._output_faces.append(face)
lib.align updates: - alignments.py - Add typed dicts for imported alignments - Explicitly check for presence of thumb value in alignments dict - linting - detected_face.py - Typing - Linting - Legacy support for pre-aligned face - Update dependencies to new property names
finalize
5e73437be47f2410439a3c6716de96354e6a0c94
faceswap
_base.py
13
18
https://github.com/deepfakes/faceswap.git
7
174
0
66
280
Python
{ "docstring": " Finalize the output from Aligner\n\n This should be called as the final task of each `plugin`.\n\n Pairs the detected faces back up with their original frame before yielding each frame.\n\n Parameters\n ----------\n batch : dict\n The final ``dict`` from the `plugin` process. It must contain the `keys`:\n ``detected_faces``, ``landmarks``, ``filename``\n\n Yields\n ------\n :class:`~plugins.extract.pipeline.ExtractMedia`\n The :attr:`DetectedFaces` list will be populated for this class with the bounding boxes\n and landmarks for the detected faces found in the frame.\n ", "language": "en", "n_whitespaces": 184, "n_words": 76, "vocab_size": 59 }
def finalize(self, batch): for face, landmarks in zip(batch["detected_faces"], batch["landmarks"]): if not isinstance(landmarks, np.ndarray): landmarks = np.array(landmarks) face._landmarks_xy = landmarks logger.trace("Item out: %s", {key: val.shape if isinstance(val, np.ndarray) else val for key, val in batch.items()}) for filename, face in zip(batch["filename"], batch["detected_faces"]): self._output_faces.append(face) if len(self._output_faces) != self._faces_per_filename[filename]: continue output = self._extract_media.pop(filename) output.add_detected_faces(self._output_faces) self._output_faces = [] logger.trace("Final Output: (filename: '%s', image shape: %s, detected_faces: %s, " "item: %s)", output.filename, output.image_shape, output.detected_faces, output) yield output # <<< PROTECTED METHODS >>> # # << PROCESS_INPUT WRAPPER >>
52,763
209,670
49
scapy/layers/dcerpc.py
27
8
def find_dcerpc_interface(name): try: return next(x for x in DCE_RPC_INTERFACES.values() if x.name == name) except StopItera
[MS-RPCE] and [MS-SMB] major update (#3683) * Various fixes regarding DCE/RPC build * DCE/RPC sessions * Cleanup unused code * Add missing GSS_WRAP algo names * Add find_dcerpc_interface * Split SMB client and server * Missing StrFixedLenFieldUtf16 * Remove unfinished smbserver feature * Friendlier getter for SMB2 * DceRpcNak * Improve NDR parsing (a lot) * Minor SMB2 improvements * BIG NDR refactor + Dissect pointer deferal * Build with pointer deferral * Small build bugs * SMB2 logoff, fix rawToken in SMB standalone * Add security providers from MS-RPCE to DCERPC * Cleanup ptr_pack of NDRPacketListField * Clearer exception in find_dcerpc_interface * Add minor_version attribute * Fix computation of auth_pad in sec_trailer * Fix a WTF bug * Compute length for NDR arrays * Pass enum to EnumField * Match union attributes from response with request * Improve SMB server * Small bug in pointer deferal dissection * Add user-friendly utils * Add a few NDR tests * More user-friendly improvements * Bug: parent not copied in clone_with * Build: propagate NDR64 and bug fix * Default close response parameters * Fix Python 2.7 * Fix SMB2_Create_Context offset * Fix SMB2 create context * SMB2: support chain, improvements * Fix ioctl error * SMB: check computeNTProofStr * Fix UTCField default * Improve FileId capabilities * SMB2: contexts * Typos * Minor NDRUnion fixes * Py2 fixes
find_dcerpc_interface
ca10c5cf00425d0178998ec0b006cbb65ddbfb54
scapy
dcerpc.py
12
5
https://github.com/secdev/scapy.git
4
35
0
27
62
Python
{ "docstring": "\n Find an interface object through the name in the IDL\n ", "language": "en", "n_whitespaces": 17, "n_words": 10, "vocab_size": 9 }
def find_dcerpc_interface(name): try: return next(x for x in DCE_RPC_INTERFACES.values() if x.name == name) except StopIteration: raise AttributeError("Unknown interface !") # --- NDR fields - [C706] chap 14
43,806
182,367
194
tests/test_animator.py
86
16
def test_animatable(): animatable = AnimateTest() # Fake wall-clock time time = 100.0 # Object that does the animation animation = SimpleAnimation( animatable, "bar", time, 3.0, start_value=Animatable(20.0), end_value=Animatable(50.0), final_value=Animatable(50.0), easing=lambda x: x, ) assert
fix and test for animator
test_animatable
8be6ea91f6e8a8d24d385975f1a5a7714cf27894
textual
test_animator.py
11
23
https://github.com/Textualize/textual.git
1
170
0
49
222
Python
{ "docstring": "Test SimpleAnimation works with the Animatable protocol", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
def test_animatable(): animatable = AnimateTest() # Fake wall-clock time time = 100.0 # Object that does the animation animation = SimpleAnimation( animatable, "bar", time, 3.0, start_value=Animatable(20.0), end_value=Animatable(50.0), final_value=Animatable(50.0), easing=lambda x: x, ) assert animation(time) is False assert animatable.bar.value == 20.0 assert animation(time + 1.0) is False assert animatable.bar.value == 30.0 assert animation(time + 2.0) is False assert animatable.bar.value == 40.0 assert animation(time + 2.9) is False assert pytest.approx(animatable.bar.value, 49.0) assert animation(time + 3.0) is True # True to indicate animation is complete assert animatable.bar.value == 50.0
53,068
211,340
650
ppdet/metrics/map_utils.py
125
38
def update(self, bbox, score, label, gt_box, gt_label, difficult=None): if difficult is None: difficult = np.zeros_like(gt_label) # record class gt count for gtl, diff in zip(gt_label, difficult): if self.evaluate_difficult or int(diff) == 0: self.class_gt_counts[int(np.array(gtl))] += 1 # record class score positive visited = [False] * len(gt_label) for b, s, l in zip(bbox, score, label): pred = b.tolist() if isinstance(b, np.ndarray) else b max_idx = -1 max_overlap = -1.0 for i, gl in enumerate(gt_label): if int(gl) == int(l): if len(gt_box[i]) == 8: overlap = calc_rbox_iou(pred, gt_box[i]) else:
Refactor rbox (#6704) * refactor rbox * modify the code of save results * fix some problem * add .gitignore in dataset/dota * fix test anno path
update
e55e41945d42db787a0f7c557d53d06a6b24536b
PaddleDetection
map_utils.py
19
31
https://github.com/PaddlePaddle/PaddleDetection.git
15
303
0
81
450
Python
{ "docstring": "\n Update metric statics from given prediction and ground\n truth infomations.\n ", "language": "en", "n_whitespaces": 32, "n_words": 10, "vocab_size": 10 }
def update(self, bbox, score, label, gt_box, gt_label, difficult=None): if difficult is None: difficult = np.zeros_like(gt_label) # record class gt count for gtl, diff in zip(gt_label, difficult): if self.evaluate_difficult or int(diff) == 0: self.class_gt_counts[int(np.array(gtl))] += 1 # record class score positive visited = [False] * len(gt_label) for b, s, l in zip(bbox, score, label): pred = b.tolist() if isinstance(b, np.ndarray) else b max_idx = -1 max_overlap = -1.0 for i, gl in enumerate(gt_label): if int(gl) == int(l): if len(gt_box[i]) == 8: overlap = calc_rbox_iou(pred, gt_box[i]) else: overlap = jaccard_overlap(pred, gt_box[i], self.is_bbox_normalized) if overlap > max_overlap: max_overlap = overlap max_idx = i if max_overlap > self.overlap_thresh: if self.evaluate_difficult or \ int(np.array(difficult[max_idx])) == 0: if not visited[max_idx]: self.class_score_poss[int(l)].append([s, 1.0]) visited[max_idx] = True else: self.class_score_poss[int(l)].append([s, 0.0]) else: self.class_score_poss[int(l)].append([s, 0.0])
16,182
73,936
77
wagtail/core/permission_policies/collections.py
20
12
def _get_permission_objects_for_actions(self, actions): permission_codenames = [ "%s_%s" % (action, self.model_name) for action in actions ] return Permission.objects.filter( content_ty
Reformat with black
_get_permission_objects_for_actions
d10f15e55806c6944827d801cd9c2d53f5da4186
wagtail
collections.py
10
7
https://github.com/wagtail/wagtail.git
2
42
0
20
66
Python
{ "docstring": "\n Get a queryset of the Permission objects for the given actions\n ", "language": "en", "n_whitespaces": 26, "n_words": 11, "vocab_size": 10 }
def _get_permission_objects_for_actions(self, actions): permission_codenames = [ "%s_%s" % (action, self.model_name) for action in actions ] return Permission.objects.filter( content_type=self._content_type, codename__in=permission_codenames )
117,569
321,163
411
qutebrowser/browser/webengine/webview.py
99
31
def createWindow(self, wintype): debug_type = debug.qenum_key(QWebEnginePage, wintype) background = config.val.tabs.background log.webview.debug("createWindow with type {}, background {}".format( debug_type, background)) if wintype == QWebEnginePage.WebWindowType.WebBrowserWindow: # Shift-Alt-Click target = usertypes.ClickTarget.window elif wintype == QWebEnginePage.WebWindowType.WebDialog: log.webview.warning("{} requested, but we don't support " "that!".format(debug_type)) target = usertypes.ClickTarget.tab elif wintype == QWebEnginePage.WebWindowType.WebBrowserTab: # Middle-click / Ctrl-Click with Shift # FIXME:qtwebengine this also affects target=_blank links... if background: target = usertypes.Click
Run scripts/dev/rewrite_enums.py
createWindow
0877fb0d78635692e481c8bde224fac5ad0dd430
qutebrowser
webview.py
14
25
https://github.com/qutebrowser/qutebrowser.git
7
172
0
60
287
Python
{ "docstring": "Called by Qt when a page wants to create a new window.\n\n This function is called from the createWindow() method of the\n associated QWebEnginePage, each time the page wants to create a new\n window of the given type. This might be the result, for example, of a\n JavaScript request to open a document in a new window.\n\n Args:\n wintype: This enum describes the types of window that can be\n created by the createWindow() function.\n\n QWebEnginePage::WebBrowserWindow:\n A complete web browser window.\n QWebEnginePage::WebBrowserTab:\n A web browser tab.\n QWebEnginePage::WebDialog:\n A window without decoration.\n QWebEnginePage::WebBrowserBackgroundTab:\n A web browser tab without hiding the current visible\n WebEngineView.\n\n Return:\n The new QWebEngineView object.\n ", "language": "en", "n_whitespaces": 397, "n_words": 106, "vocab_size": 66 }
def createWindow(self, wintype): debug_type = debug.qenum_key(QWebEnginePage, wintype) background = config.val.tabs.background log.webview.debug("createWindow with type {}, background {}".format( debug_type, background)) if wintype == QWebEnginePage.WebWindowType.WebBrowserWindow: # Shift-Alt-Click target = usertypes.ClickTarget.window elif wintype == QWebEnginePage.WebWindowType.WebDialog: log.webview.warning("{} requested, but we don't support " "that!".format(debug_type)) target = usertypes.ClickTarget.tab elif wintype == QWebEnginePage.WebWindowType.WebBrowserTab: # Middle-click / Ctrl-Click with Shift # FIXME:qtwebengine this also affects target=_blank links... if background: target = usertypes.ClickTarget.tab else: target = usertypes.ClickTarget.tab_bg elif wintype == QWebEnginePage.WebWindowType.WebBrowserBackgroundTab: # Middle-click / Ctrl-Click if background: target = usertypes.ClickTarget.tab_bg else: target = usertypes.ClickTarget.tab else: raise ValueError("Invalid wintype {}".format(debug_type)) tab = shared.get_tab(self._win_id, target) return tab._widget # pylint: disable=protected-access
75,856
259,662
40
sklearn/ensemble/_gb.py
12
11
def predict(self, X): raw_predictions = self.decision_function(X) encode
DEP loss_ attribute in gradient boosting (#23079)
predict
0d669dc419524eff7f45032f4c18253e627a055b
scikit-learn
_gb.py
9
4
https://github.com/scikit-learn/scikit-learn.git
1
39
0
11
63
Python
{ "docstring": "Predict class for X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input samples. Internally, it will be converted to\n ``dtype=np.float32`` and if a sparse matrix is provided\n to a sparse ``csr_matrix``.\n\n Returns\n -------\n y : ndarray of shape (n_samples,)\n The predicted values.\n ", "language": "en", "n_whitespaces": 140, "n_words": 47, "vocab_size": 39 }
def predict(self, X): raw_predictions = self.decision_function(X) encoded_labels = self._loss._raw_prediction_to_decision(raw_predictions) return self.classes_.take(encoded_labels, axis=0)
42,319
177,255
29
networkx/algorithms/operators/all.py
17
6
def union_all(graphs, rename=()): R = None
Make all.py generator friendly (#5984) * Make compose_all generator friendly * Make disjoint_union_all and intersection_all generator friendly * Refactor disjoint_union_all to yield relabeled graphs * Make union_all generator friendly * Fix intersection_all * Fix union_all signature * Allow passing an infinite rename generator to union_all * Copy over generalizations to binary.py * Clean up rename * Simplify first_label in disjoint_union_all * Simplify disjoint_union_all * Add missing R.graph.update in intersection_all
union_all
50ff08de69c6e9541cd6c029bede5dabf56cfe73
networkx
all.py
8
66
https://github.com/networkx/networkx.git
8
194
0
16
35
Python
{ "docstring": "Returns the union of all graphs.\n\n The graphs must be disjoint, otherwise an exception is raised.\n\n Parameters\n ----------\n graphs : iterable\n Iterable of NetworkX graphs\n\n rename : iterable , optional\n Node names of graphs can be changed by specifying the tuple\n rename=('G-','H-') (for example). Node \"u\" in G is then renamed\n \"G-u\" and \"v\" in H is renamed \"H-v\". Infinite generators (like itertools.count)\n are also supported.\n\n Returns\n -------\n U : a graph with the same type as the first graph in list\n\n Raises\n ------\n ValueError\n If `graphs` is an empty list.\n\n Notes\n -----\n To force a disjoint union with node relabeling, use\n disjoint_union_all(G,H) or convert_node_labels_to integers().\n\n Graph, edge, and node attributes are propagated to the union graph.\n If a graph attribute is present in multiple graphs, then the value\n from the last graph in the list with that attribute is used.\n\n See Also\n --------\n union\n disjoint_union_all\n ", "language": "en", "n_whitespaces": 252, "n_words": 146, "vocab_size": 100 }
def union_all(graphs, rename=()): R = None seen_nodes = set() # rename graph to obtain disjoint node labels
76,685
261,192
123
sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py
63
26
def test_unknown_category_that_are_negative(): rng = np.random.RandomState(42)
FIX Treat gradient boosting categoricals outside the bounds as unknown during predict (#24283)
test_unknown_category_that_are_negative
072b481600c48662fd4893fdce461113becd207a
scikit-learn
test_gradient_boosting.py
12
14
https://github.com/scikit-learn/scikit-learn.git
1
157
0
53
238
Python
{ "docstring": "Check that unknown categories that are negative does not error.\n\n Non-regression test for #24274.\n ", "language": "en", "n_whitespaces": 20, "n_words": 14, "vocab_size": 13 }
def test_unknown_category_that_are_negative(): rng = np.random.RandomState(42) n_samples = 1000 X = np.c_[rng.rand(n_samples), rng.randint(4, size=n_samples)] y = np.zeros(shape=n_samples) y[X[:, 1] % 2 == 0] = 1 hist = HistGradientBoostingRegressor( random_state=0, categorical_features=[False, True], max_iter=10, ).fit(X, y) # Check that negative values from the second column are treated like a # missing category X_test_neg = np.asarray([[1, -2], [3, -4]]) X_test_nan = np.asarray([[1, np.nan], [3, np.nan]]) assert_allclose(hist.predict(X_test_neg), hist.predict(X_test_nan))
38,909
161,098
764
ppg_extractor/encoder/encoder_layer.py
225
32
def forward(self, x_input, mask, cache=None): if isinstance(x_input, tuple): x, pos_emb = x_input[0], x_input[1] else: x, pos_emb = x_input, None # whether to use macaron style if self.feed_forward_macaron is not None:
Init ppg extractor and ppg2mel (#375) * Init ppg extractor and ppg2mel * add preprocess and training * FIx known issues * Update __init__.py Allow to gen audio * Fix length issue * Fix bug of preparing fid * Fix sample issues * Add UI usage of PPG-vc
forward
b617a87ee40ab384767a27335313c2c65ee094ec
MockingBird
encoder_layer.py
14
53
https://github.com/babysor/MockingBird.git
19
449
0
80
699
Python
{ "docstring": "Compute encoded features.\n\n :param torch.Tensor x_input: encoded source features, w/o pos_emb\n tuple((batch, max_time_in, size), (1, max_time_in, size))\n or (batch, max_time_in, size)\n :param torch.Tensor mask: mask for x (batch, max_time_in)\n :param torch.Tensor cache: cache for x (batch, max_time_in - 1, size)\n :rtype: Tuple[torch.Tensor, torch.Tensor]\n ", "language": "en", "n_whitespaces": 92, "n_words": 43, "vocab_size": 31 }
def forward(self, x_input, mask, cache=None): if isinstance(x_input, tuple): x, pos_emb = x_input[0], x_input[1] else: x, pos_emb = x_input, None # whether to use macaron style if self.feed_forward_macaron is not None: residual = x if self.normalize_before: x = self.norm_ff_macaron(x) x = residual + self.ff_scale * self.dropout(self.feed_forward_macaron(x)) if not self.normalize_before: x = self.norm_ff_macaron(x) # multi-headed self-attention module residual = x if self.normalize_before: x = self.norm_mha(x) if cache is None: x_q = x else: assert cache.shape == (x.shape[0], x.shape[1] - 1, self.size) x_q = x[:, -1:, :] residual = residual[:, -1:, :] mask = None if mask is None else mask[:, -1:, :] if pos_emb is not None: x_att = self.self_attn(x_q, x, x, pos_emb, mask) else: x_att = self.self_attn(x_q, x, x, mask) if self.concat_after: x_concat = torch.cat((x, x_att), dim=-1) x = residual + self.concat_linear(x_concat) else: x = residual + self.dropout(x_att) if not self.normalize_before: x = self.norm_mha(x) # convolution module if self.conv_module is not None: residual = x if self.normalize_before: x = self.norm_conv(x) x = residual + self.dropout(self.conv_module(x)) if not self.normalize_before: x = self.norm_conv(x) # feed forward module residual = x if self.normalize_before: x = self.norm_ff(x) x = residual + self.ff_scale * self.dropout(self.feed_forward(x)) if not self.normalize_before: x = self.norm_ff(x) if self.conv_module is not None: x = self.norm_final(x) if cache is not None: x = torch.cat([cache, x], dim=1) if pos_emb is not None: return (x, pos_emb), mask return x, mask
46,502
191,364
58
tests/unit_tests/test_prompt.py
21
8
def test_prompt_invalid_template_format() -> None: template = "This is a {foo} test." input_variables = ["foo"] with pytest.raises(ValueError):
initial commit
test_prompt_invalid_template_format
18aeb720126a68201c7e3b5a617139c27c779496
langchain
test_prompt.py
11
8
https://github.com/hwchase17/langchain.git
1
37
0
20
69
Python
{ "docstring": "Test initializing a prompt with invalid template format.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
def test_prompt_invalid_template_format() -> None: template = "This is a {foo} test." input_variables = ["foo"] with pytest.raises(ValueError): Prompt( input_variables=input_variables, template=template, template_format="bar" )
73,140
249,805
205
tests/rest/admin/test_user.py
48
13
def test_medium_does_not_exist(self) -> None: # test for unknown medium url = "/_synapse/admin/v1/threepid/publickey/users/unknown-key" channel = self.make_request( "GET", url, access_token=self.admin_user_tok, ) self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) # test for unknown user with a known medium url = "/_synapse/admin/v1/threepid/email/users/unknown" channel = self.make_request( "GET", url, acce
Add an Admin API endpoint for looking up users based on 3PID (#14405)
test_medium_does_not_exist
a3623af74e0af0d2f6cbd37b47dc54a1acd314d5
synapse
test_user.py
10
19
https://github.com/matrix-org/synapse.git
1
110
0
28
178
Python
{ "docstring": "Tests that both a lookup for a medium that does not exist and a user that\n doesn't exist with that third party ID returns a 404", "language": "en", "n_whitespaces": 32, "n_words": 26, "vocab_size": 19 }
def test_medium_does_not_exist(self) -> None: # test for unknown medium url = "/_synapse/admin/v1/threepid/publickey/users/unknown-key" channel = self.make_request( "GET", url, access_token=self.admin_user_tok, ) self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) # test for unknown user with a known medium url = "/_synapse/admin/v1/threepid/email/users/unknown" channel = self.make_request( "GET", url, access_token=self.admin_user_tok, ) self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])
@dataclass
121,103
337,646
282
src/accelerate/utils/dataclasses.py
88
19
def deepspeed_config_process(self, prefix="", mismatches=None, config=None, must_match=True, **kwargs): mismatches = [] if mismatches is None else mismatches if config is None: config = self.deepspeed_config for key, value in config.items(): if isinstance(value, dict): self.deepspeed_config_process( prefix=prefix + key + ".", mismatches=mismatches, config=value, must_match=must
DeepSpeed Revamp (#405) * deepspeed revamp * Update dataclasses.py * Update deepspeed.py * quality * fixing code * quality * FIx imports * saving 16bit model in zero stage 3 1. Saving 16bit model in zero stage 3 2. zero init in stage 3 support using HFDeepSpeedConfig * quality * adding test and fixing bugs * update makefile for deepspeed tests * Update test.yml * adding `deepspeed` as requirement for tests * Apply suggestions from code review Co-authored-by: Sylvain Gugger <[email protected]> * quality * addressing comments * add example and minor updates 1. Add example to show the usage of config file with revamped deepspeed support. 2. update required deepspeed version to 0.6.5 2. reverting `reinit` change as it is not required, 3. raising Exception when using `clip_grad_value` with DeepSpeed/FSDP. * Documentation and Zero-3 Inference Support 1. Changes to support ZeRo Stage-3 Inference support. 2. minor bug fixes. 3. Documentation. * doc fix * Apply suggestions from code review Co-authored-by: Sylvain Gugger <[email protected]> * addressing comments * update doc to address comments and bug fixes 1. update tests and add new one testing autofill functionality of `prepare` method. 2. fix bug related to zero-3 init related to HFDeepSpeedConfig 3. Update documentation addressing comments. * removing image and hosting it on `documentation-images` dataset * check for hidden_size for zero_opt heurisitics Co-authored-by: Sylvain Gugger <[email protected]>
deepspeed_config_process
1703b79a797dab765996764707186def7533d8fd
accelerate
dataclasses.py
14
17
https://github.com/huggingface/accelerate.git
7
137
1
68
228
Python
{ "docstring": "Process the DeepSpeed config with the values from the kwargs.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 8 }
def deepspeed_config_process(self, prefix="", mismatches=None, config=None, must_match=True, **kwargs): mismatches = [] if mismatches is None else mismatches if config is None: config = self.deepspeed_config for key, value in config.items(): if isinstance(value, dict): self.deepspeed_config_process( prefix=prefix + key + ".", mismatches=mismatches, config=value, must_match=must_match, **kwargs ) else: self.fill_match(prefix + key, mismatches, must_match=must_match, **kwargs) if len(mismatches) > 0 and prefix == "": mismatches_msg = "\n".join(mismatches) raise ValueError( "Please correct the following DeepSpeed config values that mismatch kwargs " f" values:\n{mismatches_msg}\nThe easiest method is to set these DeepSpeed config values to 'auto'." ) @dataclass
83,169
279,889
125
keras/engine/training.py
27
10
def get_metrics_result(self): # Collect metrics to return return_metrics = {} for metric in self.metrics: result = metric.r
Expose Model get_metrics_result on Keras Model as a public API PiperOrigin-RevId: 475681912
get_metrics_result
8cf91871ce167d63069c99120f8580a4976a59d0
keras
training.py
13
9
https://github.com/keras-team/keras.git
3
50
0
22
84
Python
{ "docstring": "Returns the model's metrics values as a dict.\n\n If any of the metric result is a dict (containing multiple metrics),\n each of them gets added to the top level returned dict of this method.\n\n Returns:\n A `dict` containing values of the metrics listed in `self.metrics`.\n Example:\n `{'loss': 0.2, 'accuracy': 0.7}`.\n ", "language": "en", "n_whitespaces": 105, "n_words": 50, "vocab_size": 40 }
def get_metrics_result(self): # Collect metrics to return return_metrics = {} for metric in self.metrics: result = metric.result() if isinstance(result, dict): return_metrics.update(result) else: return_metrics[metric.name] = result return return_metrics
55,539
218,899
593
python3.10.4/Lib/lib2to3/refactor.py
95
23
def refactor_docstring(self, input, filename): result = [] block = None block_lineno = None indent = None lineno = 0 for line in input.splitlines(keepends=True): lineno += 1 if line.lstrip().startswith(self.PS1): if block is not None: result.extend(self.refactor_doctest(block, block_lineno, indent, filename)) block_lineno = lineno block = [line
add python 3.10.4 for windows
refactor_docstring
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
refactor.py
18
31
https://github.com/XX-net/XX-Net.git
9
211
0
47
333
Python
{ "docstring": "Refactors a docstring, looking for doctests.\n\n This returns a modified version of the input string. It looks\n for doctests, which start with a \">>>\" prompt, and may be\n continued with \"...\" prompts, as long as the \"...\" is indented\n the same as the \">>>\".\n\n (Unfortunately we can't use the doctest module's parser,\n since, like most parsers, it is not geared towards preserving\n the original source.)\n ", "language": "en", "n_whitespaces": 122, "n_words": 65, "vocab_size": 52 }
def refactor_docstring(self, input, filename): result = [] block = None block_lineno = None indent = None lineno = 0 for line in input.splitlines(keepends=True): lineno += 1 if line.lstrip().startswith(self.PS1): if block is not None: result.extend(self.refactor_doctest(block, block_lineno, indent, filename)) block_lineno = lineno block = [line] i = line.find(self.PS1) indent = line[:i] elif (indent is not None and (line.startswith(indent + self.PS2) or line == indent + self.PS2.rstrip() + "\n")): block.append(line) else: if block is not None: result.extend(self.refactor_doctest(block, block_lineno, indent, filename)) block = None indent = None result.append(line) if block is not None: result.extend(self.refactor_doctest(block, block_lineno, indent, filename)) return "".join(result)
8,643
45,557
419
tests/utils/test_edgemodifier.py
89
23
def test_complex_reversed_dag(self, test_complex_taskgroup_dag, complex_dag_expected_edges): ( dag, group, ( group_dm1, group_dm2, group_dm3, dm_in1, dm_in2, dm_in3, dm_in4, dm_out1, dm_out2, dm_out3, dm_out4, op_in1, op_out1, ), ) = test_complex_taskgroup_dag group_dm1 << [group_dm2, group_dm3] group << dm_in1 group << Label('label dm_in2 <=> group') << dm_in2 group << Label('label dm_in3/dm_in4 <=> group') << [dm_in3, dm_in4] group << Label('label op_in1 <=> group') << XComArg(op_in1, 'test_key') dm_out1 << group dm_out2 << Label('label group <=> dm_out2') << group [dm_out3, dm_out4] << Label('label group <=> dm_out3/dm_out4') << group XComArg(op_out1, 'test_key') << Label('label group <=> op_out1') << group compare_dag_edges(dag_edges(dag), complex_dag_expected_edges)
EdgeModifier refactoring (#21404)
test_complex_reversed_dag
ace8c6e942ff5554639801468b971915b7c0e9b9
airflow
test_edgemodifier.py
10
30
https://github.com/apache/airflow.git
1
150
0
50
210
Python
{ "docstring": "Tests the complex reversed dag with a TaskGroup and a Label", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 10 }
def test_complex_reversed_dag(self, test_complex_taskgroup_dag, complex_dag_expected_edges): ( dag, group, ( group_dm1, group_dm2, group_dm3, dm_in1, dm_in2, dm_in3, dm_in4, dm_out1, dm_out2, dm_out3, dm_out4, op_in1, op_out1, ), ) = test_complex_taskgroup_dag group_dm1 << [group_dm2, group_dm3] group << dm_in1 group << Label('label dm_in2 <=> group') << dm_in2 group << Label('label dm_in3/dm_in4 <=> group') << [dm_in3, dm_in4] group << Label('label op_in1 <=> group') << XComArg(op_in1, 'test_key') dm_out1 << group dm_out2 << Label('label group <=> dm_out2') << group [dm_out3, dm_out4] << Label('label group <=> dm_out3/dm_out4') << group XComArg(op_out1, 'test_key') << Label('label group <=> op_out1') << group compare_dag_edges(dag_edges(dag), complex_dag_expected_edges)
118,121
322,323
80
paddlenlp/ops/faster_transformer/sample/plato_inference.py
30
12
def postprocess_response(token_ids, tokenizer): eos_pos = len(token_ids) for i, tok_id in enumerate(token_ids): if tok_id == tokenizer.sep_token_id: eos_pos = i break token_ids = token_ids[:eos_pos] tokens = tokenizer.convert_ids_to_tokens(token_ids) tokens = tokenizer.merge_subword(tokens) return tokens
FasterUnifiedTransformer/PLATO support dy2sta (#1717) * support ut dy2sta * use jit load
postprocess_response
4c36ef9e41ea6b0e43935bdf6b2f1b4a1f8de809
PaddleNLP
plato_inference.py
10
10
https://github.com/PaddlePaddle/PaddleNLP.git
3
60
0
22
98
Python
{ "docstring": "Post-process the decoded sequence. Truncate from the first <eos>.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 8 }
def postprocess_response(token_ids, tokenizer): eos_pos = len(token_ids) for i, tok_id in enumerate(token_ids): if tok_id == tokenizer.sep_token_id: eos_pos = i break token_ids = token_ids[:eos_pos] tokens = tokenizer.convert_ids_to_tokens(token_ids) tokens = tokenizer.merge_subword(tokens) return tokens
121,193
338,219
201
src/accelerate/accelerator.py
66
16
def clip_grad_norm_(self, parameters, max_norm, norm_type=2): if self.distributed_type == DistributedType.FSDP: self.unscale_gradients() parameters = [p for p in parameters] for model in self._models: if parameters == [p for p in model.parameters()]: return mod
Return unclipped gradient from grad_clip_norm_ (#756)
clip_grad_norm_
693d46826e32507376d44f99967df4710886c984
accelerate
accelerator.py
14
11
https://github.com/huggingface/accelerate.git
7
101
0
48
156
Python
{ "docstring": "\n Should be used in place of `torch.nn.utils.clip_grad_norm_`.\n\n Returns:\n `torch.Tensor`: Total norm of the parameter gradients (viewed as a single vector).\n\n Example:\n\n ```python\n >>> from accelerate import Accelerator\n\n >>> accelerator = Accelerator(gradient_accumulation_steps=2)\n >>> dataloader, model, optimizer, scheduler = accelerator.prepare(dataloader, model, optimizer, scheduler)\n\n >>> for (input, target) in dataloader:\n ... optimizer.zero_grad()\n ... output = model(input)\n ... loss = loss_func(output, target)\n ... accelerator.backward(loss)\n ... if accelerator.sync_gradients:\n ... accelerator.clip_grad_norm_(model.parameters(), max_grad_norm)\n ... optimizer.step()\n ```\n ", "language": "en", "n_whitespaces": 232, "n_words": 69, "vocab_size": 52 }
def clip_grad_norm_(self, parameters, max_norm, norm_type=2): if self.distributed_type == DistributedType.FSDP: self.unscale_gradients() parameters = [p for p in parameters] for model in self._models: if parameters == [p for p in model.parameters()]: return model.clip_grad_norm_(max_norm, norm_type) elif self.distributed_type == DistributedType.DEEPSPEED: # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed # We cannot return the gradient norm because DeepSpeed does it. return None self.unscale_gradients() return torch.nn.utils.clip_grad_norm_(parameters, max_norm, norm_type=norm_type)
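Outside of FSDP and DeepSpeed, the method above reduces to unscaling followed by plain PyTorch gradient clipping. A minimal sketch of that non-distributed path using only standard torch APIs (no Accelerate-specific behaviour is implied; model, data and max_norm are arbitrary):

import torch

model = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

x, y = torch.randn(8, 4), torch.randn(8, 1)
loss = torch.nn.functional.mse_loss(model(x), y)
loss.backward()

# This is what the Accelerator ends up calling in the simple (non-FSDP,
# non-DeepSpeed) case: clip in place and get the total gradient norm back.
total_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
optimizer.step()
print(float(total_norm))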
56,689
222,663
399
python3.10.4/Lib/distutils/command/build_clib.py
108
15
def check_library_list(self, libraries): if not isinstance(libraries, list): raise DistutilsSetupError( "'libraries' option must be a list of tuples") for lib in libraries: if not isinstance(lib, tuple) and len(lib) != 2: raise DistutilsSetupError( "each element of 'libraries' must a 2-tuple") name, build_info = lib if not isinstance(name, str): raise DistutilsSetupError( "first element of each tuple in 'libraries' " "must be a string (the library name)") if '/' in name or (os.sep != '/' and os.sep in name): raise DistutilsSetupError("bad library name '%s': " "may not contain directory separators" % lib[0]) if not isinstance(build_info, dict): raise DistutilsSetupError( "second element of each tuple in 'libraries' " "mus
add python 3.10.4 for windows
check_library_list
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
build_clib.py
14
20
https://github.com/XX-net/XX-Net.git
10
113
0
65
199
Python
{ "docstring": "Ensure that the list of libraries is valid.\n\n `library` is presumably provided as a command option 'libraries'.\n This method checks that it is a list of 2-tuples, where the tuples\n are (library_name, build_info_dict).\n\n Raise DistutilsSetupError if the structure is invalid anywhere;\n just returns otherwise.\n ", "language": "en", "n_whitespaces": 86, "n_words": 44, "vocab_size": 35 }
def check_library_list(self, libraries): if not isinstance(libraries, list): raise DistutilsSetupError( "'libraries' option must be a list of tuples") for lib in libraries: if not isinstance(lib, tuple) and len(lib) != 2: raise DistutilsSetupError( "each element of 'libraries' must a 2-tuple") name, build_info = lib if not isinstance(name, str): raise DistutilsSetupError( "first element of each tuple in 'libraries' " "must be a string (the library name)") if '/' in name or (os.sep != '/' and os.sep in name): raise DistutilsSetupError("bad library name '%s': " "may not contain directory separators" % lib[0]) if not isinstance(build_info, dict): raise DistutilsSetupError( "second element of each tuple in 'libraries' " "must be a dictionary (build info)")
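The validator above only inspects the shape of the libraries value handed to build_clib. As an illustration of what passes and what raises DistutilsSetupError (the library names, source paths and macro values are made up; the dict keys follow build_clib's usual build_info conventions):

# Shape accepted by check_library_list: a list of (name, build_info) 2-tuples,
# where name is a string without path separators and build_info is a dict.
libraries = [
    ("mylib", {"sources": ["src/mylib.c"], "macros": [("NDEBUG", "1")]}),
    ("helpers", {"sources": ["src/helpers.c"]}),
]

# Values the checks above would reject:
bad_not_tuple = ["mylib"]                    # element is a plain string, not a 2-tuple
bad_name = [("sub/dir", {"sources": []})]    # name contains a directory separator
bad_info = [("mylib", ["src/mylib.c"])]      # build info is a list, not a dict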
3,366
20,431
563
pipenv/patched/notpip/_vendor/pygments/lexer.py
130
16
def _process_new_state(cls, new_state, unprocessed, processed): if isinstance(new_state, str): # an existing state if new_state == '#pop': return -1 elif new_state in unprocessed: return (new_state,) elif new_state == '#push': return new_state elif new_state[:5] == '#pop:': return -int(new_state[5:]) else: assert False, 'unknown new state %r' % new_state elif isinstance(new_state, combined): # combine a new state from existing ones tmp_state = '_tmp_%d' % cls._tmpname cls._tmpname += 1 itokens = [] for istate in new_state: assert istate != new_state, 'circular state ref %r' % istate itokens.extend(cls
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
_process_new_state
f3166e673fe8d40277b804d35d77dcdb760fc3b3
pipenv
lexer.py
15
30
https://github.com/pypa/pipenv.git
11
177
0
69
288
Python
{ "docstring": "Preprocess the state transition action of a token definition.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
def _process_new_state(cls, new_state, unprocessed, processed): if isinstance(new_state, str): # an existing state if new_state == '#pop': return -1 elif new_state in unprocessed: return (new_state,) elif new_state == '#push': return new_state elif new_state[:5] == '#pop:': return -int(new_state[5:]) else: assert False, 'unknown new state %r' % new_state elif isinstance(new_state, combined): # combine a new state from existing ones tmp_state = '_tmp_%d' % cls._tmpname cls._tmpname += 1 itokens = [] for istate in new_state: assert istate != new_state, 'circular state ref %r' % istate itokens.extend(cls._process_state(unprocessed, processed, istate)) processed[tmp_state] = itokens return (tmp_state,) elif isinstance(new_state, tuple): # push more than one state for istate in new_state: assert (istate in unprocessed or istate in ('#pop', '#push')), \ 'unknown new state ' + istate return new_state else: assert False, 'unknown new state def %r' % new_state
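The special new-state forms handled above ('#pop', '#push', an existing state name, a tuple of states, and combined()) are exactly what lexer authors write in a RegexLexer tokens table. A small sketch of such a table (assuming a regular pygments install rather than pipenv's vendored copy; the toy language and its rules are invented for illustration):

from pygments.lexer import RegexLexer, combined
from pygments.token import Comment, Keyword, Name, String, Text

class ToyLexer(RegexLexer):
    """Illustrative lexer exercising the new-state forms _process_new_state accepts."""
    name = "Toy"
    tokens = {
        "root": [
            (r"#.*?$", Comment.Single),
            (r"if|else", Keyword),
            (r'"', String, "string"),                    # push an existing state by name
            (r"\{", Text, ("block", "root")),            # tuple: push more than one state
            (r"<", Text, combined("block", "string")),   # combined(): merged temporary state
            (r"\s+", Text),
            (r"\w+", Name),
        ],
        "string": [
            (r'[^"]+', String),
            (r'"', String, "#pop"),                      # '#pop' returns to the previous state
        ],
        "block": [
            (r"\}", Text, "#pop"),
            (r"\{", Text, "#push"),                      # '#push' re-enters the current state
            (r"[^{}]+", Text),
        ],
    }

print(list(ToyLexer().get_tokens('if x "hi" # done')))

At class-processing time each of those third tuple elements is routed through _process_new_state, which is where the assertions in the record would fire for an unknown state name.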
75,661
259,226
169
sklearn/preprocessing/tests/test_encoders.py
89
25
def test_ohe_infrequent_two_levels_user_cats(): X_train = np.array( [["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3], dtyp
ENH Adds infrequent categories to OneHotEncoder (#16018) * ENH Completely adds infrequent categories * STY Linting * STY Linting * DOC Improves wording * DOC Lint * BUG Fixes * CLN Address comments * CLN Address comments * DOC Uses math to description float min_frequency * DOC Adds comment regarding drop * BUG Fixes method name * DOC Clearer docstring * TST Adds more tests * FIX Fixes mege * CLN More pythonic * CLN Address comments * STY Flake8 * CLN Address comments * DOC Fix * MRG * WIP * ENH Address comments * STY Fix * ENH Use functiion call instead of property * ENH Adds counts feature * CLN Rename variables * DOC More details * CLN Remove unneeded line * CLN Less lines is less complicated * CLN Less diffs * CLN Improves readiabilty * BUG Fix * CLN Address comments * TST Fix * CLN Address comments * CLN Address comments * CLN Move docstring to userguide * DOC Better wrapping * TST Adds test to handle_unknown='error' * ENH Spelling error in docstring * BUG Fixes counter with nan values * BUG Removes unneeded test * BUG Fixes issue * ENH Sync with main * DOC Correct settings * DOC Adds docstring * DOC Immprove user guide * DOC Move to 1.0 * DOC Update docs * TST Remove test * DOC Update docstring * STY Linting * DOC Address comments * ENH Neater code * DOC Update explaination for auto * Update sklearn/preprocessing/_encoders.py Co-authored-by: Roman Yurchak <[email protected]> * TST Uses docstring instead of comments * TST Remove call to fit * TST Spelling error * ENH Adds support for drop + infrequent categories * ENH Adds infrequent_if_exist option * DOC Address comments for user guide * DOC Address comments for whats_new * DOC Update docstring based on comments * CLN Update test with suggestions * ENH Adds computed property infrequent_categories_ * DOC Adds where the infrequent column is located * TST Adds more test for infrequent_categories_ * DOC Adds docstring for _compute_drop_idx * CLN Moves _convert_to_infrequent_idx into its own method * TST Increases test coverage * TST Adds failing test * CLN Careful consideration of dropped and inverse_transform * STY Linting * DOC Adds docstrinb about dropping infrequent * DOC Uses only * DOC Numpydoc * TST Includes test for get_feature_names_out * DOC Move whats new * DOC Address docstring comments * DOC Docstring changes * TST Better comments * TST Adds check for handle_unknown='ignore' for infrequent * CLN Make _infrequent_indices private * CLN Change min_frequency default to None * DOC Adds comments * ENH adds support for max_categories=1 * ENH Describe lexicon ordering for ties * DOC Better docstring * STY Fix * CLN Error when explicity dropping an infrequent category * STY Grammar Co-authored-by: Joel Nothman <[email protected]> Co-authored-by: Roman Yurchak <[email protected]> Co-authored-by: Guillaume Lemaitre <[email protected]>
test_ohe_infrequent_two_levels_user_cats
7f0006c8aad1a09621ad19c3db19c3ff0555a183
scikit-learn
test_encoders.py
16
18
https://github.com/scikit-learn/scikit-learn.git
2
203
0
67
332
Python
{ "docstring": "Test that the order of the categories provided by a user is respected.", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 12 }
def test_ohe_infrequent_two_levels_user_cats(): X_train = np.array( [["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3], dtype=object ).T ohe = OneHotEncoder( categories=[["c", "d", "a", "b"]], sparse=False, handle_unknown="infrequent_if_exist", max_categories=2, ).fit(X_train) assert_array_equal(ohe.infrequent_categories_, [["c", "d", "a"]]) X_test = [["b"], ["a"], ["c"], ["d"], ["e"]] expected = np.array([[1, 0], [0, 1], [0, 1], [0, 1], [0, 1]]) X_trans = ohe.transform(X_test) assert_allclose(expected, X_trans) # 'infrequent' is used to denote the infrequent categories for # `inverse_transform` expected_inv = [[col] for col in ["b"] + ["infrequent_sklearn"] * 4] X_inv = ohe.inverse_transform(X_trans) assert_array_equal(expected_inv, X_inv)
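The grouping behaviour the test asserts can be reproduced directly with OneHotEncoder's infrequent-category options. A short sketch on the same synthetic counts (note the record uses the older sparse=False spelling, which recent scikit-learn versions rename to sparse_output; the expected outputs in the comments are indicative):

import numpy as np
from sklearn.preprocessing import OneHotEncoder

# 5 "a", 20 "b", 10 "c", 3 "d": with max_categories=2, everything except the
# single most frequent category is collapsed into one "infrequent" column.
X_train = np.array([["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3], dtype=object).T

ohe = OneHotEncoder(
    handle_unknown="infrequent_if_exist",
    max_categories=2,
    sparse_output=False,  # use sparse=False on scikit-learn < 1.2
).fit(X_train)

print(ohe.infrequent_categories_)              # e.g. [array(['a', 'c', 'd'], dtype=object)]
print(ohe.transform([["b"], ["d"], ["zzz"]]))  # unseen 'zzz' also maps to the infrequent column

The record's variant additionally passes an explicit categories=[["c", "d", "a", "b"]] list to check that the user-given ordering is respected when picking which categories become infrequent.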
76,200
260,354
119
sklearn/decomposition/_sparse_pca.py
34
14
def fit(self, X, y=None): self._v
MAINT Use _validate_params in SparsePCA and MiniBatchSparsePCA (#23710) Co-authored-by: Guillaume Lemaitre <[email protected]> Co-authored-by: jeremiedbb <[email protected]>
fit
db6123fe40400828918037f3fae949bfcc4d9d05
scikit-learn
_sparse_pca.py
10
11
https://github.com/scikit-learn/scikit-learn.git
2
85
0
24
135
Python
{ "docstring": "Fit the model from data in X.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training vector, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n y : Ignored\n Not used, present here for API consistency by convention.\n\n Returns\n -------\n self : object\n Returns the instance itself.\n ", "language": "en", "n_whitespaces": 153, "n_words": 53, "vocab_size": 43 }
def fit(self, X, y=None): self._validate_params() random_state = check_random_state(self.random_state) X = self._validate_data(X) self.mean_ = X.mean(axis=0) X = X - self.mean_ if self.n_components is None: n_components = X.shape[1] else: n_components = self.n_components return self._fit(X, n_components, random_state)
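A minimal end-to-end use of the estimator whose fit method is shown above, on random data with arbitrary hyper-parameters (the shapes in the comments follow from those choices):

import numpy as np
from sklearn.decomposition import SparsePCA

rng = np.random.RandomState(0)
X = rng.randn(100, 10)

# fit() centers X and learns sparse components; transform() projects onto them.
spca = SparsePCA(n_components=3, random_state=0)
X_reduced = spca.fit(X).transform(X)

print(spca.components_.shape)  # (3, 10)
print(X_reduced.shape)         # (100, 3)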
11,200
55,077
74
tests/cli/test_profile.py
21
11
def test_create_profile(): invoke_and_assert( ["profile", "create", "foo"], expected_output=( f ), ) profiles = load_profiles() assert profiles["foo"] == Profile( name="foo", settings={}, source=PREFECT_PROFI
Add tests for profile CLI
test_create_profile
808660dd04465fc796a34e835467e8ae1f2449b3
prefect
test_profile.py
11
21
https://github.com/PrefectHQ/prefect.git
1
52
0
20
89
Python
{ "docstring": "\n Created profile 'foo'.\n\n Switch to your new profile with:\n\n prefect profile use 'foo'\n\n Or, to use it for a single command, include the `-p` option:\n\n prefect -p 'foo' config view\n ", "language": "en", "n_whitespaces": 105, "n_words": 30, "vocab_size": 24 }
def test_create_profile(): invoke_and_assert( ["profile", "create", "foo"], expected_output=( f ), ) profiles = load_profiles() assert profiles["foo"] == Profile( name="foo", settings={}, source=PREFECT_PROFILES_PATH.value() )
23,693
109,648
160
lib/matplotlib/tests/test_axes.py
97
21
def test_mixed_errorbar_polar_caps(): fig = plt.figure() ax = plt.subplot(111, projection='polar') # symmetric errorbars th_sym = [1, 2, 3] r_sym = [0.9]*3 ax.errorbar(th_sym, r_sym, xerr=0.35, yerr=0.2, fmt="o") # long errorbars th_long = [np.pi/2 + .1, np.pi + .1] r_long = [1.8, 2.2] ax.errorbar(th_long, r_long, xerr=0.8 * np.pi, yerr=0.15, fmt="o") # asymmetric errorbars th_asym = [4*np.pi/3 + .1, 5*np.pi/3 + .1, 2*np.pi-0.1] r_asym = [1.1]*3 xerr = [[.3, .3, .2], [.2, .3, .3]]
Curved polar errorbars - uses _interpolation_steps - prefers transform MarkerStyle in init over _transform property - adjusted what's new - added more tests for overlapping, asymmetric and long errorbars - combine all tests to a single figure - remove overlappnig since it does not work same on all platforms - rework test figure, add overlapping, might work by avoiding grid - update what's new with image and link to example
test_mixed_errorbar_polar_caps
907f78dbf959c0609ab484c59e840eea3eafee31
matplotlib
test_axes.py
11
17
https://github.com/matplotlib/matplotlib.git
1
273
0
72
348
Python
{ "docstring": "\n Mix several polar errorbar use cases in a single test figure.\n\n It is advisable to position individual points off the grid. If there are\n problems with reproducibility of this test, consider removing grid.\n ", "language": "en", "n_whitespaces": 46, "n_words": 33, "vocab_size": 32 }
def test_mixed_errorbar_polar_caps(): fig = plt.figure() ax = plt.subplot(111, projection='polar') # symmetric errorbars th_sym = [1, 2, 3] r_sym = [0.9]*3 ax.errorbar(th_sym, r_sym, xerr=0.35, yerr=0.2, fmt="o") # long errorbars th_long = [np.pi/2 + .1, np.pi + .1] r_long = [1.8, 2.2] ax.errorbar(th_long, r_long, xerr=0.8 * np.pi, yerr=0.15, fmt="o") # asymmetric errorbars th_asym = [4*np.pi/3 + .1, 5*np.pi/3 + .1, 2*np.pi-0.1] r_asym = [1.1]*3 xerr = [[.3, .3, .2], [.2, .3, .3]] yerr = [[.35, .5, .5], [.5, .35, .5]] ax.errorbar(th_asym, r_asym, xerr=xerr, yerr=yerr, fmt="o") # overlapping errorbar th_over = [2.1] r_over = [3.1] ax.errorbar(th_over, r_over, xerr=10, yerr=.2, fmt="o")
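A pared-down version of the same plotting call, reduced to one symmetric errorbar so it runs standalone; the Agg backend, point values and output filename are arbitrary choices for the sketch:

import numpy as np
import matplotlib
matplotlib.use("Agg")  # headless backend so the sketch runs without a display
import matplotlib.pyplot as plt

fig = plt.figure()
ax = plt.subplot(111, projection="polar")

# One symmetric errorbar per point: xerr is in radians (theta), yerr in radius.
theta = np.array([np.pi / 4, np.pi / 2, np.pi])
r = np.array([1.0, 1.5, 2.0])
ax.errorbar(theta, r, xerr=0.25, yerr=0.1, fmt="o", capsize=3)

fig.savefig("polar_errorbar_demo.png")

The record's test extends this with long, asymmetric and overlapping errorbars in a single figure to exercise the curved polar caps added by the commit.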
41,701
176,114
46
tests/test_eval_model.py
11
3
def test_edgeql_for_01(self): self.assert_test_query( r, {(1, 1), (2, 2), (3, 3)}, )
Pull assert_data_shape out of testbase.server and use it for model tests (#3315)
test_edgeql_for_01
20ca6e2fa7bab2adc8c37d8c42049076c692782e
edgedb
test_eval_model.py
9
7
https://github.com/edgedb/edgedb.git
1
33
0
11
46
Python
{ "docstring": "\n FOR X IN {1,2,3} UNION ((SELECT X), (SELECT X));\n ", "language": "en", "n_whitespaces": 32, "n_words": 9, "vocab_size": 9 }
def test_edgeql_for_01(self): self.assert_test_query( r, {(1, 1), (2, 2), (3, 3)}, )
117,320
320,737
78
qutebrowser/browser/downloadview.py
17
13
def on_clicked(self, index): if not index.isValid(): return item = self._model().data(index, downloads.ModelRole.item) if item.done and item.successful: item.open_file() item.remove()
mypy: Upgrade to PyQt5-stubs 5.15.6.0 For some unknown reason, those new stubs cause a *lot* of things now to be checked by mypy which formerly probably got skipped due to Any being implied somewhere. The stubs themselves mainly improved, with a couple of regressions too. In total, there were some 337 (!) new mypy errors. This commit fixes almost all of them, and the next commit improves a fix to get things down to 0 errors again. Overview of the changes: ==== qutebrowser/app.py - Drop type ignore due to improved stubs. ==== qutebrowser/browser/browsertab.py - Specify the type of _widget members more closely than just QWidget. This is debatable: I suppose the abstract stuff shouldn't need to know anything about the concrete backends at all. But it seems like we cut some corners when initially implementing things, and put some code in browsertab.py just because the APIs of both backends happened to be compatible. Perhaps something to reconsider once we drop QtWebKit and hopefully implement a dummy backend. - Add an additional assertion in AbstractAction.run_string. This is already covered by the isinstance(member, self.action_base) above it, but that's too dynamic for mypy to understand. - Fix the return type of AbstractScroller.pos_px, which is a QPoint (with x and y components), not a single int. - Fix the return type of AbstractScroller.pos_perc, which is a Tuple (with x and y components), not a single int. - Fix the argument types of AbstractScroller.to_perc, as it's possible to pass fractional percentages too. - Specify the type for AbstractHistoryPrivate._history. See above (_widget) re this being debatable. - Fix the return type of AbstractTabPrivate.event_target(), which can be None (see #3888). - Fix the return type of AbstractTabPrivate.run_js_sync, which is Any (the JS return value), not None. - Fix the argument type for AbstractTabPrivate.toggle_inspector: position can be None to use the last used position. - Declare the type of sub-objects of AbstractTab. - Fix the return value of AbstractTab.icon(), which is the QIcon, not None. ==== qutebrowser/browser/commands.py - Make sure the active window is a MainWindow (with a .win_id attribute). ==== qutebrowser/browser/downloadview.py - Add _model() which makes sure that self.model() is a DownloadModel, not None or any other model. This is needed because other methods access a variety of custom attributes on it, e.g. last_index(). ==== qutebrowser/browser/greasemonkey.py - Add an ignore for AbstractDownload.requested_url which we patch onto the downloads. Probably would be nicer to add it as a proper attribute which always gets set by the DownloadManager. ==== qutebrowser/browser/hints.py - Remove type ignores for QUrl.toString(). - Add a new type ignore for combining different URL flags (which works, but is not exactly type safe... still probably a regression in the stubs). - Make sure the things we get back from self._get_keyparser are what we actually expect. Probably should introduce a TypedDict (and/or overloads for _get_keyparser with typing.Literal) to teach mypy about the exact return value. See #7098. This is needed because we access Hint/NormalKeyParser-specific attributes such as .set_inhibited_timout() or .update_bindings(). ==== qutebrowser/browser/inspector.py - Similar changes than in browsertab.py to make some types where we share API (e.g. .setPage()) more concrete. Didn't work out unfortunately, see next commit. ==== qutebrowser/browser/network/pac.py - Remove now unneeded type ignore for signal. 
==== qutebrowser/browser/qtnetworkdownloads.py - Make sure that downloads is a qtnetworkdownloads.DownloadItem (rather than an AbstractDownload), so that we can call ._uses_nam() on it. ==== qutebrowser/browser/qutescheme.py - Remove now unneeded type ignore for QUrl flags. ==== qutebrowser/browser/urlmarks.py - Specify the type of UrlMarkManager._lineparser, as those only get initialized in _init_lineparser of subclasses, so mypy doesn't know it's supposed to exist. ==== qutebrowser/browser/webelem.py - New casts to turn single KeyboardModifier (enum) entries into KeyboardModifiers (flags). Might not be needed anymore with Qt 6. - With that, casting the final value is now unneeded. ==== qutebrowser/browser/webengine/notification.py - Remove now unneeded type ignore for signal. - Make sure the self.sender() we get in HerbeNotificationAdapter._on_finished() is a QProcess, not just any QObject. ==== qutebrowser/browser/webengine/webenginedownloads.py - Remove now unneeded type ignores for signals. ==== qutebrowser/browser/webengine/webengineelem.py - Specify the type of WebEngineElement._tab. - Remove now unneeded type ignore for mixed flags. ==== qutebrowser/browser/webengine/webengineinspector.py - See changes to inspector.py and next commit. - Remove now unneeded type ignore for signal. ==== qutebrowser/browser/webengine/webenginequtescheme.py - Remove now unneeded type ignore for mixed flags. ==== qutebrowser/browser/webengine/webenginesettings.py - Ignore access of .setter attribute which we patch onto QWebEngineProfile. Would be nice to have a subclass or wrapper-class instead. ==== qutebrowser/browser/webengine/webenginetab.py - Specified the type of _widget members more closely than just QWidget. See browsertab.py changes for details. - Remove some now-unneeded type ignores for creating FindFlags. - Specify more concrete types for WebEngineTab members where we actually need to access WebEngine-specific attributes. - Make sure the page we get is our custom WebEnginePage subclass, not just any QWebEnginePage. This is needed because we access custom attributes on it. ==== qutebrowser/browser/webengine/webview.py - Make sure the page we get is our custom WebEnginePage subclass, not just any QWebEnginePage. This is needed because we access custom attributes on it. ==== qutebrowser/browser/webkit/network/networkreply.py - Remove now unneeded type ignores for signals. ==== qutebrowser/browser/webkit/webkitinspector.py - See changes to inspector.py and next commit. ==== qutebrowser/browser/webkit/webkittab.py - Specify the type of _widget members more closely than just QWidget. See browsertab.py changes for details. - Add a type ignore for WebKitAction because our workaround needs to treat them as ints (which is allowed by PyQt, even if not type-safe). - Add new ignores for findText calls: The text is a QString and can be None; the flags are valid despite mypy thinking they aren't (stubs regression?). - Specify the type for WebKitHistoryPrivate._history, because we access WebKit-specific attributes. See above (_widget) re this being debatable. - Make mypy aware that .currentFrame() and .frameAt() can return None (stubs regression?). - Make sure the .page() and .page().networkAccessManager() are our subclasses rather than the more generic QtWebKit objects, as we use custom attributes. - Add new type ignores for signals (stubs regression!) 
==== qutebrowser/browser/webkit/webpage.py - Make sure the .networkAccessManager() is our subclass rather than the more generic QtWebKit object, as we use custom attributes. - Replace a cast by a type ignore. The cast didn't work anymore. ==== qutebrowser/browser/webkit/webview.py - Make sure the .page() is our subclass rather than the more generic QtWebKit object, as we use custom attributes. ==== qutebrowser/commands/userscripts.py - Remove now unneeded type ignore for signal. ==== qutebrowser/completion/completer.py - Add a new _completion() getter (which ensures it actually gets the completion view) rather than accessing the .parent() directly (which could be any QObject). ==== qutebrowser/completion/completiondelegate.py - Make sure self.parent() is a CompletionView (no helper method as there is only one instance). - Remove a now-unneeded type ignore for adding QSizes. ==== qutebrowser/completion/completionwidget.py - Add a ._model() getter which ensures that we get a CompletionModel (with custom attributes) rather than Qt's .model() which can be any QAbstractItemModel (or None). - Removed a now-unneeded type ignore for OR-ing flags. ==== qutebrowser/completion/models/completionmodel.py - Remove now unneeded type ignores for signals. - Ignore a complaint about .set_pattern() not being defined. Completion categories don't share any common parent class, so it would be good to introduce a typing.Protocol for this. See #7098. ==== qutebrowser/components/misccommands.py - Removed a now-unneeded type ignore for OR-ing flags. ==== qutebrowser/components/readlinecommands.py - Make sure QApplication.instance() is a QApplication (and not just a QCoreApplication). This includes the former "not None" check. ==== qutebrowser/components/scrollcommands.py - Add basic annotation for "funcs" dict. Could have a callable protocol to specify it needs a count kwarg, see #7098. ==== qutebrowser/config/stylesheet.py - Correctly specify that stylesheet apply to QWidgets, not any QObject. - Ignore an attr-defined for obj.STYLESHEET. Perhaps could somehow teach mypy about this with overloads and protocols (stylesheet for set_register being None => STYLESHEET needs to be defined, otherwise anything goes), but perhaps not worth the troble. See #7098. ==== qutebrowser/keyinput/keyutils.py - Remove some now-unneeded type ignores and add a cast for using a single enum value as flags. Might need to look at this again with Qt 6 support. ==== qutebrowser/keyinput/modeman.py - Add a FIXME for using a TypedDict, see comments for hints.py above. ==== qutebrowser/mainwindow/mainwindow.py - Remove now-unneeded type ignores for calling with OR-ed flags. - Improve where we cast from WindowType to WindowFlags, no int needed - Use new .tab_bar() getter, see below. ==== qutebrowser/mainwindow/prompt.py - Remove now-unneeded type ignores for calling with OR-ed flags. ==== qutebrowser/mainwindow/statusbar/bar.py - Adjust type ignores around @pyqtProperty. The fact one is still needed seems like a stub regression. ==== qutebrowser/mainwindow/statusbar/command.py - Fix type for setText() override (from QLineEdit): text can be None (QString in C++). ==== qutebrowser/mainwindow/statusbar/url.py - Adjust type ignores around @pyqtProperty. The fact one is still needed seems like a stub regression. ==== qutebrowser/mainwindow/tabbedbrowser.py - Specify that TabDeque manages browser tabs, not any QWidgets. It accesses AbstractTab-specific attributes. - Make sure that the .tabBar() we get is a tabwidget.TabBar, as we access .maybe_hide. 
- Fix the annotations for stored marks: Scroll positions are a QPoint, not int. - Add _current_tab() and _tab_by_idx() wrappers for .currentWidget() and .widget(), which ensures that the return values are valid AbstractTabs (or None for _tab_by_idx). This is needed because we access AbstractTab-specific attributes. - For some places, where the tab can be None, continue using .currentTab() but add asserts. - Remove some now-unneeded [unreachable] ignores, as mypy knows about the None possibility now. ==== qutebrowser/mainwindow/tabwidget.py - Add new tab_bar() and _tab_by_idx() helpers which check that the .tabBar() and .widget() are of type TabBar and AbstractTab, respectively. - Add additional assertions where we expect ._tab_by_idx() to never be None. - Remove dead code in get_tab_fields for handling a None y scroll position. I was unable to find any place in the code where this could be set to None. - Remove some now-unneeded type ignores and casts, as mypy now knows that _type_by_idx() could be None. - Work around a strange instance where mypy complains about not being able to find the type of TabBar.drag_in_progress from TabWidget._toggle_visibility, despite it clearly being shown as a bool *inside* that class without any annotation. - Add a ._tab_widget() getter in TabBar which ensures that the .parent() is in fact a TabWidget. ==== qutebrowser/misc/crashsignal.py - Remove now unneeded type ignores for signals. ==== qutebrowser/misc/editor.py - Remove now unneeded type ignores for signals. ==== qutebrowser/misc/ipc.py - Remove now unneeded type ignores for signals. - Add new type ignores for .error() which is both a signal and a getter (stub regression?). Won't be relevant for Qt 6 anymore, as the signal was renamed to errorOccurred in 5.15. ==== qutebrowser/misc/objects.py - Make sure mypy knows that objects.app is our custom Application (with custom attributes) rather than any QApplication. ==== qutebrowser/utils/objreg.py - Ignore attr-defined for .win_id attributes. Maybe could add a typing.Protocol, but ideally, the whole objreg stuff should die one day anyways. ==== tests/unit/completion/test_completer.py - Make CompletionWidgetStub inherit from CompletionView so that it passes the new isinstance() asserts in completer.py (see above).
on_clicked
a20bb67a878b2e68abf8268c1b0a27f018d01352
qutebrowser
downloadview.py
10
7
https://github.com/qutebrowser/qutebrowser.git
4
54
0
16
91
Python
{ "docstring": "Handle clicking of an item.\n\n Args:\n index: The QModelIndex of the clicked item.\n ", "language": "en", "n_whitespaces": 38, "n_words": 13, "vocab_size": 11 }
def on_clicked(self, index): if not index.isValid(): return item = self._model().data(index, downloads.ModelRole.item) if item.done and item.successful: item.open_file() item.remove()
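Stripped of qutebrowser's download model, the pattern above (ignore invalid indexes, then fetch the row's payload from the model) looks like this in plain PyQt5; the list contents are invented, Qt.DisplayRole stands in for qutebrowser's custom downloads.ModelRole.item, and a GUI session (or QT_QPA_PLATFORM=offscreen) is assumed:

from PyQt5.QtCore import Qt, QStringListModel, QModelIndex
from PyQt5.QtWidgets import QApplication, QListView

def on_clicked(view, index: QModelIndex):
    # Same guard as the record above: clicks that land outside any row are ignored.
    if not index.isValid():
        return
    payload = view.model().data(index, Qt.DisplayRole)  # qutebrowser reads a custom item role here
    print("clicked:", payload)

app = QApplication([])
view = QListView()
view.setModel(QStringListModel(["finished.pdf", "running.iso"]))
view.clicked.connect(lambda idx: on_clicked(view, idx))
view.show()
# app.exec_()  # uncomment to actually enter the event loop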
@add_start_docstrings( """ XLM-RoBERTa-xlarge Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """, XLM_ROBERTA_XL_START_DOCSTRING, )
6,311
34,690
44
src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py
27
6
def _tie_weights(self): # To tie those two weights if they get disconnec
Add support for XLM-R XL and XXL models by modeling_xlm_roberta_xl.py (#13727) * add xlm roberta xl * add convert xlm xl fairseq checkpoint to pytorch * fix init and documents for xlm-roberta-xl * fix indention * add test for XLM-R xl,xxl * fix model hub name * fix some stuff * up * correct init * fix more * fix as suggestions * add torch_device * fix default values of doc strings * fix leftovers * merge to master * up * correct hub names * fix docs * fix model * up * finalize * last fix * Apply suggestions from code review Co-authored-by: Sylvain Gugger <[email protected]> * add copied from * make style Co-authored-by: Patrick von Platen <[email protected]> Co-authored-by: Sylvain Gugger <[email protected]>
_tie_weights
e09473a817c5e5871e11cc81004355ef30250502
transformers
modeling_xlm_roberta_xl.py
8
2
https://github.com/huggingface/transformers.git
1
14
1
27
38
Python
{ "docstring": "\n XLM-RoBERTa-xlarge Model transformer with a sequence classification/regression head on top (a linear layer on top\n of the pooled output) e.g. for GLUE tasks.\n ", "language": "en", "n_whitespaces": 33, "n_words": 23, "vocab_size": 21 }
def _tie_weights(self): # To tie those two weights if they get disconnected (on TPU or when the bias is resized) self.bias = self.decoder.bias @add_start_docstrings( , XLM_ROBERTA_XL_START_DOCSTRING, )
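The one-liner above re-links the LM head's bias to the decoder's bias so the two cannot drift apart after a resize or reload. A generic PyTorch sketch of the same tying idea, mirroring the record's assignment direction (the module and sizes are illustrative, not the actual XLM-RoBERTa-XL head):

import torch
import torch.nn as nn

class TinyLMHead(nn.Module):
    """Illustrative head: a decoder projection plus a separately stored bias."""

    def __init__(self, hidden=8, vocab=16):
        super().__init__()
        self.decoder = nn.Linear(hidden, vocab)
        self.bias = nn.Parameter(torch.zeros(vocab))
        self._tie_weights()

    def _tie_weights(self):
        # Point self.bias at the decoder's Parameter object, so resizing or
        # reloading one of them never leaves the two biases out of sync.
        self.bias = self.decoder.bias

    def forward(self, x):
        return self.decoder(x)

head = TinyLMHead()
assert head.bias is head.decoder.bias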
3,071
19,706
23
pipenv/installers.py
9
5
def matches_minor(self, other): re
Issue 4993 Add standard pre commit hooks and apply linting. (#4994) * Add .pre-commit-config.yaml to the project and exclude tests (for now). This does not include the MyPy linting that pip does but does include everything else.
matches_minor
9a3b3ce70621af6f9adaa9eeac9cf83fa149319c
pipenv
installers.py
8
2
https://github.com/pypa/pipenv.git
1
28
0
9
43
Python
{ "docstring": "Check whether this version matches the other in (major, minor).", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
def matches_minor(self, other): return (self.major, self.minor) == (other.major, other.minor)
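The comparison is just tuple equality on (major, minor), deliberately ignoring the patch level. A tiny self-contained sketch (the namedtuple here is a stand-in, not pipenv's installer Version class):

from collections import namedtuple

# Stand-in for pipenv's version object: only major/minor/patch are needed here.
Version = namedtuple("Version", ["major", "minor", "patch"])

def matches_minor(a, b):
    # Same comparison as the record: patch level is deliberately ignored.
    return (a.major, a.minor) == (b.major, b.minor)

print(matches_minor(Version(3, 10, 1), Version(3, 10, 7)))  # True
print(matches_minor(Version(3, 10, 1), Version(3, 9, 18)))  # False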
25,781
116,582
73
mindsdb/integrations/handlers/lightwood_handler/tests/test_lightwood_handler.py
24
15
def test_04_query_predictor_single_where_condition(self): time.sleep(120) # TODO query = f response = self.handler.native_query(query) self.assertTrue(response.type == RESPONSE_TYPE.TABLE) self.assertTrue(len(response.data_frame) == 1) self.assertTrue(response.data_frame['sqft'][0] == 100) self.assertTrue(response.data_frame['rental_price'][0] is not None)
test fix
test_04_query_predictor_single_where_condition
b999051fd8153a1d3624471cac5483867116f985
mindsdb
test_lightwood_handler.py
11
12
https://github.com/mindsdb/mindsdb.git
1
83
0
21
143
Python
{ "docstring": "\n SELECT target\n from {self.test_model_1}\n WHERE sqft=100\n ", "language": "en", "n_whitespaces": 47, "n_words": 6, "vocab_size": 6 }
def test_04_query_predictor_single_where_condition(self): time.sleep(120) # TODO query = f response = self.handler.native_query(query) self.assertTrue(response.type == RESPONSE_TYPE.TABLE) self.assertTrue(len(response.data_frame) == 1) self.assertTrue(response.data_frame['sqft'][0] == 100) self.assertTrue(response.data_frame['rental_price'][0] is not None)