Dataset schema. Each column is listed with its dtype and the observed
minimum/maximum (numeric range for int64 columns, string length for string
columns). The records below follow this column order; columns with empty
values (e.g. ast_errors) are simply absent from a record.

column           dtype             min   max
ast_errors       string (length)   0     3.2k
d_id             int64             44    121k
id               int64             70    338k
n_whitespaces    int64             3     14k
path             string (length)   8     134
n_words          int64             4     4.82k
n_identifiers    int64             1     131
random_cut       string (length)   16    15.8k
commit_message   string (length)   2     15.3k
fun_name         string (length)   1     84
commit_id        string (length)   40    40
repo             string (length)   3     28
file_name        string (length)   5     79
ast_levels       int64             6     31
nloc             int64             1     548
url              string (length)   31    59
complexity       int64             1     66
token_counts     int64             6     2.13k
n_ast_errors     int64             0     28
vocab_size       int64             4     1.11k
n_ast_nodes      int64             15    19.2k
language         string (1 class)  -     -
documentation    dict              -     -
code             string (length)   101   62.2k
38,865
161,042
50
ppg2mel/train/solver.py
13
13
def progress(self, msg):
    if self.paras.verbose:
        sys.stdout.write("\033[K")  # Clear line
        print('[{}] {}'.format(human
Init ppg extractor and ppg2mel (#375) * Init ppg extractor and ppg2mel * add preprocess and training * FIx known issues * Update __init__.py Allow to gen audio * Fix length issue * Fix bug of preparing fid * Fix sample issues * Add UI usage of PPG-vc
progress
b617a87ee40ab384767a27335313c2c65ee094ec
MockingBird
solver.py
14
4
https://github.com/babysor/MockingBird.git
2
43
0
13
78
Python
{ "docstring": " Verbose function for updating progress on stdout (do not include newline) ", "language": "en", "n_whitespaces": 12, "n_words": 11, "vocab_size": 11 }
def progress(self, msg):
    if self.paras.verbose:
        sys.stdout.write("\033[K")  # Clear line
        print('[{}] {}'.format(human_format(self.step), msg), end='\r')
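A minimal sketch of the same in-place progress pattern outside the solver: the ANSI code `\033[K` erases to the end of the line and `end='\r'` returns the cursor, so each message overwrites the last. (`human_format` and `self.paras` belong to the MockingBird codebase and are not reproduced here.)

```python
import sys
import time

for step in range(1, 4):
    sys.stdout.write("\033[K")                           # clear the current line
    print('[{}] processing...'.format(step), end='\r')   # overwrite in place
    time.sleep(0.2)
print()  # move past the progress line when done
```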
56,870
223,317
150
python3.10.4/Lib/distutils/tests/test_sysconfig.py
46
24
def test_customize_compiler_before_get_config_vars(self):
    # Issue #21923: test that a Distribution compiler
    # instance can be called without an explicit call to
    # get_config_vars().
    with open(TESTFN, 'w') as f:
        f.writelines(textwrap.dedent())
    p = subprocess.Popen
add python 3.10.4 for windows
test_customize_compiler_before_get_config_vars
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
test_sysconfig.py
12
15
https://github.com/XX-net/XX-Net.git
1
82
0
43
137
Python
{ "docstring": "\\\n from distutils.core import Distribution\n config = Distribution().get_command_obj('config')\n # try_compile may pass or it may fail if no compiler\n # is found but it should not raise an exception.\n rc = config.try_compile('int x;')\n ", "language": "en", "n_whitespaces": 123, "n_words": 33, "vocab_size": 29 }
def test_customize_compiler_before_get_config_vars(self):
    # Issue #21923: test that a Distribution compiler
    # instance can be called without an explicit call to
    # get_config_vars().
    with open(TESTFN, 'w') as f:
        f.writelines(textwrap.dedent('''\
            from distutils.core import Distribution
            config = Distribution().get_command_obj('config')
            # try_compile may pass or it may fail if no compiler
            # is found but it should not raise an exception.
            rc = config.try_compile('int x;')
            '''))
    p = subprocess.Popen([str(sys.executable), TESTFN],
                         stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT,
                         universal_newlines=True)
    outs, errs = p.communicate()
    self.assertEqual(0, p.returncode, "Subprocess failed: " + outs)
@pytest.fixture
112,597
313,986
106
tests/components/zha/test_siren.py
15
10
def siren_platform_only():
    with patch(
        "homeassistant.components.zha.PLATFORMS",
        (
            Platform.DEVICE_TRACKER,
            Platform.NUMBER,
            Platform.SENSOR,
            Platform.SELECT,
Speed up zha tests (#73627)
siren_platform_only
4bc5d7bfed07c20d6f3438ab91c734a620505a33
core
test_siren.py
11
12
https://github.com/home-assistant/core.git
1
36
1
15
66
Python
{ "docstring": "Only setup the siren and required base platforms to speed up tests.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
def siren_platform_only():
    with patch(
        "homeassistant.components.zha.PLATFORMS",
        (
            Platform.DEVICE_TRACKER,
            Platform.NUMBER,
            Platform.SENSOR,
            Platform.SELECT,
            Platform.SIREN,
        ),
    ):
        yield


@pytest.fixture
72,113
248,119
191
tests/server_notices/test_resource_limits_server_notices.py
33
28
def test_maybe_send_server_notice_when_alerting_suppressed_room_blocked(self):
    self._rlsn._auth.check_auth_blocking = Mock(
        return_va
Prefer `make_awaitable` over `defer.succeed` in tests (#12505) When configuring the return values of mocks, prefer awaitables from `make_awaitable` over `defer.succeed`. `Deferred`s are only awaitable once, so it is inappropriate for a mock to return the same `Deferred` multiple times. Also update `run_in_background` to support functions that return arbitrary awaitables. Signed-off-by: Sean Quah <[email protected]>
test_maybe_send_server_notice_when_alerting_suppressed_room_blocked
78b99de7c206b106340e12cdee0af9aa246bd5ad
synapse
test_resource_limits_server_notices.py
14
18
https://github.com/matrix-org/synapse.git
1
122
0
24
197
Python
{ "docstring": "\n When the room is already in a blocked state, test that when alerting\n is suppressed that the room is returned to an unblocked state.\n ", "language": "en", "n_whitespaces": 46, "n_words": 24, "vocab_size": 19 }
def test_maybe_send_server_notice_when_alerting_suppressed_room_blocked(self):
    self._rlsn._auth.check_auth_blocking = Mock(
        return_value=make_awaitable(None),
        side_effect=ResourceLimitError(
            403, "foo", limit_type=LimitBlockingTypes.MONTHLY_ACTIVE_USER
        ),
    )
    self._rlsn._server_notices_manager.__is_room_currently_blocked = Mock(
        return_value=make_awaitable((True, []))
    )
    mock_event = Mock(
        type=EventTypes.Message, content={"msgtype": ServerNoticeMsgType}
    )
    self._rlsn._store.get_events = Mock(
        return_value=make_awaitable({"123": mock_event})
    )
    self.get_success(self._rlsn.maybe_send_server_notice_to_user(self.user_id))
    self._send_notice.assert_called_once()
53,786
215,066
212
tests/pytests/integration/modules/test_event.py
79
25
def test_send(event_listener, salt_master, salt_minion, salt_call_cli):
    event_tag = random_string("salt/test/event/")
    data = {"event.fire": "just test it!!!!"}
    start_time = time.time()
    ret = salt_call_cli.run(
        "event.send",
        event_tag,
        data=data,
        with_grains=True,
        with_pillar=True,
Fix salt-call event.send call with grains and pillar
test_send
374723c3abedee9ea5a399b566b460497b3885f6
salt
test_event.py
12
27
https://github.com/saltstack/salt.git
2
183
0
57
309
Python
{ "docstring": "\n Test sending an event to the master event bus\n ", "language": "en", "n_whitespaces": 16, "n_words": 9, "vocab_size": 8 }
def test_send(event_listener, salt_master, salt_minion, salt_call_cli):
    event_tag = random_string("salt/test/event/")
    data = {"event.fire": "just test it!!!!"}
    start_time = time.time()
    ret = salt_call_cli.run(
        "event.send",
        event_tag,
        data=data,
        with_grains=True,
        with_pillar=True,
        preload={"foo": "bar"},
    )
    assert ret.exitcode == 0
    assert ret.json
    assert ret.json is True
    event_pattern = (salt_master.id, event_tag)
    matched_events = event_listener.wait_for_events(
        [event_pattern], after_time=start_time, timeout=30
    )
    assert matched_events.found_all_events
    for event in matched_events:
        assert event.data["id"] == salt_minion.id
        assert event.data["cmd"] == "_minion_event"
        assert "event.fire" in event.data["data"]
        assert event.data["foo"] == "bar"
        assert event.data["data"]["grains"]["test_grain"] == "cheese"
        assert event.data["data"]["pillar"]["ext_spam"] == "eggs"
26,740
119,997
79
jax/_src/lax/control_flow.py
41
15
def _check_tree_and_avals(what, tree1, avals1, tree2, avals2):
    if tree1 != tree2:
        raise TypeError(
            f"{what} must have same type structure, got {tree1} and {tree2}.")
    if not all(_map(core.typematch, avals1, avals2)):
        diff = tree_map(_show_diff, tree_unflatten(tree1, avals1),
                        tree_unflatten(tree2, avals2))
        raise TypeError(f"{what} mus
Deprecate jax.tree_util.tree_multimap
_check_tree_and_avals
df1ceaeeb11efc7c5af1ad2dd102857128c23b26
jax
control_flow.py
12
8
https://github.com/google/jax.git
3
67
0
36
122
Python
{ "docstring": "Raises TypeError if (tree1, avals1) does not match (tree2, avals2).\n\n Corresponding `tree` and `avals` must match in the sense that the number of\n leaves in `tree` must be equal to the length of `avals`. `what` will be\n prepended to details of the mismatch in TypeError.\n ", "language": "en", "n_whitespaces": 49, "n_words": 45, "vocab_size": 33 }
def _check_tree_and_avals(what, tree1, avals1, tree2, avals2):
    if tree1 != tree2:
        raise TypeError(
            f"{what} must have same type structure, got {tree1} and {tree2}.")
    if not all(_map(core.typematch, avals1, avals2)):
        diff = tree_map(_show_diff, tree_unflatten(tree1, avals1),
                        tree_unflatten(tree2, avals2))
        raise TypeError(f"{what} must have identical types, got\n{diff}.")
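A small illustration of the first check above, using JAX's public pytree helpers: two trees with different structures compare unequal, which is exactly what triggers the "must have same type structure" TypeError.

```python
import jax

# Tree structures capture the container layout, independent of leaf values.
t1 = jax.tree_util.tree_structure({"a": 0, "b": 0})
t2 = jax.tree_util.tree_structure({"a": 0})
print(t1 == t2)  # False -> _check_tree_and_avals would raise TypeError
```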
26,359
118,684
257
lib/tests/streamlit/config_test.py
70
26
def test_load_global_local_flag_config(self):
    # Two triple-quoted TOML literals were stripped into this record's
    # documentation field, which collapses the next assignments in this cut.
    global_config = local_config = global_config_path = "/mock/home/folder/.streamlit/config.toml"
    local_config_path = os.path.join(os.getcwd(), ".streamlit/config.toml")
    global_open = mock_open(read_data=global_config)
    local_open = mock_open(read_data=local_config)
    open = mock_open()
    open.side_effect = [global_open.return_value, local_open.return_value]
    open_patch = patch("streamlit.config.open", open)
    # patch streamlit.*.os.* instead of os.* for py35 compat
    makedirs_patch = patch("streamlit.config.os.makedirs")
    makedirs_patch.return_value = True
    pathexists_patch = patch("streamlit.config.os.path.exists")
    pathexists_patch.side_effect =
Report sharing removal (#4260) The report sharing feature is a substantial but completely unused portion of the code in Streamlit's underlying machinery. The feature was created early on, used by just a few groups, and has not been used by anyone for a while, as indicated by no activity in the associated S3 buckets. This commit removes that code to make the remaining code easier to navigate and understand.
test_load_global_local_flag_config
dd9084523e365e637443ea351eaaaa25f52d8412
streamlit
config_test.py
13
31
https://github.com/streamlit/streamlit.git
1
163
0
58
292
Python
{ "docstring": "Test that CLI flags have higher priority than both\n ~/.streamlit/config.toml and $CWD/.streamlit/config.toml at parse time.\n \n [theme]\n base = \"dark\"\n font = \"sans serif\"\n textColor = \"#FFFFFF\"\n \n [theme]\n base = \"light\"\n font = \"serif\"\n ", "language": "en", "n_whitespaces": 112, "n_words": 33, "vocab_size": 26 }
def test_load_global_local_flag_config(self):
    global_config = """
    [theme]
    base = "dark"
    font = "sans serif"
    textColor = "#FFFFFF"
    """
    local_config = """
    [theme]
    base = "light"
    font = "serif"
    """
    global_config_path = "/mock/home/folder/.streamlit/config.toml"
    local_config_path = os.path.join(os.getcwd(), ".streamlit/config.toml")

    global_open = mock_open(read_data=global_config)
    local_open = mock_open(read_data=local_config)
    open = mock_open()
    open.side_effect = [global_open.return_value, local_open.return_value]

    open_patch = patch("streamlit.config.open", open)
    # patch streamlit.*.os.* instead of os.* for py35 compat
    makedirs_patch = patch("streamlit.config.os.makedirs")
    makedirs_patch.return_value = True
    pathexists_patch = patch("streamlit.config.os.path.exists")
    pathexists_patch.side_effect = lambda path: path in [
        global_config_path,
        local_config_path,
    ]

    with open_patch, makedirs_patch, pathexists_patch:
        config.get_config_options(options_from_flags={"theme.font": "monospace"})

        self.assertEqual("light", config.get_option("theme.base"))
        self.assertEqual("#FFFFFF", config.get_option("theme.textColor"))
        self.assertEqual("monospace", config.get_option("theme.font"))
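A minimal sketch of the layered mock_open trick the test relies on: the outer mock's side_effect hands back a different pre-loaded file handle on each call to open(). The file names and contents here are made up for the demo.

```python
from unittest.mock import mock_open, patch

global_open = mock_open(read_data='base = "dark"')
local_open = mock_open(read_data='base = "light"')
opener = mock_open()
# Each call to open() returns the next handle in this list.
opener.side_effect = [global_open.return_value, local_open.return_value]

with patch("builtins.open", opener):
    with open("global.toml") as f:
        assert f.read() == 'base = "dark"'
    with open("local.toml") as f:
        assert f.read() == 'base = "light"'
```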
@keep_lazy_text
51,670
206,738
52
django/utils/text.py
31
10
def get_valid_filename(name):
    s = str(name).strip().replace(" ", "_")
    s = re.sub(r"(?u)[^-\w.]", "", s)
    if s in {"", ".", ".."}:
        raise SuspiciousFileOperation("Could not derive file name from '%s'" % name)
    return s


@keep_lazy_text
Refs #33476 -- Reformatted code with Black.
get_valid_filename
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
text.py
12
6
https://github.com/django/django.git
2
56
1
27
106
Python
{ "docstring": "\n Return the given string converted to a string that can be used for a clean\n filename. Remove leading and trailing spaces; convert other spaces to\n underscores; and remove anything that is not an alphanumeric, dash,\n underscore, or dot.\n >>> get_valid_filename(\"john's portrait in 2004.jpg\")\n 'johns_portrait_in_2004.jpg'\n ", "language": "en", "n_whitespaces": 66, "n_words": 44, "vocab_size": 39 }
def get_valid_filename(name):
    s = str(name).strip().replace(" ", "_")
    s = re.sub(r"(?u)[^-\w.]", "", s)
    if s in {"", ".", ".."}:
        raise SuspiciousFileOperation("Could not derive file name from '%s'" % name)
    return s


@keep_lazy_text
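A self-contained sketch of the helper above, with Django's SuspiciousFileOperation swapped for a plain ValueError so it runs without Django installed:

```python
import re

def get_valid_filename(name):
    # Same logic as the Django helper; ValueError stands in for
    # SuspiciousFileOperation to keep the demo dependency-free.
    s = str(name).strip().replace(" ", "_")
    s = re.sub(r"(?u)[^-\w.]", "", s)
    if s in {"", ".", ".."}:
        raise ValueError("Could not derive file name from '%s'" % name)
    return s

print(get_valid_filename("john's portrait in 2004.jpg"))
# johns_portrait_in_2004.jpg
```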
51,826
206,991
159
tests/admin_changelist/tests.py
47
33
def test_no_duplicates_for_m2m_in_list_filter(self):
    blues = Genre.objects.create(name="Blues")
    band = Band.objects.create(name="B.B. King Review", nr_of_members=11)
    band.genres.add(blues)
    band.genres.add(blues)
    m = BandAdmin(Band, custom_site)
    request = self.factory.get("/band/", data={"genres": blues.pk})
    request.user = self.superuser
    cl = m.get_changelist_instance(request)
    cl.get_results(request)
    # There's only one Group instance
    self.assertEqual(c
Refs #33476 -- Reformatted code with Black.
test_no_duplicates_for_m2m_in_list_filter
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
tests.py
12
14
https://github.com/django/django.git
1
144
0
40
238
Python
{ "docstring": "\n Regression test for #13902: When using a ManyToMany in list_filter,\n results shouldn't appear more than once. Basic ManyToMany.\n ", "language": "en", "n_whitespaces": 40, "n_words": 18, "vocab_size": 18 }
def test_no_duplicates_for_m2m_in_list_filter(self):
    blues = Genre.objects.create(name="Blues")
    band = Band.objects.create(name="B.B. King Review", nr_of_members=11)
    band.genres.add(blues)
    band.genres.add(blues)
    m = BandAdmin(Band, custom_site)
    request = self.factory.get("/band/", data={"genres": blues.pk})
    request.user = self.superuser
    cl = m.get_changelist_instance(request)
    cl.get_results(request)
    # There's only one Group instance
    self.assertEqual(cl.result_count, 1)
    # Queryset must be deletable.
    self.assertIs(cl.queryset.query.distinct, False)
    cl.queryset.delete()
    self.assertEqual(cl.queryset.count(), 0)
40,126
167,798
116
pandas/core/ops/methods.py
45
11
def add_flex_arithmetic_methods(cls) -> None:
    flex_arith_method, flex_comp_method = _get_method_wrappers(cls)
    new_methods = _create_methods(cls, flex_arith_method, flex_comp_method)
    new_methods.update(
        {
            "multiply": new_methods["mul"],
            "subtract": new_methods["sub"],
            "divide": new_methods["div"],
        }
    )
    # opt out of bool flex methods for now
    assert not any
TYP: more return annotations in core/ (#47618) * TYP: more return annotations in core/ * from __future__ import annotations * more __future__
add_flex_arithmetic_methods
f65417656ba8c59438d832b6e2a431f78d40c21c
pandas
methods.py
11
21
https://github.com/pandas-dev/pandas.git
2
80
0
40
138
Python
{ "docstring": "\n Adds the full suite of flex arithmetic methods (``pow``, ``mul``, ``add``)\n to the class.\n\n Parameters\n ----------\n cls : class\n flex methods will be defined and pinned to this class\n ", "language": "en", "n_whitespaces": 55, "n_words": 29, "vocab_size": 24 }
def add_flex_arithmetic_methods(cls) -> None:
    flex_arith_method, flex_comp_method = _get_method_wrappers(cls)
    new_methods = _create_methods(cls, flex_arith_method, flex_comp_method)
    new_methods.update(
        {
            "multiply": new_methods["mul"],
            "subtract": new_methods["sub"],
            "divide": new_methods["div"],
        }
    )
    # opt out of bool flex methods for now
    assert not any(kname in new_methods for kname in ("ror_", "rxor", "rand_"))

    _add_methods(cls, new_methods=new_methods)
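The aliases registered above in action: `multiply`, `subtract` and `divide` are the flex spellings of `mul`, `sub` and `div` on pandas objects.

```python
import pandas as pd

s = pd.Series([2.0, 4.0])
print(s.multiply(3).tolist())  # [6.0, 12.0]
print(s.subtract(1).tolist())  # [1.0, 3.0]
print(s.divide(2).tolist())    # [1.0, 2.0]
```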
52,889
210,309
83
deploy/python/action_utils.py
19
9
def get_collected_keypoint(self):
    output = []
    for tracker_id in self.id_to_pop:
        output.append([tracker_id, self.keypoint_saver[tracker_id]])
        del (self.keypoint_saver[tracker_id])
    self.flag_to_pop = False
    self.id_to
Pipeline with kpt and act (#5399) * add keypoint infer and visualize into Pipeline * add independent action model inference * add action inference into pipeline, still in working * test different display frames and normalization methods * use bbox and scale normalization * Remove debug info and Optimize code structure * remove useless visual param * make action parameters configurable
get_collected_keypoint
7018dad10757b6d414f1b00a547244bced596d68
PaddleDetection
action_utils.py
12
8
https://github.com/PaddlePaddle/PaddleDetection.git
2
55
0
17
88
Python
{ "docstring": "\n Output (List): List of keypoint results for Action Recognition task, where \n the format of each element is [tracker_id, KeyPointSequence of tracker_id]\n ", "language": "en", "n_whitespaces": 67, "n_words": 21, "vocab_size": 19 }
def get_collected_keypoint(self):
    output = []
    for tracker_id in self.id_to_pop:
        output.append([tracker_id, self.keypoint_saver[tracker_id]])
        del (self.keypoint_saver[tracker_id])
    self.flag_to_pop = False
    self.id_to_pop.clear()
    return output
@frappe.whitelist()
13,546
63,987
19
erpnext/education/api.py
28
16
def get_course_schedule_events(start, end, filters=None):
    from frappe.desk.calendar import get_event_conditions
    conditions = get_event_conditions("Course Schedule", filters)
    data = frappe.db.sql(.format(conditions=conditions), {
        "start": start,
fix: from time and to time not updated in drag and drop action #29114 fix: from time and to time not updated in drag and drop action
get_course_schedule_events
8b5827ed6db1041526b6440ca8e4fde19c646e1e
erpnext
api.py
12
14
https://github.com/frappe/erpnext.git
1
69
1
26
123
Python
{ "docstring": "Returns events for Course Schedule Calendar view rendering.\n\n\t:param start: Start date-time.\n\t:param end: End date-time.\n\t:param filters: Filters (JSON).\n\tselect name, course, color,\n\t\t\ttimestamp(schedule_date, from_time) as from_time,\n\t\t\ttimestamp(schedule_date, to_time) as to_time,\n\t\t\troom, student_group, 0 as 'allDay'\n\t\tfrom `tabCourse Schedule`\n\t\twhere ( schedule_date between %(start)s and %(end)s )\n\t\t{conditions}", "language": "en", "n_whitespaces": 38, "n_words": 49, "vocab_size": 43 }
def get_course_schedule_events(start, end, filters=None):
    from frappe.desk.calendar import get_event_conditions
    conditions = get_event_conditions("Course Schedule", filters)
    data = frappe.db.sql(
        """select name, course, color,
            timestamp(schedule_date, from_time) as from_time,
            timestamp(schedule_date, to_time) as to_time,
            room, student_group, 0 as 'allDay'
        from `tabCourse Schedule`
        where ( schedule_date between %(start)s and %(end)s )
        {conditions}""".format(conditions=conditions),
        {"start": start, "end": end},
        as_dict=True,
        update={"allDay": 0},
    )
    return data


@frappe.whitelist()
40,008
167,425
64
pandas/io/json/_json.py
21
11
def check_keys_split(self, decoded) -> None:
    bad_keys = set(decoded.keys()).difference(set(self._split_
TYP: Return annotations for io/{formats,json} (#47516) * TYP: Return annotations for io/{formats,json} * flake8 * explicitly check whether width is None
check_keys_split
734db4f1fde2566a02b3c7ff661a479b0a71633c
pandas
_json.py
12
8
https://github.com/pandas-dev/pandas.git
2
47
0
20
85
Python
{ "docstring": "\n Checks that dict has only the appropriate keys for orient='split'.\n ", "language": "en", "n_whitespaces": 25, "n_words": 10, "vocab_size": 10 }
def check_keys_split(self, decoded) -> None:
    bad_keys = set(decoded.keys()).difference(set(self._split_keys))
    if bad_keys:
        bad_keys_joined = ", ".join(bad_keys)
        raise ValueError(f"JSON data had unexpected key(s): {bad_keys_joined}")
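The check boiled down to plain Python. The allowed-key set here assumes a frame parser's `_split_keys` of ("columns", "index", "data"); the real value depends on the parser subclass, so treat it as illustrative.

```python
allowed = {"columns", "index", "data"}  # assumed _split_keys
decoded = {"columns": [], "index": [], "data": [], "extra": []}

bad_keys = set(decoded.keys()).difference(allowed)
if bad_keys:
    bad_keys_joined = ", ".join(bad_keys)
    # Raises: JSON data had unexpected key(s): extra
    raise ValueError(f"JSON data had unexpected key(s): {bad_keys_joined}")
```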
51,013
205,092
210
django/db/backends/oracle/operations.py
53
13
def adapt_datetimefield_value(self, value):
    if value is None:
        return None
    # Expression values are adapted by the database.
    if hasattr(value
Refs #33476 -- Reformatted code with Black.
adapt_datetimefield_value
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
operations.py
14
13
https://github.com/django/django.git
5
66
0
42
112
Python
{ "docstring": "\n Transform a datetime value to an object compatible with what is expected\n by the backend driver for datetime columns.\n\n If naive datetime is passed assumes that is in UTC. Normally Django\n models.DateTimeField makes sure that if USE_TZ is True passed datetime\n is timezone aware.\n ", "language": "en", "n_whitespaces": 87, "n_words": 44, "vocab_size": 35 }
def adapt_datetimefield_value(self, value):
    if value is None:
        return None
    # Expression values are adapted by the database.
    if hasattr(value, "resolve_expression"):
        return value
    # cx_Oracle doesn't support tz-aware datetimes
    if timezone.is_aware(value):
        if settings.USE_TZ:
            value = timezone.make_naive(value, self.connection.timezone)
        else:
            raise ValueError(
                "Oracle backend does not support timezone-aware datetimes when USE_TZ is False."
            )
    return Oracle_datetime.from_datetime(value)
54,764
217,412
493
python3.10.4/Lib/fractions.py
67
14
def __pow__(a, b):
    if isinstance(b, numbers.Rational):
        if b.denominator == 1:
            power = b.numerator
            if power >= 0:
                return Fraction(a._numerator ** power,
                                a._denominator ** power,
add python 3.10.4 for windows
__pow__
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
fractions.py
18
20
https://github.com/XX-net/XX-Net.git
5
132
0
39
208
Python
{ "docstring": "a ** b\n\n If b is not an integer, the result will be a float or complex\n since roots are generally irrational. If b is an integer, the\n result will be rational.\n\n ", "language": "en", "n_whitespaces": 60, "n_words": 32, "vocab_size": 21 }
def __pow__(a, b):
    if isinstance(b, numbers.Rational):
        if b.denominator == 1:
            power = b.numerator
            if power >= 0:
                return Fraction(a._numerator ** power,
                                a._denominator ** power,
                                _normalize=False)
            elif a._numerator >= 0:
                return Fraction(a._denominator ** -power,
                                a._numerator ** -power,
                                _normalize=False)
            else:
                return Fraction((-a._denominator) ** -power,
                                (-a._numerator) ** -power,
                                _normalize=False)
        else:
            # A fractional power will generally produce an
            # irrational number.
            return float(a) ** float(b)
    else:
        return float(a) ** b
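The branches above can be exercised directly through the stdlib Fraction type:

```python
from fractions import Fraction

print(Fraction(2, 3) ** 2)   # 4/9  -- integer power stays rational
print(Fraction(2, 3) ** -2)  # 9/4  -- negative power inverts the ratio
print(Fraction(4, 9) ** Fraction(1, 2))  # 0.666... -- fractional power falls back to float
```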
76,483
260,777
39
sklearn/linear_model/_ridge.py
11
7
def fit(self, X, y, sample_weight=None):
    self._validate_params()
    super().fit(X, y, sa
MAINT Parameters validation for RidgeCV and RidgeClassifierCV (#24184) Co-authored-by: jeremie du boisberranger <[email protected]> Co-authored-by: Guillaume Lemaitre <[email protected]>
fit
d593606a8267a325d98b1e9a57de6b7b87442f55
scikit-learn
_ridge.py
9
4
https://github.com/scikit-learn/scikit-learn.git
1
35
0
10
55
Python
{ "docstring": "Fit Ridge regression model with cv.\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Training data. If using GCV, will be cast to float64\n if necessary.\n\n y : ndarray of shape (n_samples,) or (n_samples, n_targets)\n Target values. Will be cast to X's dtype if necessary.\n\n sample_weight : float or ndarray of shape (n_samples,), default=None\n Individual weights for each sample. If given a float, every sample\n will have the same weight.\n\n Returns\n -------\n self : object\n Fitted estimator.\n\n Notes\n -----\n When sample_weight is provided, the selected hyperparameter may depend\n on whether we use leave-one-out cross-validation (cv=None or cv='auto')\n or another form of cross-validation, because only leave-one-out\n cross-validation takes the sample weights into account when computing\n the validation score.\n ", "language": "en", "n_whitespaces": 296, "n_words": 118, "vocab_size": 89 }
def fit(self, X, y, sample_weight=None):
    self._validate_params()
    super().fit(X, y, sample_weight=sample_weight)
    return self
2,943
19,355
127
PathPlanning/CubicSpline/cubic_spline_planner.py
45
11
def calc_position(self, x):
    if x < self.x[0]:
        return None
    elif x > self.x[-1]:
enhance cubic spline path doc (#698) * enhance cublic spline path doc * enhance cublic spline path doc * enhance cublic spline path doc * enhance cublic spline path doc * enhance cublic spline path doc * enhance cublic spline path doc * enhance cublic spline path doc * enhance cublic spline path doc * enhance cublic spline path doc * enhance cublic spline path doc * enhance cublic spline path doc * enhance cubic spline path doc * enhance cubic spline path doc * enhance cubic spline path doc * enhance cubic spline path doc * enhance cubic spline path doc * enhance cubic spline path doc * enhance cubic spline path doc * enhance cubic spline path doc * enhance cubic spline path doc * enhance cubic spline path doc * enhance cubic spline path doc * enhance cubic spline path doc * enhance cubic spline path doc * enhance cubic spline path doc
calc_position
def289b723e9216830c2a7b2577cb31b55710167
PythonRobotics
cubic_spline_planner.py
12
10
https://github.com/AtsushiSakai/PythonRobotics.git
3
97
0
29
141
Python
{ "docstring": "\n Calc `y` position for given `x`.\n\n if `x` is outside the data point's `x` range, return None.\n\n Returns\n -------\n y : float\n y position for given x.\n ", "language": "en", "n_whitespaces": 81, "n_words": 27, "vocab_size": 22 }
def calc_position(self, x):
    if x < self.x[0]:
        return None
    elif x > self.x[-1]:
        return None

    i = self.__search_index(x)
    dx = x - self.x[i]
    position = self.a[i] + self.b[i] * dx + \
        self.c[i] * dx ** 2.0 + self.d[i] * dx ** 3.0
    return position
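A self-contained sketch of the piecewise evaluation above. The knots and per-segment coefficients are made up for illustration; in the real class they come from solving the spline system, and `__search_index` locates the segment much as `searchsorted` does here.

```python
import numpy as np

x_knots = np.array([0.0, 1.0, 2.0])
a = [0.0, 1.0]   # per-segment cubic coefficients (illustrative values only)
b = [1.0, 0.5]
c = [0.0, -0.5]
d = [0.0, 0.1]

def calc_position(x):
    if x < x_knots[0] or x > x_knots[-1]:
        return None  # outside the interpolation range
    i = min(int(np.searchsorted(x_knots, x, side="right")) - 1, len(a) - 1)
    dx = x - x_knots[i]
    return a[i] + b[i] * dx + c[i] * dx ** 2.0 + d[i] * dx ** 3.0

print(calc_position(0.5))  # segment 0 evaluated at dx = 0.5 -> 0.5
```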
85,928
286,612
373
openbb_terminal/portfolio/attribution_model.py
133
14
def get_daily_sector_prices(start_date, end_date) -> dict:
    # sector ticker information
    sp500_tickers = {
        "S&P 500 Materials (Sector)": "^SP500-15",
        "S&P 500 Industrials (Sector)": "^SP500-20",
        "S&P 500 Consumer Discretionary (Sector)": "^SP500-25",
        "S&P 500 Consumer Staples (Sector)": "^SP500-30",
        "S&P 500 Health Care (Sector)": "^SP500-35",
        "S&P 500 Financials (Sector)": "^SP500-40",
        "S&P 500 Information Technology (Sector)": "^SP500-45",
        "S&P 500 Telecommunication Services (Sector)": "^SP500-50",
[IMPROVE] Fix Docstring formatting/Fix missing, incomplete type hints (#3412) * Fixes * Update stocks_helper.py * update git-actions set-output to new format * Update stocks_helper.py * Update terminal_helper.py * removed LineAnnotateDrawer from qa_view * lint * few changes * updates * sdk auto gen modules done * Update stocks_helper.py * updates to changed imports, and remove first sdk_modules * Update generate_sdk.py * Update generate_sdk.py * pylint * revert stocks_helper * Update generate_sdk.py * Update sdk.py * Update generate_sdk.py * full auto generation, added sdk.py/controllers creation * missed enable forecasting * added running black in subprocess after sdk files generation completes * removed deleted sdk_arg_logger * comment out tests * property doc fix * clean up * Update generate_sdk.py * make trailmap classes useable for doc generation * Update generate_sdk.py * added lineon to trailmap class for linking to func in markdown * changed lineon to dict * added full_path to trailmap for linking in docs * updated portfolio * feat: initial files * feat: added meta head * feat: added funcdef * added func_def to trailmap attributes for markdown in docs, added missing type hints to covid functions * feat: added view and merged with jaun * Update generate_sdk.py * Update generate_sdk.py * Update generate_sdk.py * Update generate_sdk.py * init * fix returns * fix: random stuff * fix: random * fixed encoding issue on windows * fix: generate tabs * update * Update generate_sdk_markdown.py * Create .pydocstyle.ini * added type hint classes for views * fixes * alt, ba * alt-economy * Update finviz_compare_model.py * fixs * Update substack_model.py * Update generate_sdk.py * last of my section * porfolio * po * Update optimizer_model.py * fixing more things * few more * keys done * update * fixes * Update generate_sdk_markdown.py * Update generate_sdk_markdown.py * mypy forecast fix * Update generate_sdk_markdown.py * Update generate_sdk_markdown.py * Update generate_sdk_markdown.py * fixes * forecast fixes * one more fix * Update coinbase_model.py * Update generate_sdk_markdown.py Co-authored-by: Colin Delahunty <[email protected]> Co-authored-by: James Maslek <[email protected]> Co-authored-by: jose-donato <[email protected]> Co-authored-by: andrewkenreich <[email protected]>
get_daily_sector_prices
59d8b36bb0467a1a99513b10e8b8471afaa56fd6
OpenBBTerminal
attribution_model.py
14
45
https://github.com/OpenBB-finance/OpenBBTerminal.git
2
109
0
82
207
Python
{ "docstring": "\n fetches daily sector prices for S&P500 for a fixed time period\n\n Parameters\n ----------\n start_date : str ('yyyy-mm-dd') or datetime.date\n start date for fetching data\n end_date : str ('yyyy-mm-dd') or datetime.date\n end date for fetching data\n\n Returns\n -------\n sp500_tickers_data : dict\n dictionary of dataframes with SPY daily sector prices\n ", "language": "en", "n_whitespaces": 97, "n_words": 48, "vocab_size": 33 }
def get_daily_sector_prices(start_date, end_date) -> dict:
    # sector ticker information
    sp500_tickers = {
        "S&P 500 Materials (Sector)": "^SP500-15",
        "S&P 500 Industrials (Sector)": "^SP500-20",
        "S&P 500 Consumer Discretionary (Sector)": "^SP500-25",
        "S&P 500 Consumer Staples (Sector)": "^SP500-30",
        "S&P 500 Health Care (Sector)": "^SP500-35",
        "S&P 500 Financials (Sector)": "^SP500-40",
        "S&P 500 Information Technology (Sector)": "^SP500-45",
        "S&P 500 Telecommunication Services (Sector)": "^SP500-50",
        "S&P 500 Utilities (Sector)": "^SP500-55",
        "S&P 500 Real Estate (Sector)": "^SP500-60",
        "S&P 500 Energy (Sector)": "^GSPE",
    }

    sp500_tickers_data = {}  # to store data

    for (
        sector,
        sector_ticker,
    ) in sp500_tickers.items():  # iterate thru the sectors
        # load the data required from yfinance
        sp500_tickers_data[
            sector
        ] = {  # builds a dictionary entry for the sector with adj close data
            "sector_data": yf.download(
                sector_ticker,
                start=start_date,
                end=end_date,
                progress=False,
            )["Adj Close"]
        }  # stores the data here

    return sp500_tickers_data
46,162
189,675
123
manim/mobject/geometry/arc.py
38
17
def get_unpositioned_tip(self, tip_shape=None, tip_length=None):
    from manim.mobject.geometry.tips import ArrowTriangleFilledTip

    if tip_shape is None:
        tip_shape = ArrowTriangleFilledTip
    if tip_length is None:
        tip_length = self.get_default_tip_length()
    color = self.get_color()
    style = {"fill_c
Improved structure of the :mod:`.mobject` module (#2476) * group graphing and update its references * group text and update its references * group opengl and update its references * group three_d and update its references * group geometry and update (most) references * move some chaning.py + updater files into animation * refactor arc.py * refactor line.py * refactor polygram.py * refactor tips.py * black + isort * import new files in __init__.py * refactor places where geometry was used * black + isort again * remove unused imports * update reference.rst * add descriptions to files * fix circular imports * forgot ArrowTip * fix tests * fix doctests * satisfy mypy? * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix ALL merge conflicts * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * one VMobject import slipped through * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * re-add imports to `manim/opengl/__init__.py` * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix reference manual * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * ignore unknown directive type * fix arrow tip imports in docstrings Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Benjamin Hackl <[email protected]>
get_unpositioned_tip
e040bcacd38378386749db18aeba575b93f4ebca
manim
arc.py
10
11
https://github.com/ManimCommunity/manim.git
3
83
0
27
134
Python
{ "docstring": "\n Returns a tip that has been stylistically configured,\n but has not yet been given a position in space.\n ", "language": "en", "n_whitespaces": 40, "n_words": 18, "vocab_size": 15 }
def get_unpositioned_tip(self, tip_shape=None, tip_length=None):
    from manim.mobject.geometry.tips import ArrowTriangleFilledTip

    if tip_shape is None:
        tip_shape = ArrowTriangleFilledTip
    if tip_length is None:
        tip_length = self.get_default_tip_length()
    color = self.get_color()
    style = {"fill_color": color, "stroke_color": color}
    style.update(self.tip_style)
    tip = tip_shape(length=tip_length, **style)
    return tip
92,372
293,309
84
tests/components/todoist/test_calendar.py
34
10
def test_parse_due_date_without_timezone_uses_offset():
    data: DueDate = {
        "date": "2022-02-02T14:00:00",
        "is_recurring": False,
        "lang": "en",
        "string": "Feb 2 2:00 PM",
        "timezone": None,
    }
    actual = _parse_due_date(data, timezone_offset=-8)
    assert dat
Fix todoist parsing due dates for calendar events (#65403)
test_parse_due_date_without_timezone_uses_offset
d302b0d14e9df9cc46e7e035a0d2be5290182b40
core
test_calendar.py
10
10
https://github.com/home-assistant/core.git
1
65
0
30
109
Python
{ "docstring": "Test due date uses user local timezone offset when it has no timezone.", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 13 }
def test_parse_due_date_without_timezone_uses_offset():
    data: DueDate = {
        "date": "2022-02-02T14:00:00",
        "is_recurring": False,
        "lang": "en",
        "string": "Feb 2 2:00 PM",
        "timezone": None,
    }
    actual = _parse_due_date(data, timezone_offset=-8)
    assert datetime(2022, 2, 2, 22, 0, 0, tzinfo=dt.UTC) == actual
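What the expected value in the final assertion encodes, checked with the stdlib: 14:00 local time at a UTC-8 offset is 22:00 UTC.

```python
from datetime import datetime, timedelta, timezone

local = datetime(2022, 2, 2, 14, 0, tzinfo=timezone(timedelta(hours=-8)))
print(local.astimezone(timezone.utc))  # 2022-02-02 22:00:00+00:00
```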
@add_start_docstrings(
    """TFHubert Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).""",
    HUBERT_START_DOCSTRING,
)
5,924
32,426
69
src/transformers/models/hubert/modeling_tf_hubert.py
27
14
def serving_output(self, output):
    hidden_states = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states e
Update serving code to enable `saved_model=True` (#18153) * Add serving_output and serving methods to some vision models * Add serving outputs for DeiT * Don't convert hidden states - differing shapes * Make saveable * Fix up * Make swin saveable * Add in tests * Fix funnel tests (can't convert to tensor) * Fix numpy call * Tidy up a bit * Add in hidden states - resnet * Remove numpy * Fix failing tests - tensor shape and skipping tests * Remove duplicated function * PR comments - formatting and var names * PR comments Add suggestions made by Joao Gante: * Use tf.shape instead of shape_list * Use @tooslow decorator on tests * Simplify some of the logic * PR comments Address Yih-Dar Sheih comments - making tensor names consistent and make types float * Types consistent with docs; disable test on swin (slow) * CI trigger * Change input_features to float32 * Add serving_output for segformer * Fixup Co-authored-by: Amy Roberts <[email protected]>
serving_output
8e8384663d716d4b5a4f510070ff954fc0ba4a52
transformers
modeling_tf_hubert.py
10
6
https://github.com/huggingface/transformers.git
3
60
1
22
103
Python
{ "docstring": "TFHubert Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 14 }
def serving_output(self, output):
    hidden_states = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
    attentions = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None

    return TFBaseModelOutput(
        last_hidden_state=output.last_hidden_state,
        hidden_states=hidden_states,
        attentions=attentions,
    )


@add_start_docstrings(
    """TFHubert Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).""",
    HUBERT_START_DOCSTRING,
)
75,671
259,240
256
sklearn/utils/_encode.py
111
17
def _unique_np(values, return_inverse=False, return_counts=False):
    uniques = np.unique(
        values, return_inverse=return_inverse, return_counts=return_counts
    )

    inverse, counts = None, None

    if return_counts:
        *uniques, counts = uniques

    if return_inverse:
        *uniques, inverse = uniques

    if return_counts or return_inverse:
        uniques = uniques[0]

    # np.unique will have duplicate missing values at the end of `uniques`
    # here we clip the nans and remove it from uniques
    if uniques.size and is_scalar_nan(uniques[-1]):
        nan_idx = np.searchsorted(uniques, np.nan)
        uniques = uniques[: nan_idx + 1]
        if return_inverse:
            inverse[inverse > nan_
ENH Adds infrequent categories to OneHotEncoder (#16018) * ENH Completely adds infrequent categories * STY Linting * STY Linting * DOC Improves wording * DOC Lint * BUG Fixes * CLN Address comments * CLN Address comments * DOC Uses math to description float min_frequency * DOC Adds comment regarding drop * BUG Fixes method name * DOC Clearer docstring * TST Adds more tests * FIX Fixes mege * CLN More pythonic * CLN Address comments * STY Flake8 * CLN Address comments * DOC Fix * MRG * WIP * ENH Address comments * STY Fix * ENH Use functiion call instead of property * ENH Adds counts feature * CLN Rename variables * DOC More details * CLN Remove unneeded line * CLN Less lines is less complicated * CLN Less diffs * CLN Improves readiabilty * BUG Fix * CLN Address comments * TST Fix * CLN Address comments * CLN Address comments * CLN Move docstring to userguide * DOC Better wrapping * TST Adds test to handle_unknown='error' * ENH Spelling error in docstring * BUG Fixes counter with nan values * BUG Removes unneeded test * BUG Fixes issue * ENH Sync with main * DOC Correct settings * DOC Adds docstring * DOC Immprove user guide * DOC Move to 1.0 * DOC Update docs * TST Remove test * DOC Update docstring * STY Linting * DOC Address comments * ENH Neater code * DOC Update explaination for auto * Update sklearn/preprocessing/_encoders.py Co-authored-by: Roman Yurchak <[email protected]> * TST Uses docstring instead of comments * TST Remove call to fit * TST Spelling error * ENH Adds support for drop + infrequent categories * ENH Adds infrequent_if_exist option * DOC Address comments for user guide * DOC Address comments for whats_new * DOC Update docstring based on comments * CLN Update test with suggestions * ENH Adds computed property infrequent_categories_ * DOC Adds where the infrequent column is located * TST Adds more test for infrequent_categories_ * DOC Adds docstring for _compute_drop_idx * CLN Moves _convert_to_infrequent_idx into its own method * TST Increases test coverage * TST Adds failing test * CLN Careful consideration of dropped and inverse_transform * STY Linting * DOC Adds docstrinb about dropping infrequent * DOC Uses only * DOC Numpydoc * TST Includes test for get_feature_names_out * DOC Move whats new * DOC Address docstring comments * DOC Docstring changes * TST Better comments * TST Adds check for handle_unknown='ignore' for infrequent * CLN Make _infrequent_indices private * CLN Change min_frequency default to None * DOC Adds comments * ENH adds support for max_categories=1 * ENH Describe lexicon ordering for ties * DOC Better docstring * STY Fix * CLN Error when explicity dropping an infrequent category * STY Grammar Co-authored-by: Joel Nothman <[email protected]> Co-authored-by: Roman Yurchak <[email protected]> Co-authored-by: Guillaume Lemaitre <[email protected]>
_unique_np
7f0006c8aad1a09621ad19c3db19c3ff0555a183
scikit-learn
_encode.py
14
25
https://github.com/scikit-learn/scikit-learn.git
12
177
0
68
276
Python
{ "docstring": "Helper function to find unique values for numpy arrays that correctly\n accounts for nans. See `_unique` documentation for details.", "language": "en", "n_whitespaces": 21, "n_words": 19, "vocab_size": 17 }
def _unique_np(values, return_inverse=False, return_counts=False):
    uniques = np.unique(
        values, return_inverse=return_inverse, return_counts=return_counts
    )

    inverse, counts = None, None

    if return_counts:
        *uniques, counts = uniques

    if return_inverse:
        *uniques, inverse = uniques

    if return_counts or return_inverse:
        uniques = uniques[0]

    # np.unique will have duplicate missing values at the end of `uniques`
    # here we clip the nans and remove it from uniques
    if uniques.size and is_scalar_nan(uniques[-1]):
        nan_idx = np.searchsorted(uniques, np.nan)
        uniques = uniques[: nan_idx + 1]
        if return_inverse:
            inverse[inverse > nan_idx] = nan_idx
        if return_counts:
            counts[nan_idx] = np.sum(counts[nan_idx:])
            counts = counts[: nan_idx + 1]

    ret = (uniques,)
    if return_inverse:
        ret += (inverse,)
    if return_counts:
        ret += (counts,)
    return ret[0] if len(ret) == 1 else ret
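Why the NaN clipping above is necessary: NaN != NaN, so np.unique keeps every missing value, and they all sort to the end of the result.

```python
import numpy as np

values = np.array([1.0, np.nan, 2.0, np.nan])
uniques = np.unique(values)
print(uniques)                 # [ 1.  2. nan nan] -- duplicate NaNs survive
nan_idx = np.searchsorted(uniques, np.nan)
print(uniques[: nan_idx + 1])  # [ 1.  2. nan] -- clipped to a single NaN
```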
100,006
301,158
27
homeassistant/components/logbook/processor.py
6
6
def switch_to_live(self) -> None:
    self.logbook_run.event_cache.clear()
    self.logbook_run.context_lookup.clear()
Add live streaming logbook websocket endpoint (#72258) Co-authored-by: Paulus Schoutsen <[email protected]>
switch_to_live
9c3f9491651f409e8b4d0d645115b55b14f06165
core
processor.py
9
7
https://github.com/home-assistant/core.git
1
26
0
6
46
Python
{ "docstring": "Switch to live stream.\n\n Clear caches so we can reduce memory pressure.\n ", "language": "en", "n_whitespaces": 26, "n_words": 12, "vocab_size": 12 }
def switch_to_live(self) -> None:
    self.logbook_run.event_cache.clear()
    self.logbook_run.context_lookup.clear()
121,062
337,460
13
src/accelerate/test_utils/testing.py
7
5
def require_comet_ml(test_case):
    retu
Clean up tests + fix import (#330)
require_comet_ml
e5c17f36a8b5bf8b9478d416c4a80841a353fb19
accelerate
testing.py
10
2
https://github.com/huggingface/accelerate.git
1
20
0
7
37
Python
{ "docstring": "\n Decorator marking a test that requires comet_ml installed. These tests are skipped when comet_ml isn't installed\n ", "language": "en", "n_whitespaces": 23, "n_words": 16, "vocab_size": 15 }
def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)
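The same conditional-skip pattern, generalized into a sketch with a plain boolean standing in for is_comet_ml_available():

```python
import unittest

FEATURE_AVAILABLE = False  # stand-in for is_comet_ml_available()

def require_feature(test_case):
    # skipUnless returns a decorator, which is applied immediately.
    return unittest.skipUnless(FEATURE_AVAILABLE, "test requires the feature")(test_case)

@require_feature
class DemoTest(unittest.TestCase):
    def test_something(self):
        self.assertTrue(True)  # skipped while FEATURE_AVAILABLE is False
```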
7,303
39,972
65
dash/_callback_context.py
15
7
def triggered_id(self):
    component_id = None
    if self.triggered:
        prop_id = self.triggered_prop_ids.first()
added docstrings
triggered_id
67f56d09d70e77701d2ae9a002aa330202da118b
dash
_callback_context.py
11
6
https://github.com/plotly/dash.git
2
33
0
11
56
Python
{ "docstring": "\n Returns the component id (str or dict) of the Input component that triggered the callback.\n\n Note - use `triggered_prop_ids` if you need both the component id and the prop that triggered the callback or if\n multiple Inputs triggered the callback.\n\n Example usage:\n `if \"btn-1\" == ctx.triggered_id:\n do_something()`\n\n ", "language": "en", "n_whitespaces": 101, "n_words": 47, "vocab_size": 32 }
def triggered_id(self):
    component_id = None
    if self.triggered:
        prop_id = self.triggered_prop_ids.first()
        component_id = self.triggered_prop_ids[prop_id]
    return component_id
75,878
259,703
228
sklearn/decomposition/_nmf.py
80
28
def _solve_W(self, X, H, max_iter):
    avg = np.sqrt(X.mean() / self._n_components)
    W = np.full((X.shape[0], self._n_components), avg
FEA Online implementation of non-negative matrix factorization (#16948) Co-authored-by: Tom Dupré la Tour <[email protected]> Co-authored-by: jeremie du boisberranger <[email protected]> Co-authored-by: Thomas J. Fan <[email protected]> Co-authored-by: Jérémie du Boisberranger <[email protected]>
_solve_W
69132ebbd39f070590ca01813340b5b12c0d02ab
scikit-learn
_nmf.py
12
14
https://github.com/scikit-learn/scikit-learn.git
4
148
0
62
224
Python
{ "docstring": "Minimize the objective function w.r.t W.\n\n Update W with H being fixed, until convergence. This is the heart\n of `transform` but it's also used during `fit` when doing fresh restarts.\n ", "language": "en", "n_whitespaces": 51, "n_words": 30, "vocab_size": 29 }
def _solve_W(self, X, H, max_iter):
    avg = np.sqrt(X.mean() / self._n_components)
    W = np.full((X.shape[0], self._n_components), avg, dtype=X.dtype)
    W_buffer = W.copy()

    # Get scaled regularization terms. Done for each minibatch to take into account
    # variable sizes of minibatches.
    l1_reg_W, _, l2_reg_W, _ = self._scale_regularization(X)

    for _ in range(max_iter):
        W, *_ = _multiplicative_update_w(
            X, W, H, self._beta_loss, l1_reg_W, l2_reg_W, self._gamma
        )

        W_diff = linalg.norm(W - W_buffer) / linalg.norm(W)
        if self.tol > 0 and W_diff <= self.tol:
            break

        W_buffer[:] = W

    return W
53,006
211,032
74
deploy/pptracking/python/mot/tracker/ocsort_tracker.py
49
12
def convert_bbox_to_z(bbox):
    w = bbox[2] - bbox[0]
    h = bbox[3] - bbox[1]
    x = bbox[0] + w / 2.
    y = bbox[1] + h / 2.
    s = w * h  # scale is just area
[MOT] Add OC_SORT tracker (#6272) * add ocsort tracker * add ocsort deploy * merge develop * fix ocsort tracker codes * fix doc, test=document_fix * fix doc, test=document_fix
convert_bbox_to_z
c84153a355d9855fe55cf51d203b8b24e7d884e5
PaddleDetection
ocsort_tracker.py
10
8
https://github.com/PaddlePaddle/PaddleDetection.git
1
91
0
31
133
Python
{ "docstring": "\n Takes a bounding box in the form [x1,y1,x2,y2] and returns z in the form\n [x,y,s,r] where x,y is the centre of the box and s is the scale/area and r is\n the aspect ratio\n ", "language": "en", "n_whitespaces": 51, "n_words": 34, "vocab_size": 22 }
def convert_bbox_to_z(bbox):
    w = bbox[2] - bbox[0]
    h = bbox[3] - bbox[1]
    x = bbox[0] + w / 2.
    y = bbox[1] + h / 2.
    s = w * h  # scale is just area
    r = w / float(h + 1e-6)
    return np.array([x, y, s, r]).reshape((4, 1))
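A worked call of the converter, re-stated so the snippet runs on its own: a 4x2 box anchored at the origin yields centre (2, 1), area 8 and aspect ratio ~2.

```python
import numpy as np

def convert_bbox_to_z(bbox):
    w = bbox[2] - bbox[0]
    h = bbox[3] - bbox[1]
    x = bbox[0] + w / 2.
    y = bbox[1] + h / 2.
    s = w * h                # scale is just area
    r = w / float(h + 1e-6)  # aspect ratio
    return np.array([x, y, s, r]).reshape((4, 1))

print(convert_bbox_to_z([0, 0, 4, 2]).ravel())
# [2. 1. 8. 2.] (aspect ratio ~2, up to the 1e-6 epsilon)
```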
4,786
24,770
126
ppstructure/recovery/table_process.py
82
6
def remove_whitespace(string, leading=False, trailing=False):
    # Remove any leading new line characters along with any surrounding white space
    if leading:
        string = re.sub(r'^\s*\n+\s*', '', string)

    # Remove an
update recovery (#7259) * update recovery * update recovery * update recovery * update recovery * update recovery
remove_whitespace
b7d99acd2e06945c789312cda70d60b7c8a5b0d0
PaddleOCR
table_process.py
11
7
https://github.com/PaddlePaddle/PaddleOCR.git
3
71
0
50
136
Python
{ "docstring": "Remove white space from a string.\n Args:\n string(str): The string to remove white space from.\n leading(bool, optional): Remove leading new lines when True.\n trailing(bool, optional): Remove trailing new lines when False.\n Returns:\n str: The input string with new line characters removed and white space squashed.\n Examples:\n Single or multiple new line characters are replaced with space.\n >>> remove_whitespace(\"abc\\\\ndef\")\n 'abc def'\n >>> remove_whitespace(\"abc\\\\n\\\\n\\\\ndef\")\n 'abc def'\n New line characters surrounded by white space are replaced with a single space.\n >>> remove_whitespace(\"abc \\\\n \\\\n \\\\n def\")\n 'abc def'\n >>> remove_whitespace(\"abc \\\\n \\\\n \\\\n def\")\n 'abc def'\n Leading and trailing new lines are replaced with a single space.\n >>> remove_whitespace(\"\\\\nabc\")\n ' abc'\n >>> remove_whitespace(\" \\\\n abc\")\n ' abc'\n >>> remove_whitespace(\"abc\\\\n\")\n 'abc '\n >>> remove_whitespace(\"abc \\\\n \")\n 'abc '\n Use ``leading=True`` to remove leading new line characters, including any surrounding\n white space:\n >>> remove_whitespace(\"\\\\nabc\", leading=True)\n 'abc'\n >>> remove_whitespace(\" \\\\n abc\", leading=True)\n 'abc'\n Use ``trailing=True`` to remove trailing new line characters, including any surrounding\n white space:\n >>> remove_whitespace(\"abc \\\\n \", trailing=True)\n 'abc'\n ", "language": "en", "n_whitespaces": 509, "n_words": 166, "vocab_size": 73 }
def remove_whitespace(string, leading=False, trailing=False):
    # Remove any leading new line characters along with any surrounding white space
    if leading:
        string = re.sub(r'^\s*\n+\s*', '', string)

    # Remove any trailing new line characters along with any surrounding white space
    if trailing:
        string = re.sub(r'\s*\n+\s*$', '', string)

    # Replace new line characters and absorb any surrounding space.
    string = re.sub(r'\s*\n\s*', ' ', string)

    # TODO need some way to get rid of extra spaces in e.g. text <span> </span> text
    return re.sub(r'\s+', ' ', string)
117,347
320,780
92
qutebrowser/completion/completionwidget.py
21
12
def selectionChanged(self, selected, deselected):
    if not self._active:
        return
    super().selectionChanged(selected, deselected)
mypy: Upgrade to PyQt5-stubs 5.15.6.0 For some unknown reason, those new stubs cause a *lot* of things now to be checked by mypy which formerly probably got skipped due to Any being implied somewhere. The stubs themselves mainly improved, with a couple of regressions too. In total, there were some 337 (!) new mypy errors. This commit fixes almost all of them, and the next commit improves a fix to get things down to 0 errors again. Overview of the changes: ==== qutebrowser/app.py - Drop type ignore due to improved stubs. ==== qutebrowser/browser/browsertab.py - Specify the type of _widget members more closely than just QWidget. This is debatable: I suppose the abstract stuff shouldn't need to know anything about the concrete backends at all. But it seems like we cut some corners when initially implementing things, and put some code in browsertab.py just because the APIs of both backends happened to be compatible. Perhaps something to reconsider once we drop QtWebKit and hopefully implement a dummy backend. - Add an additional assertion in AbstractAction.run_string. This is already covered by the isinstance(member, self.action_base) above it, but that's too dynamic for mypy to understand. - Fix the return type of AbstractScroller.pos_px, which is a QPoint (with x and y components), not a single int. - Fix the return type of AbstractScroller.pos_perc, which is a Tuple (with x and y components), not a single int. - Fix the argument types of AbstractScroller.to_perc, as it's possible to pass fractional percentages too. - Specify the type for AbstractHistoryPrivate._history. See above (_widget) re this being debatable. - Fix the return type of AbstractTabPrivate.event_target(), which can be None (see #3888). - Fix the return type of AbstractTabPrivate.run_js_sync, which is Any (the JS return value), not None. - Fix the argument type for AbstractTabPrivate.toggle_inspector: position can be None to use the last used position. - Declare the type of sub-objects of AbstractTab. - Fix the return value of AbstractTab.icon(), which is the QIcon, not None. ==== qutebrowser/browser/commands.py - Make sure the active window is a MainWindow (with a .win_id attribute). ==== qutebrowser/browser/downloadview.py - Add _model() which makes sure that self.model() is a DownloadModel, not None or any other model. This is needed because other methods access a variety of custom attributes on it, e.g. last_index(). ==== qutebrowser/browser/greasemonkey.py - Add an ignore for AbstractDownload.requested_url which we patch onto the downloads. Probably would be nicer to add it as a proper attribute which always gets set by the DownloadManager. ==== qutebrowser/browser/hints.py - Remove type ignores for QUrl.toString(). - Add a new type ignore for combining different URL flags (which works, but is not exactly type safe... still probably a regression in the stubs). - Make sure the things we get back from self._get_keyparser are what we actually expect. Probably should introduce a TypedDict (and/or overloads for _get_keyparser with typing.Literal) to teach mypy about the exact return value. See #7098. This is needed because we access Hint/NormalKeyParser-specific attributes such as .set_inhibited_timout() or .update_bindings(). ==== qutebrowser/browser/inspector.py - Similar changes than in browsertab.py to make some types where we share API (e.g. .setPage()) more concrete. Didn't work out unfortunately, see next commit. ==== qutebrowser/browser/network/pac.py - Remove now unneeded type ignore for signal. 
==== qutebrowser/browser/qtnetworkdownloads.py - Make sure that downloads is a qtnetworkdownloads.DownloadItem (rather than an AbstractDownload), so that we can call ._uses_nam() on it. ==== qutebrowser/browser/qutescheme.py - Remove now unneeded type ignore for QUrl flags. ==== qutebrowser/browser/urlmarks.py - Specify the type of UrlMarkManager._lineparser, as those only get initialized in _init_lineparser of subclasses, so mypy doesn't know it's supposed to exist. ==== qutebrowser/browser/webelem.py - New casts to turn single KeyboardModifier (enum) entries into KeyboardModifiers (flags). Might not be needed anymore with Qt 6. - With that, casting the final value is now unneeded. ==== qutebrowser/browser/webengine/notification.py - Remove now unneeded type ignore for signal. - Make sure the self.sender() we get in HerbeNotificationAdapter._on_finished() is a QProcess, not just any QObject. ==== qutebrowser/browser/webengine/webenginedownloads.py - Remove now unneeded type ignores for signals. ==== qutebrowser/browser/webengine/webengineelem.py - Specify the type of WebEngineElement._tab. - Remove now unneeded type ignore for mixed flags. ==== qutebrowser/browser/webengine/webengineinspector.py - See changes to inspector.py and next commit. - Remove now unneeded type ignore for signal. ==== qutebrowser/browser/webengine/webenginequtescheme.py - Remove now unneeded type ignore for mixed flags. ==== qutebrowser/browser/webengine/webenginesettings.py - Ignore access of .setter attribute which we patch onto QWebEngineProfile. Would be nice to have a subclass or wrapper-class instead. ==== qutebrowser/browser/webengine/webenginetab.py - Specified the type of _widget members more closely than just QWidget. See browsertab.py changes for details. - Remove some now-unneeded type ignores for creating FindFlags. - Specify more concrete types for WebEngineTab members where we actually need to access WebEngine-specific attributes. - Make sure the page we get is our custom WebEnginePage subclass, not just any QWebEnginePage. This is needed because we access custom attributes on it. ==== qutebrowser/browser/webengine/webview.py - Make sure the page we get is our custom WebEnginePage subclass, not just any QWebEnginePage. This is needed because we access custom attributes on it. ==== qutebrowser/browser/webkit/network/networkreply.py - Remove now unneeded type ignores for signals. ==== qutebrowser/browser/webkit/webkitinspector.py - See changes to inspector.py and next commit. ==== qutebrowser/browser/webkit/webkittab.py - Specify the type of _widget members more closely than just QWidget. See browsertab.py changes for details. - Add a type ignore for WebKitAction because our workaround needs to treat them as ints (which is allowed by PyQt, even if not type-safe). - Add new ignores for findText calls: The text is a QString and can be None; the flags are valid despite mypy thinking they aren't (stubs regression?). - Specify the type for WebKitHistoryPrivate._history, because we access WebKit-specific attributes. See above (_widget) re this being debatable. - Make mypy aware that .currentFrame() and .frameAt() can return None (stubs regression?). - Make sure the .page() and .page().networkAccessManager() are our subclasses rather than the more generic QtWebKit objects, as we use custom attributes. - Add new type ignores for signals (stubs regression!) 
==== qutebrowser/browser/webkit/webpage.py - Make sure the .networkAccessManager() is our subclass rather than the more generic QtWebKit object, as we use custom attributes. - Replace a cast by a type ignore. The cast didn't work anymore. ==== qutebrowser/browser/webkit/webview.py - Make sure the .page() is our subclass rather than the more generic QtWebKit object, as we use custom attributes. ==== qutebrowser/commands/userscripts.py - Remove now unneeded type ignore for signal. ==== qutebrowser/completion/completer.py - Add a new _completion() getter (which ensures it actually gets the completion view) rather than accessing the .parent() directly (which could be any QObject). ==== qutebrowser/completion/completiondelegate.py - Make sure self.parent() is a CompletionView (no helper method as there is only one instance). - Remove a now-unneeded type ignore for adding QSizes. ==== qutebrowser/completion/completionwidget.py - Add a ._model() getter which ensures that we get a CompletionModel (with custom attributes) rather than Qt's .model() which can be any QAbstractItemModel (or None). - Removed a now-unneeded type ignore for OR-ing flags. ==== qutebrowser/completion/models/completionmodel.py - Remove now unneeded type ignores for signals. - Ignore a complaint about .set_pattern() not being defined. Completion categories don't share any common parent class, so it would be good to introduce a typing.Protocol for this. See #7098. ==== qutebrowser/components/misccommands.py - Removed a now-unneeded type ignore for OR-ing flags. ==== qutebrowser/components/readlinecommands.py - Make sure QApplication.instance() is a QApplication (and not just a QCoreApplication). This includes the former "not None" check. ==== qutebrowser/components/scrollcommands.py - Add basic annotation for "funcs" dict. Could have a callable protocol to specify it needs a count kwarg, see #7098. ==== qutebrowser/config/stylesheet.py - Correctly specify that stylesheet apply to QWidgets, not any QObject. - Ignore an attr-defined for obj.STYLESHEET. Perhaps could somehow teach mypy about this with overloads and protocols (stylesheet for set_register being None => STYLESHEET needs to be defined, otherwise anything goes), but perhaps not worth the troble. See #7098. ==== qutebrowser/keyinput/keyutils.py - Remove some now-unneeded type ignores and add a cast for using a single enum value as flags. Might need to look at this again with Qt 6 support. ==== qutebrowser/keyinput/modeman.py - Add a FIXME for using a TypedDict, see comments for hints.py above. ==== qutebrowser/mainwindow/mainwindow.py - Remove now-unneeded type ignores for calling with OR-ed flags. - Improve where we cast from WindowType to WindowFlags, no int needed - Use new .tab_bar() getter, see below. ==== qutebrowser/mainwindow/prompt.py - Remove now-unneeded type ignores for calling with OR-ed flags. ==== qutebrowser/mainwindow/statusbar/bar.py - Adjust type ignores around @pyqtProperty. The fact one is still needed seems like a stub regression. ==== qutebrowser/mainwindow/statusbar/command.py - Fix type for setText() override (from QLineEdit): text can be None (QString in C++). ==== qutebrowser/mainwindow/statusbar/url.py - Adjust type ignores around @pyqtProperty. The fact one is still needed seems like a stub regression. ==== qutebrowser/mainwindow/tabbedbrowser.py - Specify that TabDeque manages browser tabs, not any QWidgets. It accesses AbstractTab-specific attributes. - Make sure that the .tabBar() we get is a tabwidget.TabBar, as we access .maybe_hide. 
- Fix the annotations for stored marks: Scroll positions are a QPoint, not int. - Add _current_tab() and _tab_by_idx() wrappers for .currentWidget() and .widget(), which ensures that the return values are valid AbstractTabs (or None for _tab_by_idx). This is needed because we access AbstractTab-specific attributes. - For some places, where the tab can be None, continue using .currentTab() but add asserts. - Remove some now-unneeded [unreachable] ignores, as mypy knows about the None possibility now. ==== qutebrowser/mainwindow/tabwidget.py - Add new tab_bar() and _tab_by_idx() helpers which check that the .tabBar() and .widget() are of type TabBar and AbstractTab, respectively. - Add additional assertions where we expect ._tab_by_idx() to never be None. - Remove dead code in get_tab_fields for handling a None y scroll position. I was unable to find any place in the code where this could be set to None. - Remove some now-unneeded type ignores and casts, as mypy now knows that _type_by_idx() could be None. - Work around a strange instance where mypy complains about not being able to find the type of TabBar.drag_in_progress from TabWidget._toggle_visibility, despite it clearly being shown as a bool *inside* that class without any annotation. - Add a ._tab_widget() getter in TabBar which ensures that the .parent() is in fact a TabWidget. ==== qutebrowser/misc/crashsignal.py - Remove now unneeded type ignores for signals. ==== qutebrowser/misc/editor.py - Remove now unneeded type ignores for signals. ==== qutebrowser/misc/ipc.py - Remove now unneeded type ignores for signals. - Add new type ignores for .error() which is both a signal and a getter (stub regression?). Won't be relevant for Qt 6 anymore, as the signal was renamed to errorOccurred in 5.15. ==== qutebrowser/misc/objects.py - Make sure mypy knows that objects.app is our custom Application (with custom attributes) rather than any QApplication. ==== qutebrowser/utils/objreg.py - Ignore attr-defined for .win_id attributes. Maybe could add a typing.Protocol, but ideally, the whole objreg stuff should die one day anyways. ==== tests/unit/completion/test_completer.py - Make CompletionWidgetStub inherit from CompletionView so that it passes the new isinstance() asserts in completer.py (see above).
selectionChanged
a20bb67a878b2e68abf8268c1b0a27f018d01352
qutebrowser
completionwidget.py
12
9
https://github.com/qutebrowser/qutebrowser.git
3
65
0
17
108
Python
{ "docstring": "Extend selectionChanged to call completers selection_changed.", "language": "en", "n_whitespaces": 5, "n_words": 6, "vocab_size": 6 }
def selectionChanged(self, selected, deselected):
    if not self._active:
        return
    super().selectionChanged(selected, deselected)
    indexes = selected.indexes()
    if not indexes:
        return
    data = str(self._model().data(indexes[0]))
    self.selection_changed.emit(data)
48,862
198,300
72
sympy/physics/vector/vector.py
26
10
def __mul__(self, other): newlist = [v for v in self.args] other = sympif
Use sympify less
__mul__
2a1afca9477eb781f16d5d6b63fa37abed7740a3
sympy
vector.py
12
6
https://github.com/sympy/sympy.git
3
64
0
21
97
Python
{ "docstring": "Multiplies the Vector by a sympifyable expression.\n\n Parameters\n ==========\n\n other : Sympifyable\n The scalar to multiply this Vector with\n\n Examples\n ========\n\n >>> from sympy.physics.vector import ReferenceFrame\n >>> from sympy import Symbol\n >>> N = ReferenceFrame('N')\n >>> b = Symbol('b')\n >>> V = 10 * b * N.x\n >>> print(V)\n 10*b*N.x\n\n ", "language": "en", "n_whitespaces": 152, "n_words": 50, "vocab_size": 38 }
def __mul__(self, other):
    newlist = [v for v in self.args]
    other = sympify(other)
    for i, v in enumerate(newlist):
        newlist[i] = (other * newlist[i][0], newlist[i][1])
    return Vector(newlist)
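A quick hedged check of the behaviour above (SymPy required); it complements the doctest in the docstring by multiplying a two-component vector:

from sympy.physics.vector import ReferenceFrame
from sympy import Symbol

N = ReferenceFrame("N")
b = Symbol("b")
V = (3 * N.x + b * N.y) * 2  # scalar multiplication distributes over components
print(V)  # 6*N.x + 2*b*N.y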
33,631
146,201
20
python/ray/serve/application.py
6
7
def to_dict(self) -> Dict: return serve_application_to_schema(self._deployments.values()).dict()
[serve] Implement Serve Application object (#22917) The concept of a Serve Application, a data structure containing all information needed to deploy Serve on a Ray cluster, has surfaced during recent design discussions. This change introduces a formal Application data structure and refactors existing code to use it.
to_dict
1100c982223757f697a410a0d0c3d8bf3ff9c805
ray
application.py
12
10
https://github.com/ray-project/ray.git
1
23
0
6
41
Python
{ "docstring": "Returns this Application's deployments as a dictionary.\n\n This dictionary adheres to the Serve REST API schema. It can be deployed\n via the Serve REST API.\n\n Returns:\n Dict: The Application's deployments formatted in a dictionary.\n ", "language": "en", "n_whitespaces": 73, "n_words": 34, "vocab_size": 27 }
def to_dict(self) -> Dict: return serve_application_to_schema(self._deployments.values()).dict()
14,740
68,207
117
erpnext/hr/doctype/shift_assignment/shift_assignment.py
149
25
def get_shift_details(shift_type_name, for_timestamp=None): if not shift_type_name: return None if not for_timestamp: for_timestamp = now_datetime() shift_type = frappe.get_doc('Shift Type', shift_type_name) shift_actual_start = shift_type.start_time - timedelta(minutes=shift_type.begin_check_in_before_shift_start_time) if shift_type.start_time > shift_type.end_time: # shift spans accross 2 different days if get_time(for_timestamp.time()) >= get_time(shift_actual_start): # if for_timestamp is greater than start time, its in the first day start_datetime = datetime.combine(for_timestamp, datetime.min.time()) + shift_type.start_time for_timestamp = for_timestamp + timedelta(days=1) end_datetime = datetime.combine(for_timestamp, datetime.min.time()) + shift_type.end_time elif get_time(for_timestamp.time()) < get_time(shift_actual_start): # if for_timestamp is less than start time, its in the second day end_datetime = datetime.combine(for_timestamp, datetime.min.time()) + shift_type.end_time for_timestamp = for_timestamp + timedelta(days=-1) start_datetime = datetime.combine(for_timestamp, datetime.min.time()) + shift_type.start_time else: # start and end times fall on the same day start_datetime = datetime.combine(for_timestamp, datetime.min.time()) + shift_type.start_time end_datetime = datetime.combine(for_timestamp, datetime.min.time()) + shift_type.end_time actual_start = start_datetime - timedelta(minutes=shift_type.begin_check_in_before_shift_start_time) actual_end = end_datetime + timedelta(minutes=shift_type.allow_check_out_after_shift_end_time) return frappe._dict({ 'shift_type': shift_type, 'start_datetime': start_datetime, 'end_datetime': end_da
refactor: handle shifts spanning over 2 different days
get_shift_details
62e72752dce92792166f9b734c2306adb4b41147
erpnext
shift_assignment.py
17
28
https://github.com/frappe/erpnext.git
6
282
0
75
460
Python
{ "docstring": "Returns Shift Details which contain some additional information as described below.\n\t'shift_details' contains the following keys:\n\t 'shift_type' - Object of DocType Shift Type,\n\t 'start_datetime' - Date and Time of shift start on given date,\n\t 'end_datetime' - Date and Time of shift end on given date,\n\t 'actual_start' - datetime of shift start after adding 'begin_check_in_before_shift_start_time',\n\t 'actual_end' - datetime of shift end after adding 'allow_check_out_after_shift_end_time'(None is returned if this is zero)\n\n\t:param shift_type_name: shift type name for which shift_details is required.\n\t:param for_timestamp: DateTime value on which shift_details are required\n\t", "language": "en", "n_whitespaces": 119, "n_words": 88, "vocab_size": 57 }
def get_shift_details(shift_type_name, for_timestamp=None):
    if not shift_type_name:
        return None
    if not for_timestamp:
        for_timestamp = now_datetime()

    shift_type = frappe.get_doc('Shift Type', shift_type_name)
    shift_actual_start = shift_type.start_time - timedelta(minutes=shift_type.begin_check_in_before_shift_start_time)

    if shift_type.start_time > shift_type.end_time:
        # shift spans across 2 different days
        if get_time(for_timestamp.time()) >= get_time(shift_actual_start):
            # if for_timestamp is greater than start time, it's in the first day
            start_datetime = datetime.combine(for_timestamp, datetime.min.time()) + shift_type.start_time
            for_timestamp = for_timestamp + timedelta(days=1)
            end_datetime = datetime.combine(for_timestamp, datetime.min.time()) + shift_type.end_time
        elif get_time(for_timestamp.time()) < get_time(shift_actual_start):
            # if for_timestamp is less than start time, it's in the second day
            end_datetime = datetime.combine(for_timestamp, datetime.min.time()) + shift_type.end_time
            for_timestamp = for_timestamp + timedelta(days=-1)
            start_datetime = datetime.combine(for_timestamp, datetime.min.time()) + shift_type.start_time
    else:
        # start and end times fall on the same day
        start_datetime = datetime.combine(for_timestamp, datetime.min.time()) + shift_type.start_time
        end_datetime = datetime.combine(for_timestamp, datetime.min.time()) + shift_type.end_time

    actual_start = start_datetime - timedelta(minutes=shift_type.begin_check_in_before_shift_start_time)
    actual_end = end_datetime + timedelta(minutes=shift_type.allow_check_out_after_shift_end_time)

    return frappe._dict({
        'shift_type': shift_type,
        'start_datetime': start_datetime,
        'end_datetime': end_datetime,
        'actual_start': actual_start,
        'actual_end': actual_end
    })
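A frappe-free sketch of the overnight branch above, with illustrative times (a 22:00 to 06:00 shift queried at 02:00); the check-in grace period is ignored for brevity:

from datetime import datetime, timedelta, time

shift_start = timedelta(hours=22)            # shift runs 22:00 ...
shift_end = timedelta(hours=6)               # ... to 06:00 the next day
for_timestamp = datetime(2022, 1, 10, 2, 0)  # queried at 02:00, the "second day"

# start > end means the shift crosses midnight; a timestamp before the start
# time belongs to a shift that began on the previous calendar day
if shift_start > shift_end and for_timestamp.time() < time(22, 0):
    end_dt = datetime.combine(for_timestamp, time.min) + shift_end
    start_dt = datetime.combine(for_timestamp - timedelta(days=1), time.min) + shift_start

print(start_dt, "->", end_dt)  # 2022-01-09 22:00:00 -> 2022-01-10 06:00:00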
23,212
108,485
116
lib/matplotlib/testing/__init__.py
35
22
def subprocess_run_helper(func, *args, timeout, extra_env=None): target = func.__name__ module = func.__module__ proc = subprocess.run( [sys.executable, "-c", f"from {module} import {target}; {target}()", *args], env={**os.environ, "SOURCE_DATE_EPOCH": "0", **(extra_env or {})}, timeout=timeout, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) return proc
Tweak subprocess_run_helper. On general grounds, an API like `subprocess_run_helper(func, *args, timeout, **extra_env)` is problematic because it prevents one from passing an environment variable called "timeout". Instead, pass the extra environment variables as a dict, without unpacking. (Technically this has been released in 3.5.2 as public API, but 1) I'm not really sure it should have been a public API to start with (should we deprecate it and make it private?), and 2) hopefully tweaking that in 3.5.3 with no deprecation is not going to disrupt anyone... I can still put in a changelog entry if that's preferred.)
subprocess_run_helper
031093e6f05496f55616a1fa2f39e573fea02828
matplotlib
__init__.py
14
14
https://github.com/matplotlib/matplotlib.git
2
92
0
32
151
Python
{ "docstring": "\n Run a function in a sub-process.\n\n Parameters\n ----------\n func : function\n The function to be run. It must be in a module that is importable.\n *args : str\n Any additional command line arguments to be passed in\n the first argument to ``subprocess.run``.\n extra_env : dict[str, str]\n Any additional environment variables to be set for the subprocess.\n ", "language": "en", "n_whitespaces": 107, "n_words": 56, "vocab_size": 39 }
def subprocess_run_helper(func, *args, timeout, extra_env=None):
    target = func.__name__
    module = func.__module__
    proc = subprocess.run(
        [sys.executable,
         "-c",
         f"from {module} import {target}; {target}()",
         *args],
        env={**os.environ, "SOURCE_DATE_EPOCH": "0", **(extra_env or {})},
        timeout=timeout, check=True,
        stdout=subprocess.PIPE, stderr=subprocess.PIPE,
        universal_newlines=True)
    return proc
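A self-contained illustration of the `-c "from module import target; target()"` mechanism the helper builds on, using a stdlib function so it runs anywhere:

import subprocess
import sys

proc = subprocess.run(
    [sys.executable, "-c",
     "from platform import python_version; print(python_version())"],
    stdout=subprocess.PIPE, universal_newlines=True, check=True, timeout=30,
)
print(proc.stdout.strip())  # e.g. 3.10.4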
26,297
118,567
29
lib/streamlit/server/server.py
8
7
def add_preheated_app_session(self) -> None: session = self._create_or_reuse_app_session(ws=None) session.handle_rerun_script_request(is_preheat=True)
Rename and refactor `Report` machinery (#4141) This refactor renames (almost) everything related to the outdated "report" concept with more precise concepts that we use throughout our code, primarily "script run", "session", and "app".
add_preheated_app_session
704eab3478cf69847825b23dabf15813a8ac9fa2
streamlit
server.py
9
8
https://github.com/streamlit/streamlit.git
1
26
0
8
45
Python
{ "docstring": "Register a fake browser with the server and run the script.\n\n This is used to start running the user's script even before the first\n browser connects.\n ", "language": "en", "n_whitespaces": 47, "n_words": 26, "vocab_size": 22 }
def add_preheated_app_session(self) -> None:
    session = self._create_or_reuse_app_session(ws=None)
    session.handle_rerun_script_request(is_preheat=True)
43,599
181,815
312
tpot/base.py
88
17
def score(self, testing_features, testing_target): if self.fitted_pipeline_ is None: raise RuntimeError( "A pipeline has not yet been optimized. Please call fit() first." ) testing_feature
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
score
388616b6247ca4ea8de4e2f340d6206aee523541
tpot
base.py
11
22
https://github.com/EpistasisLab/tpot.git
4
105
0
64
168
Python
{ "docstring": "Return the score on the given testing data using the user-specified scoring function.\n\n Parameters\n ----------\n testing_features: array-like {n_samples, n_features}\n Feature matrix of the testing set\n testing_target: array-like {n_samples}\n List of class labels for prediction in the testing set\n\n Returns\n -------\n accuracy_score: float\n The estimated test set accuracy\n\n ", "language": "en", "n_whitespaces": 136, "n_words": 47, "vocab_size": 37 }
def score(self, testing_features, testing_target):
    if self.fitted_pipeline_ is None:
        raise RuntimeError(
            "A pipeline has not yet been optimized. Please call fit() first."
        )

    testing_features, testing_target = self._check_dataset(
        testing_features, testing_target, sample_weight=None
    )

    # If the scoring function is a string, we must adjust to use the sklearn
    # scoring interface
    if isinstance(self.scoring_function, str):
        scorer = SCORERS[self.scoring_function]
    elif callable(self.scoring_function):
        scorer = self.scoring_function
    else:
        raise RuntimeError(
            "The scoring function should either be the name of a scikit-learn scorer or a scorer object"
        )
    score = scorer(
        self.fitted_pipeline_,
        testing_features.astype(np.float64),
        testing_target.astype(np.float64),
    )
    return score
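A hedged usage sketch (assumes TPOT and scikit-learn are installed; the tiny search budget is only to keep the run short):

from tpot import TPOTClassifier
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split

X, y = load_digits(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

tpot = TPOTClassifier(generations=1, population_size=10, random_state=0)
tpot.fit(X_train, y_train)         # fit() must precede score()
print(tpot.score(X_test, y_test))  # uses the configured scoring function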
89,038
289,912
85
tests/util/test_unit_system.py
24
24
def test_as_dict(): expected = { LENGTH: UnitOfLength.KILOMETERS, WIND_SPEED: UnitOfSpeed.METERS_PER_SECOND, TEMPERATURE: UnitOfTemperature.CELSIUS, VOLUME: UnitOfVolume.LITERS, MASS: U
Use unit enums in unit utilities (#81030)
test_as_dict
2a2e097e174204e3710161898b4302e1bceca1e5
core
test_unit_system.py
9
11
https://github.com/home-assistant/core.git
1
59
0
23
88
Python
{ "docstring": "Test that the as_dict() method returns the expected dictionary.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 8 }
def test_as_dict():
    expected = {
        LENGTH: UnitOfLength.KILOMETERS,
        WIND_SPEED: UnitOfSpeed.METERS_PER_SECOND,
        TEMPERATURE: UnitOfTemperature.CELSIUS,
        VOLUME: UnitOfVolume.LITERS,
        MASS: UnitOfMass.GRAMS,
        PRESSURE: UnitOfPressure.PA,
        ACCUMULATED_PRECIPITATION: UnitOfLength.MILLIMETERS,
    }

    assert expected == METRIC_SYSTEM.as_dict()
19,870
100,384
120
plugins/train/model/_base.py
32
13
def _get_inputs(self): logger.debug("Getting inputs") if len(self.input_shape) == 3: input_shapes = [self.input_shape, self.input_shape] else: input_shapes = self.in
Update code to support Tensorflow versions up to 2.8 (#1213) * Update maximum tf version in setup + requirements * - bump max version of tf version in launcher - standardise tf version check * update keras get_custom_objects for tf>2.6 * bugfix: force black text in GUI file dialogs (linux) * dssim loss - Move to stock tf.ssim function * Update optimizer imports for compatibility * fix logging for tf2.8 * Fix GUI graphing for TF2.8 * update tests * bump requirements.txt versions * Remove limit on nvidia-ml-py * Graphing bugfixes - Prevent live graph from displaying if data not yet available * bugfix: Live graph. Collect loss labels correctly * fix: live graph - swallow inconsistent loss errors * Bugfix: Prevent live graph from clearing during training * Fix graphing for AMD
_get_inputs
c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf
faceswap
_base.py
12
10
https://github.com/deepfakes/faceswap.git
3
80
0
28
137
Python
{ "docstring": " Obtain the standardized inputs for the model.\n\n The inputs will be returned for the \"A\" and \"B\" sides in the shape as defined by\n :attr:`input_shape`.\n\n Returns\n -------\n list\n A list of :class:`keras.layers.Input` tensors. This will be a list of 2 tensors (one\n for each side) each of shapes :attr:`input_shape`.\n ", "language": "en", "n_whitespaces": 114, "n_words": 49, "vocab_size": 35 }
def _get_inputs(self):
    logger.debug("Getting inputs")
    if len(self.input_shape) == 3:
        input_shapes = [self.input_shape, self.input_shape]
    else:
        input_shapes = self.input_shape
    inputs = [Input(shape=shape, name=f"face_in_{side}")
              for side, shape in zip(("a", "b"), input_shapes)]
    logger.debug("inputs: %s", inputs)
    return inputs
79,311
268,037
59
test/lib/ansible_test/_internal/python_requirements.py
33
15
def collect_units_install() -> t.List[PipInstall]: requirements_paths = [] # type: t.List[t.Tuple[str, str]] constraints_paths = [] # type: t.List[t.Tuple[str, str]] path = os.path.join(data_context().content.unit_path, 'requirements.txt') requirements_paths.append((data_context().content.root, path)) path = os.path.join(data_context().content.unit_path, 'constraints.txt') constraints_paths.append((data_context().content.root, path)) return collect_install(requirements_paths, constraints_paths)
ansible-test - Use more native type hints. (#78435) * ansible-test - Use more native type hints. Simple search and replace to switch from comments to native type hints for return types of functions with no arguments. * ansible-test - Use more native type hints. Conversion of simple single-line function annotation type comments to native type hints. * ansible-test - Use more native type hints. Conversion of single-line function annotation type comments with default values to native type hints. * ansible-test - Use more native type hints. Manual conversion of type annotation comments for functions which have pylint directives.
collect_units_install
3eb0485dd92c88cc92152d3656d94492db44b183
ansible
python_requirements.py
12
9
https://github.com/ansible/ansible.git
1
95
0
22
158
Python
{ "docstring": "Return details necessary for the specified units pip install(s).", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
def collect_units_install() -> t.List[PipInstall]:
    requirements_paths = []  # type: t.List[t.Tuple[str, str]]
    constraints_paths = []  # type: t.List[t.Tuple[str, str]]

    path = os.path.join(data_context().content.unit_path, 'requirements.txt')
    requirements_paths.append((data_context().content.root, path))

    path = os.path.join(data_context().content.unit_path, 'constraints.txt')
    constraints_paths.append((data_context().content.root, path))

    return collect_install(requirements_paths, constraints_paths)
@add_start_docstrings( """YOSO Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.""", YOSO_START_DOCSTRING, )
6,289
34,527
90
src/transformers/models/yoso/modeling_yoso.py
37
13
def forward(self, features, **kwargs): x = features[:, 0, :] # take <s> token (equiv. to [CLS]) x = self.dropout(x) x = self.dense(x) x = ACT2FN[self.config.hidden_act](x) x = self.dropout(x) x = self.out_proj(x)
Add YOSO (#15091) * Add cookiecutter files * Add cuda kernels and cpp files * Update modeling_yoso.py * Add .h files * Update configuration_yoso.py * Updates * Remove tokenizer * Code quality * Update modeling_yoso.py * Update modeling_yoso.py * Fix failing test * Update modeling_yoso.py * Fix code quality * Apply suggestions from code review Co-authored-by: NielsRogge <[email protected]> * Apply suggestions from code review * Apply suggestions from code review Co-authored-by: Sylvain Gugger <[email protected]> * Apply suggestions from code review and fix integration tests * Update src/transformers/models/yoso/modeling_yoso.py Co-authored-by: Patrick von Platen <[email protected]> * Apply suggestions from code review * Fix copied from statement * Fix docstring * Fix code quality * Apply suggestions from code review Co-authored-by: Sylvain Gugger <[email protected]> * Apply suggestions and fix mask * Apply suggestions from code review * Fix code quality * Apply suggestions from code review Co-authored-by: Sylvain Gugger <[email protected]> * Fix docstrings * Fix code quality * Remove trailing whitespace * Update yoso.mdx * Move kernel loading to YosoEncoder * make style * Apply suggestions from code review Co-authored-by: NielsRogge <[email protected]> * Update src/transformers/models/yoso/modeling_yoso.py Co-authored-by: NielsRogge <[email protected]> * Add short summary to docs * Update docs/source/model_doc/yoso.mdx Co-authored-by: NielsRogge <[email protected]> * Update yoso.mdx * Update docs/source/model_doc/yoso.mdx Co-authored-by: NielsRogge <[email protected]> * Remove CausalLM model and add copied from * Remove autoregressive code * Remove unused imports * add copied from for embeddings * Fix code quality * Update docs/source/model_doc/yoso.mdx Co-authored-by: NielsRogge <[email protected]> * Apply suggestion from code review Co-authored-by: NielsRogge <[email protected]> Co-authored-by: Sylvain Gugger <[email protected]> Co-authored-by: Patrick von Platen <[email protected]>
forward
99a2771189321c826ff55d161a7cfedadd4023c7
transformers
modeling_yoso.py
10
8
https://github.com/huggingface/transformers.git
1
67
1
25
120
Python
{ "docstring": "YOSO Model transformer with a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks.", "language": "en", "n_whitespaces": 25, "n_words": 23, "vocab_size": 21 }
def forward(self, features, **kwargs):
    x = features[:, 0, :]  # take <s> token (equiv. to [CLS])
    x = self.dropout(x)
    x = self.dense(x)
    x = ACT2FN[self.config.hidden_act](x)
    x = self.dropout(x)
    x = self.out_proj(x)
    return x


@add_start_docstrings(
    """YOSO Model transformer with a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks.""",
    YOSO_START_DOCSTRING,
)
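A framework-minimal PyTorch sketch of the same head flow, with illustrative sizes and tanh standing in for `ACT2FN[config.hidden_act]`:

import torch
import torch.nn as nn

hidden, num_labels = 16, 3
dense = nn.Linear(hidden, hidden)
out_proj = nn.Linear(hidden, num_labels)
dropout = nn.Dropout(0.1)

features = torch.randn(2, 5, hidden)  # (batch, seq_len, hidden)
x = features[:, 0, :]                 # pool by taking the <s> / [CLS] token
x = out_proj(dropout(torch.tanh(dense(dropout(x)))))
assert x.shape == (2, num_labels)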
51,270
205,896
181
django/db/models/sql/subqueries.py
39
18
def delete_batch(self, pk_list, using): # number of objects deleted num_deleted = 0 field = self.get_meta().pk for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE): self.clear_where() self.add_filter( f"{field.attname}__in", pk_list[offset : offset + GET_ITERATOR_CHUNK_SIZE], ) num_deleted += self.do_query( self.get_meta().db_table, self.where, using=using
Refs #33476 -- Reformatted code with Black.
delete_batch
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
subqueries.py
13
13
https://github.com/django/django.git
2
83
0
34
136
Python
{ "docstring": "\n Set up and execute delete queries for all the objects in pk_list.\n\n More than one physical query may be executed if there are a\n lot of values in pk_list.\n ", "language": "en", "n_whitespaces": 58, "n_words": 29, "vocab_size": 27 }
def delete_batch(self, pk_list, using):
    # number of objects deleted
    num_deleted = 0
    field = self.get_meta().pk
    for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
        self.clear_where()
        self.add_filter(
            f"{field.attname}__in",
            pk_list[offset : offset + GET_ITERATOR_CHUNK_SIZE],
        )
        num_deleted += self.do_query(
            self.get_meta().db_table, self.where, using=using
        )
    return num_deleted
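A standalone sketch of the chunking arithmetic (GET_ITERATOR_CHUNK_SIZE is 100 in Django):

GET_ITERATOR_CHUNK_SIZE = 100
pk_list = list(range(250))

batches = [
    pk_list[offset:offset + GET_ITERATOR_CHUNK_SIZE]
    for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE)
]
assert [len(b) for b in batches] == [100, 100, 50]  # one DELETE query per batch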
79,234
267,959
18
test/lib/ansible_test/_internal/coverage_util.py
8
3
def generate_ansible_coverage_config() -> str: coverage_config = return coverage_config
ansible-test - Use more native type hints. (#78435) * ansible-test - Use more native type hints. Simple search and replace to switch from comments to native type hints for return types of functions with no arguments. * ansible-test - Use more native type hints. Conversion of simple single-line function annotation type comments to native type hints. * ansible-test - Use more native type hints. Conversion of single-line function annotation type comments with default values to native type hints. * ansible-test - Use more native type hints. Manual conversion of type annotation comments for functions which have pylint directives.
generate_ansible_coverage_config
3eb0485dd92c88cc92152d3656d94492db44b183
ansible
coverage_util.py
7
18
https://github.com/ansible/ansible.git
1
12
0
7
25
Python
{ "docstring": "Generate code coverage configuration for Ansible tests.\n[run]\nbranch = True\nconcurrency = multiprocessing\nparallel = True\n\nomit =\n */python*/dist-packages/*\n */python*/site-packages/*\n */python*/distutils/*\n */pyshared/*\n */pytest\n */AnsiballZ_*.py\n */test/results/*\n", "language": "en", "n_whitespaces": 41, "n_words": 26, "vocab_size": 22 }
def generate_ansible_coverage_config() -> str:
    coverage_config = '''
[run]
branch = True
concurrency = multiprocessing
parallel = True

omit =
    */python*/dist-packages/*
    */python*/site-packages/*
    */python*/distutils/*
    */pyshared/*
    */pytest
    */AnsiballZ_*.py
    */test/results/*
'''

    return coverage_config
19,555
98,271
68
src/sentry/models/organizationmember.py
14
8
def get_allowed_roles_to_invite(self): return [ r for r in organization_roles.get_all() if r.priority <= organization_roles.get(self.role).priority ]
feat(access): Implement team-based role management and access (#33387) Introduce team-based roles in parallel to existing, organization-based roles. Split the levels into their own objects, accessible through the parent RoleManager object. Map org roles onto the corresponding minimum team roles, which each member with that org role enjoys automatically. Have Access.has_team_scope check scopes given by the member's team role, in addition to those given by their organization role. This differs from previous behavior, in that a member might enjoy a scope for a particular team that they would not if Access.has_scope were called. Introduce the "organizations:team-roles" feature flag. Organizations without this flag don't give any additional scopes for team roles. There is currently no way to assign team roles. API support is pending.
get_allowed_roles_to_invite
b7dee7f2457a911bea343d20f2119e691bb153ce
sentry
organizationmember.py
12
6
https://github.com/getsentry/sentry.git
3
33
0
13
54
Python
{ "docstring": "\n Return a list of roles which that member could invite\n Must check if member member has member:admin first before checking\n ", "language": "en", "n_whitespaces": 42, "n_words": 20, "vocab_size": 18 }
def get_allowed_roles_to_invite(self):
    return [
        r
        for r in organization_roles.get_all()
        if r.priority <= organization_roles.get(self.role).priority
    ]
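A dependency-free sketch of the priority filter, with made-up role data:

roles = {"member": 1, "admin": 2, "manager": 3, "owner": 4}  # illustrative priorities

def allowed_to_invite(my_role):
    my_priority = roles[my_role]
    return [name for name, priority in roles.items() if priority <= my_priority]

assert allowed_to_invite("admin") == ["member", "admin"]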
24,601
112,159
21
nni/retiarii/oneshot/pytorch/supermodule/base.py
7
6
def search_space_spec(self) -> Dict[str, ParameterSpec]: raise NotImplementedError()
Valuechoice oneshot lightning (#4602)
search_space_spec
14d2966b9e91ae16dcc39de8f41017a75cec8ff9
nni
base.py
7
10
https://github.com/microsoft/nni.git
1
17
0
7
29
Python
{ "docstring": "\n Space specification (sample points).\n Mapping from spec name to ParameterSpec. The names in choices should be in the same format of export.\n\n For example: ::\n\n {\"layer1\": ParameterSpec(values=[\"conv\", \"pool\"])}\n ", "language": "en", "n_whitespaces": 68, "n_words": 28, "vocab_size": 27 }
def search_space_spec(self) -> Dict[str, ParameterSpec]: raise NotImplementedError()
71,346
246,790
557
tests/rest/admin/test_room.py
132
34
def test_context_as_admin(self) -> None: # Create a room. We're not part of it. user_id = self.register_user("test", "test") user_tok = self.login("test", "test") room_id = self.helper.create_room_as(user_id, tok=user_tok) # Populate the room with events. events = [] for i in range(30): events.append( self.helper.send_event( room_id, "com.example.test", content={"index": i}, tok=user_tok ) ) # Now let's fetch the context for this room. midway = (len(events) - 1) // 2 channel = self.make_request( "GET", "/_synapse/admin/v1/rooms/%s/context/%s" % (room_id, events[midway]["event_id"]), access_token=self.admin_user_tok, ) self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) self.assertEqual( channel.json_body["event"]["event_id"], events[midway]["event_id"] ) for found_event in channel.json_body["events_before"]: for j, posted_event in enumerate(events): if found_event["event_id"] == posted_event["event_id"]: self.assertTrue(j < midway) break else: self.fail("Event %s from events_before not found" % j) for found_event in channel.json_body["events_after"]: for j, posted_event in enumerate(events): if found_event["event_id"] == posted_event["event_id"]:
Replace assertEquals and friends with non-deprecated versions. (#12092)
test_context_as_admin
02d708568b476f2f7716000b35c0adfa4cbd31b3
synapse
test_room.py
15
39
https://github.com/matrix-org/synapse.git
8
261
0
89
438
Python
{ "docstring": "\n Test that, as admin, we can find the context of an event without having joined the room.\n ", "language": "en", "n_whitespaces": 32, "n_words": 17, "vocab_size": 16 }
def test_context_as_admin(self) -> None:
    # Create a room. We're not part of it.
    user_id = self.register_user("test", "test")
    user_tok = self.login("test", "test")
    room_id = self.helper.create_room_as(user_id, tok=user_tok)

    # Populate the room with events.
    events = []
    for i in range(30):
        events.append(
            self.helper.send_event(
                room_id, "com.example.test", content={"index": i}, tok=user_tok
            )
        )

    # Now let's fetch the context for this room.
    midway = (len(events) - 1) // 2
    channel = self.make_request(
        "GET",
        "/_synapse/admin/v1/rooms/%s/context/%s"
        % (room_id, events[midway]["event_id"]),
        access_token=self.admin_user_tok,
    )
    self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
    self.assertEqual(
        channel.json_body["event"]["event_id"], events[midway]["event_id"]
    )
    for found_event in channel.json_body["events_before"]:
        for j, posted_event in enumerate(events):
            if found_event["event_id"] == posted_event["event_id"]:
                self.assertTrue(j < midway)
                break
        else:
            self.fail("Event %s from events_before not found" % j)
    for found_event in channel.json_body["events_after"]:
        for j, posted_event in enumerate(events):
            if found_event["event_id"] == posted_event["event_id"]:
                self.assertTrue(j > midway)
                break
        else:
            self.fail("Event %s from events_after not found" % j)
36,910
157,370
23
ldm/models/diffusion/dpm_solver/dpm_solver.py
9
7
def marginal_std(self, t):
release more models
marginal_std
ca86da3a30c4e080d4db8c25fca73de843663cb4
stablediffusion
dpm_solver.py
13
2
https://github.com/Stability-AI/stablediffusion.git
1
31
0
9
48
Python
{ "docstring": "\n Compute sigma_t of a given continuous-time label t in [0, T].\n ", "language": "en", "n_whitespaces": 26, "n_words": 11, "vocab_size": 11 }
def marginal_std(self, t): return torch.sqrt(1. - torch.exp(2. * self.marginal_log_mean_coeff(t)))
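A numeric sanity check of the identity behind the one-liner: with alpha_t = exp(marginal_log_mean_coeff(t)), a variance-preserving diffusion satisfies alpha_t**2 + sigma_t**2 = 1. The coefficient value here is illustrative, not the model's real schedule:

import math

log_mean_coeff = -0.5  # hypothetical value of marginal_log_mean_coeff(t)
sigma_t = math.sqrt(1.0 - math.exp(2.0 * log_mean_coeff))
alpha_t = math.exp(log_mean_coeff)
assert abs(alpha_t**2 + sigma_t**2 - 1.0) < 1e-12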
16,745
78,245
108
wagtail/contrib/settings/models.py
34
14
def for_request(cls, request): attr_name = cls.get_cache_attr_name() if hasattr(request, attr_name): return getattr(request, attr_name) site = Site.find_for_request(request) site_settings = cls.for_site(site) # to allow mo
Add generic settings to compliment site-specific settings (#8327)
for_request
d967eccef28ce47f60d26be1c28f2d83a25f40b0
wagtail
models.py
9
9
https://github.com/wagtail/wagtail.git
2
61
0
29
98
Python
{ "docstring": "\n Get or create an instance of this model for the request,\n and cache the result on the request for faster repeat access.\n ", "language": "en", "n_whitespaces": 44, "n_words": 22, "vocab_size": 19 }
def for_request(cls, request):
    attr_name = cls.get_cache_attr_name()
    if hasattr(request, attr_name):
        return getattr(request, attr_name)
    site = Site.find_for_request(request)
    site_settings = cls.for_site(site)
    # to allow more efficient page url generation
    site_settings._request = request
    setattr(request, attr_name, site_settings)
    return site_settings
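A framework-free sketch of the same request-level memoisation pattern (the request class and lookup are stand-ins):

class FakeRequest:
    pass

def expensive_lookup():
    return {"theme": "dark"}  # imagine a database round-trip here

def settings_for_request(request, attr_name="_cached_settings"):
    if hasattr(request, attr_name):
        return getattr(request, attr_name)  # cache hit: no second lookup
    value = expensive_lookup()
    setattr(request, attr_name, value)
    return value

request = FakeRequest()
assert settings_for_request(request) is settings_for_request(request)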
45,473
186,377
518
certbot-apache/certbot_apache/_internal/configurator.py
108
31
def _enable_ocsp_stapling(self, ssl_vhost, unused_options): min_apache_ver = (2, 3, 3) if self.get_version() < min_apache_ver: raise errors.PluginError( "Unable to set OCSP directives.\n" "Apache version is below 2.3.3.") if "socache_shmcb_module" not in self.parser.modules: self.enable_mod("socache_shmcb") # Check if there's an existing SSLUseStapling directive on. use_stapling_aug_path = self.parser.find_dir("SSLUseStapling", "on", start=ssl_vhost.path) if not use_stapling_aug_path: self.parser.add_dir(ssl_vhost.path, "SSLUseStapling", "on") ssl_vhost_aug_path = self._escape(parser.get_aug_path(ssl_vhost.filep)) # Check if there's an existing SSLStaplingCache directive. stapling_cache_aug_path = self.parser.find_dir('SSLStaplingCache', None, ssl_vhost_aug_path) # We'll simply delete the directive, so that we'll have a # consistent OCSP cache path. if stapling_cache_aug_path: self.parser.aug.remove( re.sub(r"/\w*$", "", stapling_cache_aug_path[0])) self.parser.add_dir_to_ifmodssl(ssl_vhost_aug_path, "SSLStaplingCache", ["shmcb:/var/run/apache2/stapling_cache(128000)"]) msg = "OCSP Stapling was enabled on SSL Vhost: %s.\n"%(
Various clean-ups in certbot-apache. Use f-strings. (#9132) * Various clean-ups in certbot-apache. Use f-strings. * Smaller tweaks
_enable_ocsp_stapling
eeca208c8f57304590ac1af80b496e61021aaa45
certbot
configurator.py
12
26
https://github.com/certbot/certbot.git
5
182
0
89
311
Python
{ "docstring": "Enables OCSP Stapling\n\n In OCSP, each client (e.g. browser) would have to query the\n OCSP Responder to validate that the site certificate was not revoked.\n\n Enabling OCSP Stapling, would allow the web-server to query the OCSP\n Responder, and staple its response to the offered certificate during\n TLS. i.e. clients would not have to query the OCSP responder.\n\n OCSP Stapling enablement on Apache implicitly depends on\n SSLCertificateChainFile being set by other code.\n\n .. note:: This function saves the configuration\n\n :param ssl_vhost: Destination of traffic, an ssl enabled vhost\n :type ssl_vhost: :class:`~certbot_apache._internal.obj.VirtualHost`\n\n :param unused_options: Not currently used\n :type unused_options: Not Available\n\n :returns: Success, general_vhost (HTTP vhost)\n :rtype: (bool, :class:`~certbot_apache._internal.obj.VirtualHost`)\n\n ", "language": "en", "n_whitespaces": 212, "n_words": 107, "vocab_size": 78 }
def _enable_ocsp_stapling(self, ssl_vhost, unused_options):
    min_apache_ver = (2, 3, 3)
    if self.get_version() < min_apache_ver:
        raise errors.PluginError(
            "Unable to set OCSP directives.\n"
            "Apache version is below 2.3.3.")

    if "socache_shmcb_module" not in self.parser.modules:
        self.enable_mod("socache_shmcb")

    # Check if there's an existing SSLUseStapling directive on.
    use_stapling_aug_path = self.parser.find_dir("SSLUseStapling",
                                                 "on", start=ssl_vhost.path)
    if not use_stapling_aug_path:
        self.parser.add_dir(ssl_vhost.path, "SSLUseStapling", "on")

    ssl_vhost_aug_path = self._escape(parser.get_aug_path(ssl_vhost.filep))

    # Check if there's an existing SSLStaplingCache directive.
    stapling_cache_aug_path = self.parser.find_dir('SSLStaplingCache',
                                                   None, ssl_vhost_aug_path)

    # We'll simply delete the directive, so that we'll have a
    # consistent OCSP cache path.
    if stapling_cache_aug_path:
        self.parser.aug.remove(
            re.sub(r"/\w*$", "", stapling_cache_aug_path[0]))

    self.parser.add_dir_to_ifmodssl(ssl_vhost_aug_path,
                                    "SSLStaplingCache",
                                    ["shmcb:/var/run/apache2/stapling_cache(128000)"])

    msg = "OCSP Stapling was enabled on SSL Vhost: %s.\n" % (
        ssl_vhost.filep)
    self.save_notes += msg
    self.save()
    logger.info(msg)
14,494
67,319
33
erpnext/regional/united_arab_emirates/utils.py
44
14
def make_regional_gl_entries(gl_entries, doc): country = frappe.get_cached_value("Company", doc.company, "country")
style: format code with black
make_regional_gl_entries
494bd9ef78313436f0424b918f200dab8fc7c20b
erpnext
utils.py
12
11
https://github.com/frappe/erpnext.git
5
81
0
35
137
Python
{ "docstring": "Hooked to make_regional_gl_entries in Purchase Invoice.It appends the region specific general ledger entries to the list of GL Entries.", "language": "en", "n_whitespaces": 18, "n_words": 19, "vocab_size": 17 }
def make_regional_gl_entries(gl_entries, doc):
    country = frappe.get_cached_value("Company", doc.company, "country")

    if country != "United Arab Emirates":
        return gl_entries

    if doc.reverse_charge == "Y":
        tax_accounts = get_tax_accounts(doc.company)
        for tax in doc.get("taxes"):
            if tax.category not in ("Total", "Valuation and Total"):
                continue
            gl_entries = make_gl_entry(tax, gl_entries, doc, tax_accounts)
    return gl_entries
1,387
8,266
75
tests/integration_tests/utils.py
23
8
def private_param(param): return pytest.param( *param, marks=pytest.mark.skipif( not _run_private_tests, reason="Skipping: this test is marked private, set RUN_PRIVATE=1 in your environment to run", ), )
Fixed issue when uploading output directory artifacts to remote filesystems (#2598)
private_param
d4dcff26dd9f25b3eb43c4e74a56af93879eeab2
ludwig
utils.py
12
8
https://github.com/ludwig-ai/ludwig.git
1
32
0
23
53
Python
{ "docstring": "Wrap param to mark it as private, meaning it requires credentials to run.\n\n Private tests are skipped by default. Set the RUN_PRIVATE environment variable to a truth value to run them.\n ", "language": "en", "n_whitespaces": 37, "n_words": 31, "vocab_size": 27 }
def private_param(param):
    return pytest.param(
        *param,
        marks=pytest.mark.skipif(
            not _run_private_tests,
            reason="Skipping: this test is marked private, set RUN_PRIVATE=1 in your environment to run",
        ),
    )
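A hedged usage sketch (pytest required; the parameters and test body are illustrative):

import pytest

@pytest.mark.parametrize(
    "backend,url",
    [
        ("local", "file:///tmp/results"),              # always runs
        private_param(("s3", "s3://example-bucket")),  # skipped unless RUN_PRIVATE=1
    ],
)
def test_upload_backend(backend, url):
    assert "://" in url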
@not_implemented_for("undirected")
42,296
177,161
172
networkx/algorithms/d_separation.py
108
28
def minimal_d_separator(G, u, v): if not nx.is_directed_acyclic_graph(G): raise nx.NetworkXError("graph should be direct
[ENH] Find and verify a minimal D-separating set in DAG (#5898) * Ran black * Add unit tests * Rename and fix citation * Black * Fix unite tests * Isort * Add algo description * Update networkx/algorithms/tests/test_d_separation.py * Update networkx/algorithms/traversal/breadth_first_search.py * Address dans comments * Fix unit tests * Update networkx/algorithms/tests/test_d_separation.py Co-authored-by: Dan Schult <[email protected]> * Apply suggestions from code review Co-authored-by: Dan Schult <[email protected]> * Update networkx/algorithms/dag.py Co-authored-by: Dan Schult <[email protected]> * Update networkx/algorithms/dag.py Co-authored-by: Dan Schult <[email protected]> * Fix comments * Clean up the docs a bit more * Merge Co-authored-by: Dan Schult <[email protected]>
minimal_d_separator
df9a128f4171d95671e5d9f5460970cc4bf8e3b3
networkx
d_separation.py
12
15
https://github.com/networkx/networkx.git
4
152
1
77
254
Python
{ "docstring": "Compute a minimal d-separating set between 'u' and 'v'.\n\n A d-separating set in a DAG is a set of nodes that blocks all paths\n between the two nodes, 'u' and 'v'. This function\n constructs a d-separating set that is \"minimal\", meaning it is the smallest\n d-separating set for 'u' and 'v'. This is not necessarily\n unique. For more details, see Notes.\n\n Parameters\n ----------\n G : graph\n A networkx DAG.\n u : node\n A node in the graph, G.\n v : node\n A node in the graph, G.\n\n Raises\n ------\n NetworkXError\n Raises a :exc:`NetworkXError` if the input graph is not a DAG.\n\n NodeNotFound\n If any of the input nodes are not found in the graph,\n a :exc:`NodeNotFound` exception is raised.\n\n References\n ----------\n .. [1] Tian, J., & Paz, A. (1998). Finding Minimal D-separators.\n\n Notes\n -----\n This function only finds ``a`` minimal d-separator. It does not guarantee\n uniqueness, since in a DAG there may be more than one minimal d-separator\n between two nodes. Moreover, this only checks for minimal separators\n between two nodes, not two sets. Finding minimal d-separators between\n two sets of nodes is not supported.\n\n Uses the algorithm presented in [1]_. The complexity of the algorithm\n is :math:`O(|E_{An}^m|)`, where :math:`|E_{An}^m|` stands for the\n number of edges in the moralized graph of the sub-graph consisting\n of only the ancestors of 'u' and 'v'. For full details, see [1]_.\n\n The algorithm works by constructing the moral graph consisting of just\n the ancestors of `u` and `v`. Then it constructs a candidate for\n a separating set ``Z'`` from the predecessors of `u` and `v`.\n Then BFS is run starting from `u` and marking nodes\n found from ``Z'`` and calling those nodes ``Z''``.\n Then BFS is run again starting from `v` and marking nodes if they are\n present in ``Z''``. Those marked nodes are the returned minimal\n d-separating set.\n\n https://en.wikipedia.org/wiki/Bayesian_network#d-separation\n ", "language": "en", "n_whitespaces": 463, "n_words": 306, "vocab_size": 148 }
def minimal_d_separator(G, u, v):
    if not nx.is_directed_acyclic_graph(G):
        raise nx.NetworkXError("graph should be directed acyclic")

    union_uv = {u, v}

    if any(n not in G.nodes for n in union_uv):
        raise nx.NodeNotFound("one or more specified nodes not found in the graph")

    # first construct the set of ancestors of X and Y
    x_anc = nx.ancestors(G, u)
    y_anc = nx.ancestors(G, v)
    D_anc_xy = x_anc.union(y_anc)
    D_anc_xy.update((u, v))

    # second, construct the moralization of the subgraph of Anc(X,Y)
    moral_G = nx.moral_graph(G.subgraph(D_anc_xy))

    # find a separating set Z' in moral_G
    Z_prime = set(G.predecessors(u)).union(set(G.predecessors(v)))

    # perform BFS on the graph from 'x' to mark
    Z_dprime = _bfs_with_marks(moral_G, u, Z_prime)
    Z = _bfs_with_marks(moral_G, v, Z_dprime)
    return Z


@not_implemented_for("undirected")
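An illustrative call on a three-node chain (assumes a NetworkX version that ships this function):

import networkx as nx

G = nx.DiGraph([("x", "z"), ("z", "y")])  # x -> z -> y
Z = nx.minimal_d_separator(G, "x", "y")
assert Z == {"z"}                         # z blocks the only path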
46,101
189,502
440
manim/mobject/svg/text_mobject.py
50
12
def _change_alignment_for_a_line(self, alignment, line_no): self.lines[1][line_no] = alignment if self.lines[1][line_no] == "center": self[line_no].move_to( np.array([self.get_center()[0], self[line_no].get_center()[1], 0]), ) elif self.lines[1][line_no] == "right": self[line_no].move_to( np.array( [ self.get_right()[0] - self[line_no].width / 2, self[line_no].get_center()[1], 0, ], ), ) elif self.lines[1][line_no] == "left": self[line_no].move_to( np.array( [ self.get_left()[0] + self[line_no].width / 2
Hide more private methods from the docs. (#2468) * hide privs from text_mobject.py * hide privs from tex_mobject.py * hide privs from code_mobject.py * hide privs from svg_mobject.py * remove SVGPath and utils from __init__.py * don't import string_to_numbers * hide privs from geometry.py * hide privs from matrix.py * hide privs from numbers.py * hide privs from three_dimensions.py * forgot underscore under set_stroke_width_from_length * there were more i missed * unhidea method that was used in docs * forgot other text2hash * remove svg_path from docs
_change_alignment_for_a_line
902e7eb4f0147b5882a613b67467e38a1d47f01e
manim
text_mobject.py
17
26
https://github.com/ManimCommunity/manim.git
4
196
0
30
294
Python
{ "docstring": "Function to change one line's alignment to a specific value.\n\n Parameters\n ----------\n alignment : :class:`str`\n Defines the alignment of paragraph. Possible values are \"left\", \"right\", \"center\".\n line_no : :class:`int`\n Defines the line number for which we want to set given alignment.\n ", "language": "en", "n_whitespaces": 98, "n_words": 41, "vocab_size": 34 }
def _change_alignment_for_a_line(self, alignment, line_no):
    self.lines[1][line_no] = alignment
    if self.lines[1][line_no] == "center":
        self[line_no].move_to(
            np.array([self.get_center()[0], self[line_no].get_center()[1], 0]),
        )
    elif self.lines[1][line_no] == "right":
        self[line_no].move_to(
            np.array(
                [
                    self.get_right()[0] - self[line_no].width / 2,
                    self[line_no].get_center()[1],
                    0,
                ],
            ),
        )
    elif self.lines[1][line_no] == "left":
        self[line_no].move_to(
            np.array(
                [
                    self.get_left()[0] + self[line_no].width / 2,
                    self[line_no].get_center()[1],
                    0,
                ],
            ),
        )
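The geometry behind the three branches, reduced to x-coordinates (container spans [0, 10] and the line is 4 units wide; values are illustrative):

left_x, right_x = 0.0, 10.0
line_width = 4.0

target_x = {
    "center": (left_x + right_x) / 2,   # line centre at container centre
    "right": right_x - line_width / 2,  # right edges flush
    "left": left_x + line_width / 2,    # left edges flush
}
assert target_x == {"center": 5.0, "right": 8.0, "left": 2.0}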
72,907
249,425
189
tests/handlers/test_room_member.py
51
22
def test_rejoin_forgotten_by_user(self) -> None: self.helper.join(self.room_id, user=self.
Fix that user cannot `/forget` rooms after the last member has left (#13546)
test_rejoin_forgotten_by_user
682dfcfc0db05d9c99b7615d950997535df4d533
synapse
test_room_member.py
12
18
https://github.com/matrix-org/synapse.git
1
170
0
35
265
Python
{ "docstring": "Test that a user that has forgotten a room can do a re-join.\n The room was not forgotten from the local server.\n One local user is still member of the room.", "language": "en", "n_whitespaces": 44, "n_words": 31, "vocab_size": 23 }
def test_rejoin_forgotten_by_user(self) -> None:
    self.helper.join(self.room_id, user=self.bob, tok=self.bob_token)

    self.helper.leave(self.room_id, user=self.alice, tok=self.alice_token)
    self.get_success(self.handler.forget(self.alice_ID, self.room_id))
    self.assertTrue(
        self.get_success(self.store.did_forget(self.alice, self.room_id))
    )

    # the server has not forgotten the room
    self.assertFalse(
        self.get_success(self.store.is_locally_forgotten_room(self.room_id))
    )

    self.helper.join(self.room_id, user=self.alice, tok=self.alice_token)
    # TODO: A join to a room does not invalidate the forgotten cache
    # see https://github.com/matrix-org/synapse/issues/13262
    self.store.did_forget.invalidate_all()
    self.assertFalse(
        self.get_success(self.store.did_forget(self.alice, self.room_id))
    )
50,601
203,990
53
django/contrib/gis/gdal/libgdal.py
35
10
def std_call(func): if os.name == "nt": return lwingdal[func] else: return lgdal[func] # #### Version-information functions. #### #
Refs #33476 -- Reformatted code with Black.
std_call
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
libgdal.py
9
5
https://github.com/django/django.git
2
25
0
30
77
Python
{ "docstring": "\n Return the correct STDCALL function for certain OSR routines on Win32\n platforms.\n ", "language": "en", "n_whitespaces": 22, "n_words": 12, "vocab_size": 12 }
def std_call(func):
    if os.name == "nt":
        return lwingdal[func]
    else:
        return lgdal[func]


# #### Version-information functions. ####

# Return GDAL library version information with the given key.
_version_info = std_call("GDALVersionInfo")
_version_info.argtypes = [c_char_p]
_version_info.restype = c_char_p
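The same argtypes/restype prototyping pattern, shown against libc so it runs without GDAL (POSIX only; `strlen` stands in for `GDALVersionInfo`):

import ctypes
import ctypes.util

libc = ctypes.CDLL(ctypes.util.find_library("c"))
strlen = libc.strlen
strlen.argtypes = [ctypes.c_char_p]   # declare the C signature ...
strlen.restype = ctypes.c_size_t      # ... so ctypes converts correctly
assert strlen(b"gdal") == 4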
51,581
206,594
85
django/utils/datastructures.py
21
10
def __getitem__(self, key): use_func = key.startswith(self.prefix) if use_func:
Refs #33476 -- Reformatted code with Black.
__getitem__
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
datastructures.py
13
8
https://github.com/django/django.git
3
55
0
15
91
Python
{ "docstring": "\n Retrieve the real value after stripping the prefix string (if\n present). If the prefix is present, pass the value through self.func\n before returning, otherwise return the raw value.\n ", "language": "en", "n_whitespaces": 57, "n_words": 28, "vocab_size": 22 }
def __getitem__(self, key):
    use_func = key.startswith(self.prefix)
    if use_func:
        key = key[len(self.prefix) :]
    value = super().__getitem__(key)
    if use_func:
        return self.func(value)
    return value
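A standalone sketch of the prefix-dispatch lookup with a concrete prefix and function:

class PrefixDict(dict):
    prefix = "upper_"               # illustrative prefix
    func = staticmethod(str.upper)  # applied only to prefixed lookups

    def __getitem__(self, key):
        use_func = key.startswith(self.prefix)
        if use_func:
            key = key[len(self.prefix):]
        value = super().__getitem__(key)
        return self.func(value) if use_func else value

d = PrefixDict(name="django")
assert d["name"] == "django"
assert d["upper_name"] == "DJANGO"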
11,418
55,900
71
tests/test_client.py
35
8
def not_enough_open_files() -> bool: try: import resource except ImportError: # resource limits is not a concept on all systems, notably Windows return False soft_limit, hard_limit = resource.getrlimit(resource.RLIMIT_NOFILE) return soft
Skip running the more intense client tests when the ulimit of files is too low (PrefectHQ/orion#1905) On some systems the ulimit for open files is set excruciatingly low, especially the default settings of 256 for macOS. We can skip the threading tests on systems with no enough open files. Co-authored-by: Michael Adkins <[email protected]>
not_enough_open_files
84d0f8a18f6a413fc70b78e4ccbef67372d05075
prefect
test_client.py
9
11
https://github.com/PrefectHQ/prefect.git
3
36
0
30
63
Python
{ "docstring": "\n The current process does not currently allow enough open files for this test.\n You can increase the number of open files with `ulimit -n 512`.\n ", "language": "en", "n_whitespaces": 35, "n_words": 25, "vocab_size": 23 }
def not_enough_open_files() -> bool:
    try:
        import resource
    except ImportError:
        # resource limits is not a concept on all systems, notably Windows
        return False

    soft_limit, hard_limit = resource.getrlimit(resource.RLIMIT_NOFILE)
    return soft_limit < 512 or hard_limit < 512
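A hedged usage sketch of gating a descriptor-hungry test on the check (pytest required; the test body is illustrative):

import pytest

@pytest.mark.skipif(
    not_enough_open_files(),
    reason="requires at least 512 open files; try `ulimit -n 512`",
)
def test_many_concurrent_connections():
    ...  # open lots of sockets/files here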
77,722
264,432
273
netbox/netbox/tables/tables.py
79
22
def configure(self, request): # Save ordering preference if request.user.is_authenticated: table_name = self.__class__.__name__ if self.prefixed_order_by_field in request.GET: # If an ordering has been specified as a query parameter, save it as the # user's preferred ordering for this table. ordering = request.GET.getlist(self.prefixed_order_by_field)
Move configure_table() logic to NetBoxTable.configure()
configure
23a80770e1e96c0351cb4ead62ebf294f639845a
netbox
tables.py
13
13
https://github.com/netbox-community/netbox.git
4
107
0
57
185
Python
{ "docstring": "\n Configure the table for a specific request context. This performs pagination and records\n the user's preferred ordering logic.\n ", "language": "en", "n_whitespaces": 40, "n_words": 18, "vocab_size": 17 }
def configure(self, request):
    # Save ordering preference
    if request.user.is_authenticated:
        table_name = self.__class__.__name__
        if self.prefixed_order_by_field in request.GET:
            # If an ordering has been specified as a query parameter, save it as the
            # user's preferred ordering for this table.
            ordering = request.GET.getlist(self.prefixed_order_by_field)
            request.user.config.set(f'tables.{table_name}.ordering', ordering, commit=True)
        elif ordering := request.user.config.get(f'tables.{table_name}.ordering'):
            # If no ordering has been specified, set the preferred ordering (if any).
            self.order_by = ordering

    # Paginate the table results
    paginate = {
        'paginator_class': EnhancedPaginator,
        'per_page': get_paginate_count(request)
    }
    tables.RequestConfig(request, paginate).configure(self)
@profiler.annotate_function
27,280
122,960
115
jax/interpreters/pxla.py
75
19
def _shard_arg(arg, devices, arg_indices): if isinstance(arg, ShardedDeviceArray) and arg_indices == arg.indices: # The shard_arg_handlers allow an extensible set of types to be sharded, but # inline handling for ShardedDeviceArray as a special case for performance # NOTE: we compare indices instead of sharding_spec because # pmap_benchmark.pmap_shard_args_benchmark indicates this is faster. return [ buf if buf.device()
Simplify Array's shard_arg_handler by merging pmap and pjit/xmap paths PiperOrigin-RevId: 497991966
_shard_arg
1fc9197c79af89ef292dc69d508ed1569f62c4f0
jax
pxla.py
12
9
https://github.com/google/jax.git
5
81
1
64
134
Python
{ "docstring": "Returns a list of size len(devices) containing per-device buffers.\n\n For the C++ pmap path, we fallback to Python (this function) to shard\n arguments that are not supported by the C++ `ShardArg`.\n\n Arrgs:\n arg: The Python argument.\n devices: The list of devices to shard over.\n arg_indices: A list of `len(devices)` indices to use to shard the argument.\n ", "language": "en", "n_whitespaces": 69, "n_words": 56, "vocab_size": 40 }
def _shard_arg(arg, devices, arg_indices):
    if isinstance(arg, ShardedDeviceArray) and arg_indices == arg.indices:
        # The shard_arg_handlers allow an extensible set of types to be sharded, but
        # inline handling for ShardedDeviceArray as a special case for performance
        # NOTE: we compare indices instead of sharding_spec because
        # pmap_benchmark.pmap_shard_args_benchmark indicates this is faster.
        return [
            buf if buf.device() == d else buf.copy_to_device(d)
            for d, buf in zip(devices, arg.device_buffers)
        ]
    else:
        arg = xla.canonicalize_dtype(arg)
        return shard_arg_handlers[type(arg)](arg, devices, arg_indices)


@profiler.annotate_function
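A framework-free sketch of "one buffer per device, chosen by per-device indices" (NumPy stands in for device buffers):

import numpy as np

arg = np.arange(8)
devices = ["dev0", "dev1"]                # illustrative device handles
arg_indices = [slice(0, 4), slice(4, 8)]  # len(devices) index objects

shards = [arg[idx] for idx in arg_indices]  # one shard per device
assert [s.tolist() for s in shards] == [[0, 1, 2, 3], [4, 5, 6, 7]]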
12,082
60,304
135
code/deep/BJMMD/caffe/python/caffe/test/test_coord_map.py
51
23
def test_rect(self): n3x3 = coord_net_spec(ks=3, stride=1, pad=0) n5x5 = coord_net_spec(ks=5, stride=2, pad=10) n3x5 = coord_net_spec(ks=[3, 5], stride=[1, 2], pad=[0, 10]) ax_3x3, a_3x3, b_3x3 = coord_map_from_to(n3x3.deconv, n3x3.data) ax_5x5, a_5x5, b_5x5 = coord_map_from_to(n5x5.deconv, n5x5.data) ax_3x5, a_3x5, b_3x5 = coord_map_from_to(n3x5.deconv, n3x5.d
Balanced joint maximum mean discrepancy for deep transfer learning
test_rect
cc4d0564756ca067516f71718a3d135996525909
transferlearning
test_coord_map.py
10
12
https://github.com/jindongwang/transferlearning.git
1
168
0
45
245
Python
{ "docstring": "\n Anisotropic mapping is equivalent to its isotropic parts.\n ", "language": "en", "n_whitespaces": 23, "n_words": 8, "vocab_size": 8 }
def test_rect(self):
    n3x3 = coord_net_spec(ks=3, stride=1, pad=0)
    n5x5 = coord_net_spec(ks=5, stride=2, pad=10)
    n3x5 = coord_net_spec(ks=[3, 5], stride=[1, 2], pad=[0, 10])
    ax_3x3, a_3x3, b_3x3 = coord_map_from_to(n3x3.deconv, n3x3.data)
    ax_5x5, a_5x5, b_5x5 = coord_map_from_to(n5x5.deconv, n5x5.data)
    ax_3x5, a_3x5, b_3x5 = coord_map_from_to(n3x5.deconv, n3x5.data)
    self.assertTrue(ax_3x3 == ax_5x5 == ax_3x5)
    self.assertEquals(a_3x3, a_3x5[0])
    self.assertEquals(b_3x3, b_3x5[0])
    self.assertEquals(a_5x5, a_3x5[1])
    self.assertEquals(b_5x5, b_3x5[1])
16,034
73,512
506
wagtail/contrib/settings/tests/test_model.py
102
17
def test_get_page_url_when_for_settings_fetched_via_for_site(self): self._create_importantpages_object() settings = ImportantPages.for_site(self.default_site) # Force site root paths query beforehand self.default_site.root_page._get_site_root_paths() for page_fk_field, expected_result in ( ("sign_up_page", "http://localhost/"), ("general_terms_page", "http://localhost/"), ("privacy_policy_page", "http://other/"), ): with self.subTest(page_fk_field=page_fk_field): # only the first request for each URL will trigger queries. # 2 are triggered instead of 1 here, because tests use the # database cache backed, and the cache is queried each time # to fetch site root paths (because there's no 'request' to # store them on) with self.assertNumQueries(2): self.assertEqual( settings.get_page_url(page_fk_field), expected_result
Reformat with black
test_get_page_url_when_for_settings_fetched_via_for_site
d10f15e55806c6944827d801cd9c2d53f5da4186
wagtail
test_model.py
16
20
https://github.com/wagtail/wagtail.git
2
115
0
74
201
Python
{ "docstring": "ImportantPages.for_site() cannot make the settings object\n request-aware, so things are a little less efficient, and the\n URLs returned will not be site-relative", "language": "en", "n_whitespaces": 35, "n_words": 22, "vocab_size": 21 }
def test_get_page_url_when_for_settings_fetched_via_for_site(self):
    self._create_importantpages_object()

    settings = ImportantPages.for_site(self.default_site)

    # Force site root paths query beforehand
    self.default_site.root_page._get_site_root_paths()

    for page_fk_field, expected_result in (
        ("sign_up_page", "http://localhost/"),
        ("general_terms_page", "http://localhost/"),
        ("privacy_policy_page", "http://other/"),
    ):
        with self.subTest(page_fk_field=page_fk_field):
            # only the first request for each URL will trigger queries.
            # 2 are triggered instead of 1 here, because tests use the
            # database cache backend, and the cache is queried each time
            # to fetch site root paths (because there's no 'request' to
            # store them on)
            with self.assertNumQueries(2):
                self.assertEqual(
                    settings.get_page_url(page_fk_field), expected_result
                )

                # when called directly
                self.assertEqual(
                    settings.get_page_url(page_fk_field), expected_result
                )

                # when called indirectly via shortcut
                self.assertEqual(
                    getattr(settings.page_url, page_fk_field), expected_result
                )
72,260
248,389
1,066
tests/federation/test_federation_sender.py
119
28
def test_send_receipts_with_backoff(self): mock_send_transaction = ( self.hs.get_federation_transport_client().send_transaction ) mock_send_transaction.return_value = make_awaitable({}) sender = self.hs.get_federation_sender() receipt = ReadReceipt( "room_id", "m.read", "user_id", ["event_id"], {"ts": 1234} ) self.successResultOf(defer.ensureDeferred(sender.send_read_receipt(receipt))) self.pump() # expect a call to send_transaction mock_send_transaction.assert_called_once() json_cb = mock_send_transaction.call_args[0][1] data = json_cb() self.assertEqual( data["edus"], [ { "edu_type": EduTypes.RECEIPT, "content": { "room_id": { "m.read": { "user_id": { "event_ids": ["event_id"], "data": {"ts": 1234}, } } } }, } ], ) mock_send_transaction.reset_mock() # send the second RR receipt = ReadReceipt( "room_id", "m.read", "user_id", ["other_id"], {"ts": 1234} ) self.successResultOf(defer.ensureDeferred(sender.send_read_receipt(receipt))) self.pump() mock_send_transaction.assert_not_called() self.reactor.advance(19) mock_send_transaction.assert_not_called() self.reactor.advance(10) mock_send_transaction.assert_ca
Additional constants for EDU types. (#12884) Instead of hard-coding strings in many places.
test_send_receipts_with_backoff
c52abc1cfdd9e5480cdb4a03d626fe61cacc6573
synapse
test_federation_sender.py
21
63
https://github.com/matrix-org/synapse.git
1
296
0
57
519
Python
{ "docstring": "Send two receipts in quick succession; the second should be flushed, but\n only after 20ms", "language": "en", "n_whitespaces": 21, "n_words": 15, "vocab_size": 15 }
def test_send_receipts_with_backoff(self):
    mock_send_transaction = (
        self.hs.get_federation_transport_client().send_transaction
    )
    mock_send_transaction.return_value = make_awaitable({})

    sender = self.hs.get_federation_sender()
    receipt = ReadReceipt(
        "room_id", "m.read", "user_id", ["event_id"], {"ts": 1234}
    )
    self.successResultOf(defer.ensureDeferred(sender.send_read_receipt(receipt)))

    self.pump()

    # expect a call to send_transaction
    mock_send_transaction.assert_called_once()
    json_cb = mock_send_transaction.call_args[0][1]
    data = json_cb()
    self.assertEqual(
        data["edus"],
        [
            {
                "edu_type": EduTypes.RECEIPT,
                "content": {
                    "room_id": {
                        "m.read": {
                            "user_id": {
                                "event_ids": ["event_id"],
                                "data": {"ts": 1234},
                            }
                        }
                    }
                },
            }
        ],
    )
    mock_send_transaction.reset_mock()

    # send the second RR
    receipt = ReadReceipt(
        "room_id", "m.read", "user_id", ["other_id"], {"ts": 1234}
    )
    self.successResultOf(defer.ensureDeferred(sender.send_read_receipt(receipt)))
    self.pump()
    mock_send_transaction.assert_not_called()

    self.reactor.advance(19)
    mock_send_transaction.assert_not_called()

    self.reactor.advance(10)
    mock_send_transaction.assert_called_once()
    json_cb = mock_send_transaction.call_args[0][1]
    data = json_cb()
    self.assertEqual(
        data["edus"],
        [
            {
                "edu_type": EduTypes.RECEIPT,
                "content": {
                    "room_id": {
                        "m.read": {
                            "user_id": {
                                "event_ids": ["other_id"],
                                "data": {"ts": 1234},
                            }
                        }
                    }
                },
            }
        ],
    )
18,094
86,265
3,597
src/sentry/lang/javascript/processor.py
857
76
def process_frame(self, processable_frame, processing_task): frame = processable_frame.frame token = None cache = self.cache sourcemaps = self.sourcemaps all_errors = [] sourcemap_applied = False # can't demangle if there's no filename or line number present if not frame.get("abs_path") or not frame.get("lineno"): return # also can't demangle node's internal modules # therefore we only process user-land frames (starting with /) # or those created by bundle/webpack internals if self.data.get("platform") == "node" and not frame.get("abs_path").startswith( ("/", "app:", "webpack:") ): return errors = cache.get_errors(frame["abs_path"]) if errors: all_errors.extend(errors) # This might fail but that's okay, we try with a different path a # bit later down the road. source = self.get_sourceview(frame["abs_path"]) in_app = None new_frame = dict(frame) raw_frame = dict(frame) sourcemap_url, sourcemap_view = sourcemaps.get_link(frame["abs_path"]) self.sourcemaps_touched.add(sourcemap_url) if sourcemap_view and frame.get("colno") is None: all_errors.append( {"type": EventError.JS_NO_COLUMN, "url": http.expose_url(frame["abs_path"])} ) elif sourcemap_view: if is_data_uri(sourcemap_url): sourcemap_label = frame["abs_path"] else: sourcemap_label = sourcemap_url sourcemap_label = http.expose_url(sourcemap_label) if frame.get("function"): minified_function_name = frame["function"] minified_source = self.get_sourceview(frame["abs_path"]) else: minified_function_name = minified_source = None try: # Errors are 1-indexed in the frames, so we need to -1 to get # zero-indexed value from tokens. assert frame["lineno"] > 0, "line numbers are 1-indexed" token = sourcemap_view.lookup( frame["lineno"] - 1, frame["colno"] - 1, minified_function_name, minified_source ) except Exception: token = None all_errors.append( { "type": EventError.JS_INVALID_SOURCEMAP_LOCATION, "column": frame.get("colno"), "row": frame.get("lineno"), "source": frame["abs_path"], "sourcemap": sourcemap_label, } ) # persist the token so that we can find it later processable_frame.data["token"] = token # Store original data in annotation new_frame["data"] = dict(frame.get("data") or {}, sourcemap=sourcemap_label) sourcemap_applied = True if token is not None: abs_path = non_standard_url_join(sourcemap_url, token.src) logger.debug( "Mapping compressed source %r to mapping in %r", frame["abs_path"], abs_path ) source = self.get_sourceview(abs_path) if source is None: errors = cache.get_errors(abs_path) if errors: all_errors.extend(errors) else: all_errors.append( {"type": EventError.JS_MISSING_SOURCE, "url": http.expose_url(abs_path)} )
ref(processor): Use symbolic-sourcemapcache for JavaScript Sourcemap processing (#38551) This PR attempts to replace the currently used `rust-sourcemap` crate and its symbolic Python bindings with the `symbolic-sourcemapcache` crate. It makes the whole processing pipeline easier to maintain, as it pushes some work directly to Symbolic, and we get better function names due to better scope resolution and in some cases better file URLs. Other than that, we don't use `SourceView` anymore, as it seemed like an unnecessary layer of abstraction for something that is used only for `context_lines` extraction. We cache `utf-8` decoded sources directly now, as this way we can encode them only once for `SmCache` instance initialization, and use the source directly otherwise for context lines extraction. Some tests had to be updated to express the current behavior. The notable thing is `useless_fn_names = ["<anonymous>", "__webpack_require__", "__webpack_modules__"]`, which is mostly for the `production` mode of webpack, which by default trims all the function names; we decided to fall back to the minified names in those cases instead (this was already the old behavior). It should be possible to extract something better, but we'd need to parse all `sourceContents` from the sourcemap to do that, as the only way to get a better function name for the case mentioned above is to look at the right-hand side of a default node export, in the form of `module.exports = function foo () {}`. This should give us `foo`, yet the only thing we can extract is `module.exports`, as the minified form of this expression in webpack production mode is `module.exports = function () {}`.
process_frame
ae9c0d8a33d509d9719a5a03e06c9797741877e9
sentry
processor.py
21
145
https://github.com/getsentry/sentry.git
51
953
0
421
1,670
Python
{ "docstring": "\n Attempt to demangle the given frame.\n ", "language": "en", "n_whitespaces": 21, "n_words": 6, "vocab_size": 6 }
def process_frame(self, processable_frame, processing_task): frame = processable_frame.frame token = None cache = self.cache sourcemaps = self.sourcemaps all_errors = [] sourcemap_applied = False # can't demangle if there's no filename or line number present if not frame.get("abs_path") or not frame.get("lineno"): return # also can't demangle node's internal modules # therefore we only process user-land frames (starting with /) # or those created by bundle/webpack internals if self.data.get("platform") == "node" and not frame.get("abs_path").startswith( ("/", "app:", "webpack:") ): return errors = cache.get_errors(frame["abs_path"]) if errors: all_errors.extend(errors) # This might fail but that's okay, we try with a different path a # bit later down the road. source = self.get_sourceview(frame["abs_path"]) in_app = None new_frame = dict(frame) raw_frame = dict(frame) sourcemap_url, sourcemap_view = sourcemaps.get_link(frame["abs_path"]) self.sourcemaps_touched.add(sourcemap_url) if sourcemap_view and frame.get("colno") is None: all_errors.append( {"type": EventError.JS_NO_COLUMN, "url": http.expose_url(frame["abs_path"])} ) elif sourcemap_view: if is_data_uri(sourcemap_url): sourcemap_label = frame["abs_path"] else: sourcemap_label = sourcemap_url sourcemap_label = http.expose_url(sourcemap_label) if frame.get("function"): minified_function_name = frame["function"] minified_source = self.get_sourceview(frame["abs_path"]) else: minified_function_name = minified_source = None try: # Errors are 1-indexed in the frames, so we need to -1 to get # zero-indexed value from tokens. assert frame["lineno"] > 0, "line numbers are 1-indexed" token = sourcemap_view.lookup( frame["lineno"] - 1, frame["colno"] - 1, minified_function_name, minified_source ) except Exception: token = None all_errors.append( { "type": EventError.JS_INVALID_SOURCEMAP_LOCATION, "column": frame.get("colno"), "row": frame.get("lineno"), "source": frame["abs_path"], "sourcemap": sourcemap_label, } ) # persist the token so that we can find it later processable_frame.data["token"] = token # Store original data in annotation new_frame["data"] = dict(frame.get("data") or {}, sourcemap=sourcemap_label) sourcemap_applied = True if token is not None: abs_path = non_standard_url_join(sourcemap_url, token.src) logger.debug( "Mapping compressed source %r to mapping in %r", frame["abs_path"], abs_path ) source = self.get_sourceview(abs_path) if source is None: errors = cache.get_errors(abs_path) if errors: all_errors.extend(errors) else: all_errors.append( {"type": EventError.JS_MISSING_SOURCE, "url": http.expose_url(abs_path)} ) # the tokens are zero indexed, so offset correctly new_frame["lineno"] = token.src_line + 1 new_frame["colno"] = token.src_col + 1 # Try to use the function name we got from symbolic original_function_name = token.function_name # In the ideal case we can use the function name from the # frame and the location to resolve the original name # through the heuristics in our sourcemap library. if original_function_name is None: last_token = None # Find the previous token for function name handling as a # fallback. 
if ( processable_frame.previous_frame and processable_frame.previous_frame.processor is self ): last_token = processable_frame.previous_frame.data.get("token") if last_token: original_function_name = last_token.name if original_function_name is not None: new_frame["function"] = original_function_name filename = token.src # special case webpack support # abs_path will always be the full path with webpack:/// prefix. # filename will be relative to that if abs_path.startswith("webpack:"): filename = abs_path # webpack seems to use ~ to imply "relative to resolver root" # which is generally seen for third party deps # (i.e. node_modules) if "/~/" in filename: filename = "~/" + abs_path.split("/~/", 1)[-1] elif WEBPACK_NAMESPACE_RE.match(filename): filename = re.sub(WEBPACK_NAMESPACE_RE, "./", abs_path) else: filename = filename.split("webpack:///", 1)[-1] # As noted above: # * [js/node] '~/' means they're coming from node_modules, so these are not app dependencies # * [node] same goes for `./node_modules/` and '../node_modules/', which is used when bundling node apps # * [node] and webpack, which includes its own code to bootstrap all modules and its internals # eg. webpack:///webpack/bootstrap, webpack:///external if ( filename.startswith("~/") or "/node_modules/" in filename or not filename.startswith("./") ): in_app = False # And conversely, local dependencies start with './' elif filename.startswith("./"): in_app = True # We want to explicitly generate a webpack module name new_frame["module"] = generate_module(filename) # while you could technically use a subpath of 'node_modules' for your libraries, # it would be an extremely complicated decision and we've not seen anyone do it # so instead we assume if node_modules is in the path it's part of the vendored code elif "/node_modules/" in abs_path: in_app = False if abs_path.startswith("app:"): if filename and NODE_MODULES_RE.search(filename): in_app = False else: in_app = True new_frame["abs_path"] = abs_path new_frame["filename"] = filename if not frame.get("module") and abs_path.startswith( ("http:", "https:", "webpack:", "app:") ): new_frame["module"] = generate_module(abs_path) elif sourcemap_url: new_frame["data"] = dict( new_frame.get("data") or {}, sourcemap=http.expose_url(sourcemap_url) ) # TODO: theoretically a minified source could point to # another mapped, minified source changed_frame = self.expand_frame(new_frame, source=source) # If we did not manage to match but we do have a line or column # we want to report an error here. if not new_frame.get("context_line") and source and new_frame.get("colno") is not None: all_errors.append( { "type": EventError.JS_INVALID_SOURCEMAP_LOCATION, "column": new_frame["colno"], "row": new_frame["lineno"], "source": new_frame["abs_path"], } ) changed_raw = sourcemap_applied and self.expand_frame(raw_frame) if sourcemap_applied or all_errors or changed_frame or changed_raw: # In case we are done processing, we iterate over all errors that we got # and we filter out all `JS_MISSING_SOURCE` errors since we consider if we have # a `context_line` we have a symbolicated frame and we don't need to show the error has_context_line = bool(new_frame.get("context_line")) if has_context_line: all_errors[:] = [ x for x in all_errors if x.get("type") is not EventError.JS_MISSING_SOURCE ] if in_app is not None: new_frame["in_app"] = in_app raw_frame["in_app"] = in_app # Run new processor only for frames that were actually modified in any way.
if should_run_smcache(self) and new_frame != raw_frame: smcache_rv = self.smcache_processor.process_frame(processable_frame, None) set_path(new_frame, "data", "smcache_frame", value=smcache_rv[0][0]) new_frames = [new_frame] raw_frames = [raw_frame] if changed_raw else None return new_frames, raw_frames, all_errors
78,250
265,949
229
netbox/utilities/forms/utils.py
95
14
def validate_csv(headers, fields, required_fields): # Validate provided column headers is_update = False for field, to_field in headers.items(): if field == "id": is_update = True continue if field not in fields: raise forms.ValidationError(f'Unexpected column header "{field}" found.')
7961 CSV bulk update (#10715) * 7961 add csv bulk update * temp checkin - blocked * 7961 bugfix and cleanup * 7961 change to id, add docs * 7961 add tests cases * 7961 fix does not exist validation error * 7961 fix does not exist validation error * 7961 update tests * 7961 update tests * 7961 update tests * 7961 update tests * 7961 update tests * 7961 update tests * 7961 update tests * 7961 update tests * 7961 update tests * 7961 make test cases more explicit * 7961 make test cases more explicit * 7961 make test cases more explicit * 7961 make test cases more explicit * 7961 make test cases more explicit * 7961 make test cases more explicit * 7961 make test cases more explicit * 7961 optimize loading csv test data * 7961 update tests remove redundant code * 7961 avoid MPTT issue in test cases
validate_csv
cb815ede60ab298ca13907d523126380f50a8023
netbox
utils.py
15
16
https://github.com/netbox-community/netbox.git
11
118
0
59
212
Python
{ "docstring": "\n Validate that parsed csv data conforms to the object's available fields. Raise validation errors\n if parsed csv data contains invalid headers or does not contain required headers.\n ", "language": "en", "n_whitespaces": 37, "n_words": 27, "vocab_size": 24 }
def validate_csv(headers, fields, required_fields): # Validate provided column headers is_update = False for field, to_field in headers.items(): if field == "id": is_update = True continue if field not in fields: raise forms.ValidationError(f'Unexpected column header "{field}" found.') if to_field and not hasattr(fields[field], 'to_field_name'): raise forms.ValidationError(f'Column "{field}" is not a related object; cannot use dots') if to_field and not hasattr(fields[field].queryset.model, to_field): raise forms.ValidationError(f'Invalid related object attribute for column "{field}": {to_field}') # Validate required fields (if not an update) if not is_update: for f in required_fields: if f not in headers: raise forms.ValidationError(f'Required column header "{f}" not found.')
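A hedged usage sketch for the record above — the `fields` mapping below is a made-up stand-in for the NetBox form internals:

headers = {"id": None, "colour": None}  # "id" flips is_update; "colour" is unknown
fields = {"name": name_field}           # hypothetical mapping of importable form fields
try:
    validate_csv(headers, fields, required_fields=["name"])
except forms.ValidationError as e:
    print(e)  # 'Unexpected column header "colour" found.'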
@public
49,299
199,621
37
sympy/polys/appellseqs.py
21
9
def bernoulli_poly(n, x=None, polys=False): return appell_poly(n, [[1], [1, QQ(-1,2)]], QQ(1,2), lambda p, i: p * QQ(1<<(i-1), 1-(1<<i)), QQ, x, polys) @public
Initial definition of Appell sequences
bernoulli_poly
e875bdb804b0285e4a9bd8de0158436e792c03cb
sympy
appellseqs.py
14
3
https://github.com/sympy/sympy.git
1
78
1
21
111
Python
{ "docstring": "Generates the Bernoulli polynomial of degree `n` in `x`.\n\n Parameters\n ==========\n\n n : int\n Degree of the polynomial.\n x : optional\n polys : bool, optional\n If True, return a Poly, otherwise (default) return an expression.\n ", "language": "en", "n_whitespaces": 67, "n_words": 35, "vocab_size": 29 }
def bernoulli_poly(n, x=None, polys=False): return appell_poly(n, [[1], [1, QQ(-1,2)]], QQ(1,2), lambda p, i: p * QQ(1<<(i-1), 1-(1<<i)), QQ, x, polys) @public
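A quick usage sketch, assuming the module is importable from the path shown above (sympy/polys/appellseqs.py):

from sympy.abc import x
from sympy.polys.appellseqs import bernoulli_poly

# The degree-2 Bernoulli polynomial; polys=True would return a Poly instead
print(bernoulli_poly(2, x))  # x**2 - x + 1/6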
34,465
149,634
149
tests/data/test_btanalysis.py
91
35
def test_calculate_max_drawdown_abs(values, relative, result, result_rel): dates = [Arrow(2020, 1, 1).shift(days=i) for i in range(len(values))] df = DataFrame(zip(values, dates), columns=['profit_abs', 'open_date'])
Improve test for max_drawdown calculations
test_calculate_max_drawdown_abs
9bc6bbe472f58bbec82d741ab916d66c52b2978a
freqtrade
test_btanalysis.py
12
14
https://github.com/freqtrade/freqtrade.git
2
152
0
69
236
Python
{ "docstring": "\n Test case from issue https://github.com/freqtrade/freqtrade/issues/6655\n [1000, 500, 1000, 11000, 10000] # absolute results\n [1000, 50%, 0%, 0%, ~9%] # Relative drawdowns\n ", "language": "en", "n_whitespaces": 46, "n_words": 21, "vocab_size": 18 }
def test_calculate_max_drawdown_abs(values, relative, result, result_rel): dates = [Arrow(2020, 1, 1).shift(days=i) for i in range(len(values))] df = DataFrame(zip(values, dates), columns=['profit_abs', 'open_date']) # sort by profit and reset index df = df.sort_values('profit_abs').reset_index(drop=True) df1 = df.copy() drawdown, hdate, ldate, hval, lval, drawdown_rel = calculate_max_drawdown( df, date_col='open_date', starting_balance=1000, relative=relative) # Ensure df has not been altered. assert df.equals(df1) assert isinstance(drawdown, float) assert isinstance(drawdown_rel, float) # High must be before low assert hdate < ldate # High value must be higher than low value assert hval > lval assert drawdown == result assert pytest.approx(drawdown_rel) == result_rel
29,749
132,413
130
python/ray/tune/tests/test_checkpoint_manager.py
32
15
def testOnCheckpointUnavailableAttribute(self): checkpoint_manager = self.checkpoint_manager(keep_checkpoints_num=1) no_attr_checkpoint = Checkpoint(Checkpoint.PERSISTENT, 0, {}) with patch.object(logger, "error") as log_error_mock: checkpoint_manager.on_checkpoint(no_attr_checkpoint) log_error_mock.assert_called_once() # The newest checkpoint should still be set despite this error.
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
testOnCheckpointUnavailableAttribute
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
ray
test_checkpoint_manager.py
11
9
https://github.com/ray-project/ray.git
1
62
0
30
106
Python
{ "docstring": "\n Tests that an error is logged when the associated result of the\n checkpoint has no checkpoint score attribute.\n ", "language": "en", "n_whitespaces": 40, "n_words": 18, "vocab_size": 16 }
def testOnCheckpointUnavailableAttribute(self): checkpoint_manager = self.checkpoint_manager(keep_checkpoints_num=1) no_attr_checkpoint = Checkpoint(Checkpoint.PERSISTENT, 0, {}) with patch.object(logger, "error") as log_error_mock: checkpoint_manager.on_checkpoint(no_attr_checkpoint) log_error_mock.assert_called_once() # The newest checkpoint should still be set despite this error. self.assertEqual( checkpoint_manager.newest_persistent_checkpoint, no_attr_checkpoint )
@pytest.fixture
5,090
27,097
28
saleor/plugins/webhook/tests/subscription_webhooks/fixtures.py
10
8
def subscription_app_status_changed_webhook(subscription_webhook): return subscription_webhook( APP_STATUS_CHANGED_SUBSCRIPTION_QUERY, WebhookEventAsyncType.APP_STATUS_CHANGED, ) CATEGORY_CREATED_SUBSCRIPTION_QUERY = """ subscription{ event{ ...on CategoryCreated{ category{ id } } } } """ @pytest.fixture
New events related to apps changes. (#9698) * New events related to apps changes. * Schema update after rebase * CHANGELOG.md update * New events description fix * Missing app event added to CHANGELOG.md
subscription_app_status_changed_webhook
b5e414c98a1535d287721c859994424cf0eea081
saleor
fixtures.py
8
5
https://github.com/saleor/saleor.git
1
15
1
10
37
Python
{ "docstring": "\n subscription{\n event{\n ...on CategoryCreated{\n category{\n id\n }\n }\n }\n }\n", "language": "en", "n_whitespaces": 69, "n_words": 10, "vocab_size": 7 }
def subscription_app_status_changed_webhook(subscription_webhook): return subscription_webhook( APP_STATUS_CHANGED_SUBSCRIPTION_QUERY, WebhookEventAsyncType.APP_STATUS_CHANGED, ) CATEGORY_CREATED_SUBSCRIPTION_QUERY = """ subscription{ event{ ...on CategoryCreated{ category{ id } } } } """ @pytest.fixture
35,617
153,801
196
modin/core/dataframe/pandas/dataframe/dataframe.py
49
23
def binary_op(self, op, right_frame, join_type="outer"): left_parts, right_parts, joined_index, row_lengths = self._copartition( 0, right_frame, join_type, sort=True ) # unwrap list returned by `copartition`. right_parts = right_parts[0]
PERF-#4493: Use partition size caches more in Modin dataframe. (#4495) Co-authored-by: Devin Petersohn <[email protected]> Co-authored-by: Yaroslav Igoshev <[email protected]> Signed-off-by: mvashishtha <[email protected]>
binary_op
cca9468648521e9317de1cb69cf8e6b1d5292d21
modin
dataframe.py
11
16
https://github.com/modin-project/modin.git
1
104
0
40
149
Python
{ "docstring": "\n Perform an operation that requires joining with another Modin DataFrame.\n\n Parameters\n ----------\n op : callable\n Function to apply after the join.\n right_frame : PandasDataframe\n Modin DataFrame to join with.\n join_type : str, default: \"outer\"\n Type of join to apply.\n\n Returns\n -------\n PandasDataframe\n New Modin DataFrame.\n ", "language": "en", "n_whitespaces": 160, "n_words": 45, "vocab_size": 36 }
def binary_op(self, op, right_frame, join_type="outer"): left_parts, right_parts, joined_index, row_lengths = self._copartition( 0, right_frame, join_type, sort=True ) # unwrap list returned by `copartition`. right_parts = right_parts[0] new_frame = self._partition_mgr_cls.binary_operation( 1, left_parts, lambda l, r: op(l, r), right_parts ) new_columns = self.columns.join(right_frame.columns, how=join_type) return self.__constructor__( new_frame, joined_index, new_columns, row_lengths, column_widths=self._column_widths_cache, )
@contextlib.contextmanager
23,107
108,226
109
lib/matplotlib/__init__.py
42
15
def rc_file(fname, *, use_default_template=True): # Deprecation warnings were already handled in rc_params_from_file, no need # to reemit them here. with _api.suppress_matplotlib_deprecation_warning(): from .style.core import STYLE_BLACKLIST rc_from_file = rc_params_from_file( fname, use_default_template=use_default_template) rcParams.update({k: rc_from_file[k] for k in rc_from_file if k not in STYLE_BLACKLIST})
Fix removed cross-references
rc_file
7c6c5f6215b40a27cfefb7bf21246299fd9b3a1e
matplotlib
__init__.py
12
7
https://github.com/matplotlib/matplotlib.git
3
58
1
37
103
Python
{ "docstring": "\n Update `.rcParams` from file.\n\n Style-blacklisted `.rcParams` (defined in\n ``matplotlib.style.core.STYLE_BLACKLIST``) are not updated.\n\n Parameters\n ----------\n fname : str or path-like\n A file with Matplotlib rc settings.\n\n use_default_template : bool\n If True, initialize with default parameters before updating with those\n in the given file. If False, the current configuration persists\n and only the parameters specified in the file are updated.\n ", "language": "en", "n_whitespaces": 111, "n_words": 58, "vocab_size": 43 }
def rc_file(fname, *, use_default_template=True): # Deprecation warnings were already handled in rc_params_from_file, no need # to reemit them here. with _api.suppress_matplotlib_deprecation_warning(): from .style.core import STYLE_BLACKLIST rc_from_file = rc_params_from_file( fname, use_default_template=use_default_template) rcParams.update({k: rc_from_file[k] for k in rc_from_file if k not in STYLE_BLACKLIST}) @contextlib.contextmanager
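A short usage sketch for the record above (the style-file path is hypothetical):

import matplotlib as mpl

mpl.rc_file("styles/paper.mplstyle")  # reset to defaults, then apply the file
mpl.rc_file("styles/paper.mplstyle", use_default_template=False)  # only override the keys listed in the file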
32,000
140,529
18
python/ray/util/collective/collective_group/gloo_util.py
9
7
def create_gloo_context(rank, world_size): context = pygloo.rendezvous.Context(rank, world_size) return context
Clean up docstyle in python modules and add LINT rule (#25272)
create_gloo_context
905258dbc19753c81039f993477e7ab027960729
ray
gloo_util.py
9
3
https://github.com/ray-project/ray.git
1
22
0
8
36
Python
{ "docstring": "Create a GLOO context using GLOO APIs.\n\n Args:\n rank: the rank of this process.\n world_size: the number of processes of this collective group.\n\n Returns:\n context (pygloo.Context): a GLOO context.\n ", "language": "en", "n_whitespaces": 59, "n_words": 29, "vocab_size": 21 }
def create_gloo_context(rank, world_size): context = pygloo.rendezvous.Context(rank, world_size) return context
6,763
37,307
35
src/transformers/testing_utils.py
12
5
def require_bitsandbytes(test_case): if not is_bitsandbytes_available(): return unittest.skip("test requires bnb")(test_case) else: return test_case
Add support for bitsandbytes (#15622) * Add initial BNB integration * fixup! Add initial BNB integration * Add bnb test decorator * Update Adamw8bit option name * Use the full bnb package name * Overide bnb for all embedding layers * Fix package name * Formatting * Remove unnecessary import * Update src/transformers/trainer.py Co-authored-by: Stas Bekman <[email protected]> * Rename AdamwBNB optimizer option * Add training test checking that bnb memory utilization is lower * fix merge * fix merge; fix + extend new test * cleanup * expand bnb * move all require_* candidates to testing_utils.py Co-authored-by: Stas Bekman <[email protected]> Co-authored-by: Stas Bekman <[email protected]>
require_bitsandbytes
3104036e7f1a3cd6e07a69d648c3597de32f72fe
transformers
testing_utils.py
11
5
https://github.com/huggingface/transformers.git
2
26
0
11
49
Python
{ "docstring": "\n Decorator for bits and bytes (bnb) dependency\n ", "language": "en", "n_whitespaces": 14, "n_words": 7, "vocab_size": 7 }
def require_bitsandbytes(test_case): if not is_bitsandbytes_available(): return unittest.skip("test requires bnb")(test_case) else: return test_case
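A hedged usage sketch — the test class and method names below are hypothetical; only the decorator comes from the record above:

import unittest

class Adam8bitTest(unittest.TestCase):
    @require_bitsandbytes
    def test_adam8bit(self):
        # Runs only when bitsandbytes is installed; otherwise unittest.skip applies
        ...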
85,374
285,675
75
openbb_terminal/api.py
22
17
def copy_func(f) -> Callable:
Next release : reports on steroids (#2349) * fix gov tests * refactor insider * new virtual path extraction * removed some symbol default params as they're considered critical * little adjustments * portfolio refactor * merge API factory * add helpers, stocks, crypto, forex * minor forex changes * include forex api paths * add 2 missing forex funcs * portfolio brokers refactor * display help on api func call * add econometrics virtual paths to api * add api unit test * fixed report for the new api * minor portfolio refactorings * added gdapps * anchor_yield path * some more crypto path fixes * small change * fixed wrong param * minor fixes * wip - inital commit for forex report * add bw as a model, we'll get better solution afterwards * added ema with dummy model as it adds great functionality to the report * minor fixes * wip - added functions to forex report * add feedparser news path * add new virtual paths to api * adding commands to equity report * revert to old paths, new ones were breaking * Add in very basic ETF report * Add candle chart to ETF report * add etf load * allow use of candle without data * add raw to candle * added forex report * ongoing equity report * equity report change * fix some portfolio bugs and add docstrings * include portfolio paths and coin class * add crypto paths * change event dates to str * starting economy report * window for limit * equity report and refactor newsapi * add helper to api * update on economy report * equity report * update economy report * refactor some docstrings * change maturities helper * refactor newsapi * refactor futures command * add some sauce to ycrv plot * black * update report * refactor alphavantage * refactor wsj * update economy report * ycrv tenor * map avaiable_indices * map economy helpers * fix econdb docstring * add plots on economy report * minor fixes * wip - crypto report * update economy report * added same default args as view * added view to explicity use chart=True when suing the api * adjustments - removed rich tables to use only df * final version economy report * change report name * equity report for review * linting * add etf symbols endpoint * incorporate feedback economy report * fix reports launch by adding tag to economy report * fix equity bug * remove analyst name * fix * fix news * make links hyperlinks for equity * click links * fixed arg name * improved news * small improves * Fix light terminal stylesheet that would prevent using it in notebooks (#2473) * improved report * run reports in installer * fix #2209 * minor ycrv refactoring * refactor portfolio/holdv virtual path * refactor benchmark trades * fix events args * adapt economy report to changes * fix portfolio controller bug * holdv refactor * refactor perf command * start portfolio report * remove perf view * refactor holp * add textwrap3 to poetry (doesn't solve the error) * fix equity after merge * add some rolling commands * fix equity after save button * improved crypto report, plus minor fixes * minor fixes on the reports * add maxdd and distr * refactor qa * var command * refactor qa expected shortfall * add es command * add es command * fix qa percentile bug * fix economy rendering * refactor qa omega * add om command * add summary command * add dret command * add mret command * add yret command * add metrics * add allocs to report * remove bro and po commands, add later * fixed some tests * adjustments to crypto report * Fix docstring for VSCode Added a note about installing Jupyter PowerToys extension for optimal API usage in 
Jupyter VSCode, in the API_README.md. * minor adjustment * remove nft calendar model virtual paths * Add in Portfolio report * fix external axes portfolio view * Update portfolio report with rolling plots * Details for ETF and Portfolio * fix economy report * change analyst to openbb * floppy * fixed unmatched axis in reports * Speed up tests * fix file and load on po * get_news output * add some po paths * Add integration tests for Reports menu * refactor maxsharpe * open maxsharpe * open minrisk * open maxutil * open maxret * Added fixes * black * remove useless views * Fixed small issue * refactor ef * open ef api * portfolio optimization report * Added fixes * unblock api loading * add more endpoints * update po report * unblock api loading * update po report * expose herc * expose property endpoint * Added fixes * More api fixes * flake8 * Fixed some mypy * news api model * flake8 * mypy fix * mypy * black * pylint * fix tests * markdown * markdown * Added fixes * fix economy report * merge * fix economy report * remove empty notebook * expose nco * remove jupyter notebook * expose plot endpoint * remove po report, just used for tests * api v paths plot * remove api_old * change loading msg Co-authored-by: montezdesousa <[email protected]> Co-authored-by: hjoaquim <[email protected]> Co-authored-by: montezdesousa <[email protected]> Co-authored-by: Om Gupta <[email protected]> Co-authored-by: minhhoang1023 <[email protected]> Co-authored-by: JerBouma <[email protected]> Co-authored-by: Theodore Aptekarev <[email protected]> Co-authored-by: Om Gupta <[email protected]> Co-authored-by: Diogo Sousa <[email protected]> Co-authored-by: Colin Delahunty <[email protected]> Co-authored-by: northern-64bit <[email protected]> Co-authored-by: colin99d <[email protected]> Co-authored-by: Minh Hoang <[email protected]>
copy_func
72b0a9f1ee8b91ad9fd9e76d80d2ccab51ee6d21
OpenBBTerminal
api.py
10
21
https://github.com/OpenBB-finance/OpenBBTerminal.git
1
60
0
18
91
Python
{ "docstring": "Copies the contents and attributes of the entered function. Based on https://stackoverflow.com/a/13503277\n Parameters\n ----------\n f: Callable\n Function to be copied\n Returns\n -------\n g: Callable\n New function\n ", "language": "en", "n_whitespaces": 61, "n_words": 26, "vocab_size": 24 }
def copy_func(f) -> Callable: g = types.FunctionType( f.__code__, f.__globals__, name=f.__name__, argdefs=f.__defaults__, closure=f.__closure__, ) g = functools.update_wrapper(g, f) g.__kwdefaults__ = f.__kwdefaults__ return g
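A quick behavioral sketch of copy_func, grounded in the code above:

def add(a, b=2):
    return a + b

g = copy_func(add)
assert g(1) == 3            # same behavior, defaults preserved
assert g.__name__ == "add"  # metadata copied via functools.update_wrapper
assert g is not add         # but an independent function object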
3,185
20,032
463
pipenv/patched/notpip/_vendor/distlib/markers.py
123
19
def evaluate(self, expr, context): if isinstance(expr, string_types): if expr[0] in '\'"': result = expr[1:-1] else: if expr not in context: raise SyntaxError('unknown variable: %s' % expr) result = context[expr] else: assert isinstance(expr, dict) op = expr['op'] if op not in self.operations: raise NotImplementedError('op not implemented: %s' % op) elhs = expr['lhs'] erhs = expr['rhs'] if _is_literal(expr['lhs']) and _is_literal(expr['rhs']): raise SyntaxError('invalid comparison: %s %s %s' % (elhs, op, erhs)) lhs = self.evaluate(elhs, context) rhs = self.evaluate(erhs, context) if ((elhs == 'python_version' or erhs == 'python_version') and op in ('<', '<=', '>', '>=', '===', '==', '!=', '~=')): lhs = NV(lhs) rhs = NV(rhs) elif elhs == 'python_version' and op in ('in', 'not in'): lhs = NV(lhs) rhs = _get_versions(rhs) result = self.operations[op](lhs, rhs) return result
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyproject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
evaluate
f3166e673fe8d40277b804d35d77dcdb760fc3b3
pipenv
markers.py
16
28
https://github.com/pypa/pipenv.git
12
233
0
73
395
Python
{ "docstring": "\n Evaluate a marker expression returned by the :func:`parse_requirement`\n function in the specified context.\n ", "language": "en", "n_whitespaces": 35, "n_words": 13, "vocab_size": 12 }
def evaluate(self, expr, context): if isinstance(expr, string_types): if expr[0] in '\'"': result = expr[1:-1] else: if expr not in context: raise SyntaxError('unknown variable: %s' % expr) result = context[expr] else: assert isinstance(expr, dict) op = expr['op'] if op not in self.operations: raise NotImplementedError('op not implemented: %s' % op) elhs = expr['lhs'] erhs = expr['rhs'] if _is_literal(expr['lhs']) and _is_literal(expr['rhs']): raise SyntaxError('invalid comparison: %s %s %s' % (elhs, op, erhs)) lhs = self.evaluate(elhs, context) rhs = self.evaluate(erhs, context) if ((elhs == 'python_version' or erhs == 'python_version') and op in ('<', '<=', '>', '>=', '===', '==', '!=', '~=')): lhs = NV(lhs) rhs = NV(rhs) elif elhs == 'python_version' and op in ('in', 'not in'): lhs = NV(lhs) rhs = _get_versions(rhs) result = self.operations[op](lhs, rhs) return result
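A small evaluation sketch, assuming the enclosing class is distlib's Evaluator (the path above suggests so) and that the marker parser emits dict nodes of this shape:

ev = Evaluator()
expr = {"op": "==", "lhs": "python_version", "rhs": "'3.10'"}  # quoted rhs is a string literal
print(ev.evaluate(expr, {"python_version": "3.10"}))          # True (compared as versions via NV)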
@keras_export("keras.__internal__.backend.track_variable", v1=[])
80,151
269,522
29
keras/backend.py
11
9
def track_tf_optimizer(tf_optimizer): if tf.executing_eagerly(): return optimizers = _GRAPH_TF_OPTIMIZERS[None] optimizers.add(tf_optimizer)
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
track_tf_optimizer
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
backend.py
8
5
https://github.com/keras-team/keras.git
2
26
1
11
64
Python
{ "docstring": "Tracks the given TF optimizer for initialization of its variables.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
def track_tf_optimizer(tf_optimizer): if tf.executing_eagerly(): return optimizers = _GRAPH_TF_OPTIMIZERS[None] optimizers.add(tf_optimizer) @keras_export("keras.__internal__.backend.track_variable", v1=[])
47,370
195,687
582
sympy/polys/numberfields/galoisgroups.py
247
49
def _galois_group_degree_5(T, max_tries=30, randomize=False): from sympy.combinatorics.permutations import Permutation from sympy.combinatorics.named_groups import ( CyclicGroup, DihedralGroup, AlternatingGroup, SymmetricGroup )
Add a `galois_group()` function
_galois_group_degree_5
d3c0fc825c4a80904a1fb9a2092137c3d9e0c3fe
sympy
galoisgroups.py
16
60
https://github.com/sympy/sympy.git
10
556
0
159
820
Python
{ "docstring": "\n Compute the Galois group of a polynomial of degree 5, following Alg 6.3.9\n of Cohen.\n\n References\n ==========\n\n .. [1] Cohen, H. *A Course in Computational Algebraic Number Theory*.\n\n ", "language": "en", "n_whitespaces": 47, "n_words": 28, "vocab_size": 26 }
def _galois_group_degree_5(T, max_tries=30, randomize=False): from sympy.combinatorics.permutations import Permutation from sympy.combinatorics.named_groups import ( CyclicGroup, DihedralGroup, AlternatingGroup, SymmetricGroup ) # The ideas here are all the same as in the degree-4 method. # The specific resolvents we use, and how we interpret the results, are # adapted to the degree-5 case. X = symbols('X0 X1 X2 X3 X4') # For the first resolvent, we have G = S5, # and stabilizer H = M20 = < (01234), (1234) >. F1 = (X[0]**2*(X[1]*X[4] + X[2]*X[3]) + X[1]**2*(X[2]*X[0] + X[3]*X[4]) + X[2]**2*(X[3]*X[1] + X[4]*X[0]) + X[3]**2*(X[4]*X[2] + X[0]*X[1]) + X[4]**2*(X[0]*X[3] + X[1]*X[2])) s1 = [ Permutation(4), Permutation(4)(0, 1), Permutation(4)(0, 2), Permutation(4)(0, 3), Permutation(4)(0, 4), Permutation(4)(1, 4) ] R1 = Resolvent(F1, X, s1) # For the second resolvent, we'll have G = D5, H = C5. F2_pre = X[0]*X[1]**2 + X[1]*X[2]**2 + X[2]*X[3]**2 + X[3]*X[4]**2 + X[4]*X[0]**2 s2_pre = [ Permutation(4), Permutation(4)(0, 1)(2, 4) ] history = set() for i in range(max_tries): if i > 0: _, T = tschirnhausen_transformation(T, max_tries=max_tries, history=history, fixed_order=not randomize) R_dup, _, i0 = R1.eval_for_poly(T, find_integer_root=True) if not dup_sqf_p(R_dup, ZZ): continue sq_disc = has_square_disc(T) if i0 is None: return (AlternatingGroup(5), True) if sq_disc else (SymmetricGroup(5), False) if not sq_disc: return (M20(), False) sigma = s1[i0] F2 = F2_pre.subs(zip(X, sigma(X)), simultaneous=True) s2 = [sigma*tau*sigma for tau in s2_pre] R2 = Resolvent(F2, X, s2) R_dup, _, _ = R2.eval_for_poly(T) d = dup_discriminant(R_dup, ZZ) if d == 0: continue if is_square(d): return (CyclicGroup(5), True) else: return (DihedralGroup(5), True) raise MaxTriesException
55,922
220,126
44
python3.10.4/Lib/argparse.py
16
10
def error(self, message): self.print_usage(_sys.stderr)
add python 3.10.4 for windows
error
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
argparse.py
11
4
https://github.com/XX-net/XX-Net.git
1
42
0
16
74
Python
{ "docstring": "error(message: string)\n\n Prints a usage message incorporating the message to stderr and\n exits.\n\n If you override this in a subclass, it should not return -- it\n should either exit or raise an exception.\n ", "language": "en", "n_whitespaces": 68, "n_words": 33, "vocab_size": 29 }
def error(self, message): self.print_usage(_sys.stderr) args = {'prog': self.prog, 'message': message} self.exit(2, _('%(prog)s: error: %(message)s\n') % args)
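A short illustration of the behavior (standard argparse; the error message text is made up):

import argparse

parser = argparse.ArgumentParser(prog="demo")
parser.error("argument --count: expected an integer")
# Prints the usage line plus "demo: error: ..." to stderr, then exits with status 2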
@keras_export("keras.applications.regnet.decode_predictions")
80,066
269,418
15
keras/applications/regnet.py
9
4
def preprocess_input(x, data_format=None): # pylint: disable=unused-argument return x @keras_export("keras.applications.regnet.decode_predictions")
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
preprocess_input
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
regnet.py
7
2
https://github.com/keras-team/keras.git
1
12
1
9
33
Python
{ "docstring": "A placeholder method for backward compatibility.\n\n The preprocessing logic has been included in the regnet model\n implementation. Users are no longer required to call this method to normalize\n the input data. This method does nothing and only kept as a placeholder to\n align the API surface between old and new version of model.\n\n Args:\n x: A floating point `numpy.array` or a `tf.Tensor`.\n data_format: Optional data format of the image tensor/array. Defaults to\n None, in which case the global setting\n `tf.keras.backend.image_data_format()` is used (unless you changed it, it\n defaults to \"channels_last\").{mode}\n\n Returns:\n Unchanged `numpy.array` or `tf.Tensor`.\n ", "language": "en", "n_whitespaces": 152, "n_words": 95, "vocab_size": 76 }
def preprocess_input(x, data_format=None): # pylint: disable=unused-argument return x @keras_export("keras.applications.regnet.decode_predictions")
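A minimal sketch of the pass-through behavior described in the docstring above:

import numpy as np

x = np.random.uniform(0, 255, size=(1, 224, 224, 3)).astype("float32")
out = preprocess_input(x)
assert out is x  # no normalization here; RegNet models handle it internally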
16,323
74,807
214
wagtail/documents/tests/test_admin_views.py
46
24
def test_edit_post(self): # Send request response = self.client.post( reverse("wagtaildocs:edit_multiple", args=(self.doc.id,)), { "doc-%d-%s" % (self.doc.id, field): data for field, data in self.edit_post_data.items() }, ) # Check response self.assertEqual(response.status_code, 200) self.assertEqual(response["Content-Type"], "application/json") # Check JSON response_json = json.loads(response.content.decode()) self.assertIn("doc_id", response_json) self.assertNotIn("form", response_json) self.assertIn("success", response_json) self.assertEqual(response_json["doc_id"], self.doc.id) self.assertTrue(response_json["success"]) self.check_doc_after_edit()
Reformat with black
test_edit_post
d10f15e55806c6944827d801cd9c2d53f5da4186
wagtail
test_admin_views.py
14
17
https://github.com/wagtail/wagtail.git
2
147
0
38
246
Python
{ "docstring": "\n This tests that a POST request to the edit view edits the document\n ", "language": "en", "n_whitespaces": 28, "n_words": 13, "vocab_size": 12 }
def test_edit_post(self): # Send request response = self.client.post( reverse("wagtaildocs:edit_multiple", args=(self.doc.id,)), { "doc-%d-%s" % (self.doc.id, field): data for field, data in self.edit_post_data.items() }, ) # Check response self.assertEqual(response.status_code, 200) self.assertEqual(response["Content-Type"], "application/json") # Check JSON response_json = json.loads(response.content.decode()) self.assertIn("doc_id", response_json) self.assertNotIn("form", response_json) self.assertIn("success", response_json) self.assertEqual(response_json["doc_id"], self.doc.id) self.assertTrue(response_json["success"]) self.check_doc_after_edit()
117,439
320,926
26
tests/unit/mainwindow/test_messageview.py
10
13
def test_message_hiding(qtbot, view): with qtbot.wait_signal(view._clear_timer.timeout): view.show_message(message.MessageInfo(usertypes.MessageLevel.info, 'test'))
Add a MessageInfo data class Preparation for #7246
test_message_hiding
5616a99eff34f7074641d1391ed77d6b4b743529
qutebrowser
test_messageview.py
13
4
https://github.com/qutebrowser/qutebrowser.git
1
42
0
10
72
Python
{ "docstring": "Messages should be hidden after the timer times out.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
def test_message_hiding(qtbot, view): with qtbot.wait_signal(view._clear_timer.timeout): view.show_message(message.MessageInfo(usertypes.MessageLevel.info, 'test')) assert not view._messages
71,034
246,133
283
tests/rest/client/test_profile.py
48
19
def test_avatar_allowed_mime_type_per_room(self): self._setup_local_files( { "good": {"mimetype": "image/png"}, "bad": {"mimetype": "application/octet-stream"}, } ) room_id = self.helper.create_room_as(tok=self.owner_tok) channel = self.make_request( "PUT", f"/rooms/{room_id}/state/m.room.member/{self.owner}", content={"membership": "join", "avatar_url": "mxc://test/bad"}, access_token=self.owner_tok, )
Configurable limits on avatars (#11846) Only allow files which file size and content types match configured limits to be set as avatar. Most of the inspiration from the non-test code comes from matrix-org/synapse-dinsic#19
test_avatar_allowed_mime_type_per_room
bf60da1a60096fac5fb778b732ff2214862ac808
synapse
test_profile.py
12
25
https://github.com/matrix-org/synapse.git
1
150
0
32
276
Python
{ "docstring": "Tests that the MIME type whitelist for avatars is enforced when updating a\n per-room profile.\n ", "language": "en", "n_whitespaces": 29, "n_words": 15, "vocab_size": 15 }
def test_avatar_allowed_mime_type_per_room(self): self._setup_local_files( { "good": {"mimetype": "image/png"}, "bad": {"mimetype": "application/octet-stream"}, } ) room_id = self.helper.create_room_as(tok=self.owner_tok) channel = self.make_request( "PUT", f"/rooms/{room_id}/state/m.room.member/{self.owner}", content={"membership": "join", "avatar_url": "mxc://test/bad"}, access_token=self.owner_tok, ) self.assertEqual(channel.code, 403, channel.result) self.assertEqual( channel.json_body["errcode"], Codes.FORBIDDEN, channel.json_body ) channel = self.make_request( "PUT", f"/rooms/{room_id}/state/m.room.member/{self.owner}", content={"membership": "join", "avatar_url": "mxc://test/good"}, access_token=self.owner_tok, ) self.assertEqual(channel.code, 200, channel.result)
79,292
268,018
20
test/lib/ansible_test/_internal/host_profiles.py
6
7
def container_name(self) -> t.Optional[str]: return self.state.get('container_name')
ansible-test - Use more native type hints. (#78435) * ansible-test - Use more native type hints. Simple search and replace to switch from comments to native type hints for return types of functions with no arguments. * ansible-test - Use more native type hints. Conversion of simple single-line function annotation type comments to native type hints. * ansible-test - Use more native type hints. Conversion of single-line function annotation type comments with default values to native type hints. * ansible-test - Use more native type hints. Manual conversion of type annotation comments for functions which have pylint directives.
container_name
3eb0485dd92c88cc92152d3656d94492db44b183
ansible
host_profiles.py
8
3
https://github.com/ansible/ansible.git
1
22
0
6
39
Python
{ "docstring": "Return the stored container name, if any, otherwise None.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
def container_name(self) -> t.Optional[str]: return self.state.get('container_name')
75,702
259,303
148
sklearn/metrics/_scorer.py
37
10
def get_scorer(scoring): if isinstance(scoring, str): try: scorer = copy.deepcopy(_SCORERS[scoring]) except KeyError: raise ValueError( "%r is not a valid scoring value. " "Use sklearn.metrics.get_scorer_names() " "to get valid options." % scoring )
API get_scorer returns a copy and introduce get_scorer_names (#22866)
get_scorer
7dc97a378ecbfa056dd9cfa9d1ef4c07d2d0cc1f
scikit-learn
_scorer.py
15
13
https://github.com/scikit-learn/scikit-learn.git
3
46
0
30
83
Python
{ "docstring": "Get a scorer from string.\n\n Read more in the :ref:`User Guide <scoring_parameter>`.\n :func:`~sklearn.metrics.get_scorer_names` can be used to retrieve the names\n of all available scorers.\n\n Parameters\n ----------\n scoring : str or callable\n Scoring method as string. If callable it is returned as is.\n\n Returns\n -------\n scorer : callable\n The scorer.\n\n Notes\n -----\n When passed a string, this function always returns a copy of the scorer\n object. Calling `get_scorer` twice for the same scorer results in two\n separate scorer objects.\n ", "language": "en", "n_whitespaces": 137, "n_words": 78, "vocab_size": 62 }
def get_scorer(scoring): if isinstance(scoring, str): try: scorer = copy.deepcopy(_SCORERS[scoring]) except KeyError: raise ValueError( "%r is not a valid scoring value. " "Use sklearn.metrics.get_scorer_names() " "to get valid options." % scoring ) else: scorer = scoring return scorer
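A brief sketch of the copy semantics the docstring notes:

from sklearn.metrics import get_scorer

acc1 = get_scorer("accuracy")
acc2 = get_scorer("accuracy")
assert acc1 is not acc2  # each call returns a fresh deep copy

get_scorer("not-a-scorer")  # raises ValueError pointing at get_scorer_names()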
75,239
258,445
1,034
sklearn/discriminant_analysis.py
249
50
def fit(self, X, y): X, y = self._validate_data( X, y, ensure_min_samples=2, dtype=[np.float64, np.float32] ) self.classes_ = unique_labels(y) n_samples, _ = X.shape n_classes = len(self.classes_) if n_samples == n_classes: raise ValueError( "The number of samples must be more than the number of classes." ) if self.priors is None: # estimate priors from sample _, y_t = np.unique(y, return_inverse=True) # non-negative ints self.priors_ = np.bincount(y_t) / float(len(y)) else: self.priors_ = np.asarray(self.priors)
ENH Adds get_feature_names_out for discriminant_analysis (#22120)
fit
5c675183d81d71e7e670bb32cf869afb99b513af
scikit-learn
discriminant_analysis.py
14
68
https://github.com/scikit-learn/scikit-learn.git
13
437
0
151
696
Python
{ "docstring": "Fit the Linear Discriminant Analysis model.\n\n .. versionchanged:: 0.19\n *store_covariance* has been moved to main constructor.\n\n .. versionchanged:: 0.19\n *tol* has been moved to main constructor.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data.\n\n y : array-like of shape (n_samples,)\n Target values.\n\n Returns\n -------\n self : object\n Fitted estimator.\n ", "language": "en", "n_whitespaces": 187, "n_words": 52, "vocab_size": 38 }
def fit(self, X, y): X, y = self._validate_data( X, y, ensure_min_samples=2, dtype=[np.float64, np.float32] ) self.classes_ = unique_labels(y) n_samples, _ = X.shape n_classes = len(self.classes_) if n_samples == n_classes: raise ValueError( "The number of samples must be more than the number of classes." ) if self.priors is None: # estimate priors from sample _, y_t = np.unique(y, return_inverse=True) # non-negative ints self.priors_ = np.bincount(y_t) / float(len(y)) else: self.priors_ = np.asarray(self.priors) if (self.priors_ < 0).any(): raise ValueError("priors must be non-negative") if not np.isclose(self.priors_.sum(), 1.0): warnings.warn("The priors do not sum to 1. Renormalizing", UserWarning) self.priors_ = self.priors_ / self.priors_.sum() # Maximum number of components no matter what n_components is # specified: max_components = min(len(self.classes_) - 1, X.shape[1]) if self.n_components is None: self._max_components = max_components else: if self.n_components > max_components: raise ValueError( "n_components cannot be larger than min(n_features, n_classes - 1)." ) self._max_components = self.n_components if self.solver == "svd": if self.shrinkage is not None: raise NotImplementedError("shrinkage not supported") if self.covariance_estimator is not None: raise ValueError( "covariance estimator " "is not supported " "with svd solver. Try another solver" ) self._solve_svd(X, y) elif self.solver == "lsqr": self._solve_lsqr( X, y, shrinkage=self.shrinkage, covariance_estimator=self.covariance_estimator, ) elif self.solver == "eigen": self._solve_eigen( X, y, shrinkage=self.shrinkage, covariance_estimator=self.covariance_estimator, ) else: raise ValueError( "unknown solver {} (valid solvers are 'svd', " "'lsqr', and 'eigen').".format(self.solver) ) if self.classes_.size == 2: # treat binary case as a special case self.coef_ = np.array( self.coef_[1, :] - self.coef_[0, :], ndmin=2, dtype=X.dtype ) self.intercept_ = np.array( self.intercept_[1] - self.intercept_[0], ndmin=1, dtype=X.dtype ) self._n_features_out = self._max_components return self
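A minimal fit/predict sketch (mirrors the standard scikit-learn LDA example):

import numpy as np
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
y = np.array([1, 1, 1, 2, 2, 2])
clf = LinearDiscriminantAnalysis(solver="svd").fit(X, y)
print(clf.predict([[-0.8, -1]]))  # [1]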
17,646
83,277
59
zerver/webhooks/bitbucket3/tests.py
20
5
def test_pr_opened_with_multiple_reviewers(self) -> None: expected_topic = "sandbox / PR #6 sample_file: Add sample_file.txt." expected_message = "[hypro999](http://139.59.64.214:7990/users/hypro999) opened [PR #6](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/pull-requests/6) from `master` to `master` (assigned to [sougo](http://139.59.64.214:7990/users/sougo), [zura](http://139.59.64.214:7990/users/zura) and [shimura](http://139.59.64.214:7990/users/shimura) for review):\n\n~~~ quote\nAdd a simple text file for further testing purposes.\n~~~" self.check_webhook( "pull_request_opened_with_multiple_reviewers", expected_topic, expected_message )
docs: Fix many spelling mistakes. Signed-off-by: Anders Kaseorg <[email protected]>
test_pr_opened_with_multiple_reviewers
b0ce4f1bce8031881addecb1e86073483517f392
zulip
tests.py
8
6
https://github.com/zulip/zulip.git
1
23
0
18
46
Python
{ "docstring": "[hypro999](http://139.59.64.214:7990/users/hypro999) opened [PR #6](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/pull-requests/6) from `master` to `master` (assigned to [sougo](http://139.59.64.214:7990/users/sougo), [zura](http://139.59.64.214:7990/users/zura) and [shimura](http://139.59.64.214:7990/users/shimura) for review):\\n\\n~~~ quote\\nAdd a simple text file for further testing purposes.\\n~~~", "language": "en", "n_whitespaces": 24, "n_words": 25, "vocab_size": 22 }
def test_pr_opened_with_multiple_reviewers(self) -> None: expected_topic = "sandbox / PR #6 sample_file: Add sample_file.txt." expected_message = "[hypro999](http://139.59.64.214:7990/users/hypro999) opened [PR #6](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/pull-requests/6) from `master` to `master` (assigned to [sougo](http://139.59.64.214:7990/users/sougo), [zura](http://139.59.64.214:7990/users/zura) and [shimura](http://139.59.64.214:7990/users/shimura) for review):\n\n~~~ quote\nAdd a simple text file for further testing purposes.\n~~~" self.check_webhook( "pull_request_opened_with_multiple_reviewers", expected_topic, expected_message )
7,284
39,911
16
dash/_grouping.py
10
7
def make_grouping_by_key(schema, source, default=None): return map_grouping(lambda s: source.get(s, default), schema)
extended ctx.arg_grouping and changed it to AttributeDict
make_grouping_by_key
d19f04c9529d624a8d8f9d02f047c4e972f9d4db
dash
_grouping.py
10
2
https://github.com/plotly/dash.git
1
29
0
10
45
Python
{ "docstring": "\n Create a grouping from a schema by using the schema's scalar values to look up\n items in the provided source object.\n\n :param schema: A grouping of potential keys in source\n :param source: Dict-like object to use to look up scalar grouping value using\n scalar grouping values as keys\n :param default: Default scalar value to use if grouping scalar key is not present\n in source\n :return: grouping\n ", "language": "en", "n_whitespaces": 102, "n_words": 66, "vocab_size": 39 }
def make_grouping_by_key(schema, source, default=None): return map_grouping(lambda s: source.get(s, default), schema)
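A usage sketch under the assumption that map_grouping applies the lambda to every scalar leaf of the schema (map_grouping itself is not shown in this record):

schema = {"x": "a", "y": ("b", "c")}  # grouping of potential keys
source = {"a": 1, "b": 2}             # dict-like lookup source
make_grouping_by_key(schema, source, default=0)
# -> {"x": 1, "y": (2, 0)}  ("c" is absent, so the default fills in)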
47,778
196,278
39
sympy/geometry/point.py
18
11
def taxicab_distance(self, p): s, p = Point._normalize_dimension(self, Point(p)) return Add(*(abs(a - b) for a, b in zip(s, p)))
Updated import locations
taxicab_distance
498015021131af4dbb07eb110e5badaba8250c7b
sympy
point.py
12
3
https://github.com/sympy/sympy.git
2
47
0
18
74
Python
{ "docstring": "The Taxicab Distance from self to point p.\n\n Returns the sum of the horizontal and vertical distances to point p.\n\n Parameters\n ==========\n\n p : Point\n\n Returns\n =======\n\n taxicab_distance : The sum of the horizontal\n and vertical distances to point p.\n\n See Also\n ========\n\n sympy.geometry.point.Point.distance\n\n Examples\n ========\n\n >>> from sympy import Point\n >>> p1, p2 = Point(1, 1), Point(4, 5)\n >>> p1.taxicab_distance(p2)\n 7\n\n ", "language": "en", "n_whitespaces": 188, "n_words": 62, "vocab_size": 40 }
def taxicab_distance(self, p): s, p = Point._normalize_dimension(self, Point(p)) return Add(*(abs(a - b) for a, b in zip(s, p)))
79,750
268,884
21
keras/metrics/metrics.py
17
8
def cosine_similarity(y_true, y_pred, axis=-1): y_true = tf.linalg.l2_normalize(y_true, axis=axis) y_pred = tf.linalg.l2_normalize(y_pred, axis=axis) return tf.reduce_sum(y_true * y_pred, axis=axis)
Refactor disparate metrics-related files into a single metrics folder. Further work may be needed to split up the long file with individual metric definitions. However having a single file per metric may be too granular. TBD. PiperOrigin-RevId: 425248502
cosine_similarity
b4dca51d0558e788f62a96d1009a07f773a202f4
keras
metrics.py
9
4
https://github.com/keras-team/keras.git
1
54
0
13
83
Python
{ "docstring": "Computes the cosine similarity between labels and predictions.\n\n Args:\n y_true: The ground truth values.\n y_pred: The prediction values.\n axis: (Optional) Defaults to -1. The dimension along which the cosine\n similarity is computed.\n\n Returns:\n Cosine similarity value.\n ", "language": "en", "n_whitespaces": 56, "n_words": 36, "vocab_size": 29 }
def cosine_similarity(y_true, y_pred, axis=-1): y_true = tf.linalg.l2_normalize(y_true, axis=axis) y_pred = tf.linalg.l2_normalize(y_pred, axis=axis) return tf.reduce_sum(y_true * y_pred, axis=axis)
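A small numeric check of the metric above:

import tensorflow as tf

y_true = tf.constant([[0.0, 1.0], [1.0, 1.0]])
y_pred = tf.constant([[1.0, 0.0], [1.0, 1.0]])
print(cosine_similarity(y_true, y_pred, axis=-1))
# ~[0., 1.]: orthogonal rows score 0, identical rows score 1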
22,523
106,953
215
lib/matplotlib/transforms.py
110
19
def rotate(self, theta): a = math.cos(theta) b = math.sin(theta)
Micro-optimize rotation transform. The following test script shows a ~3x speedup. ```python import math, numpy as np mtx = np.array([[.1, .2, .3], [.4, .5, .6], [0, 0, 1]]) theta = np.pi / 4 def rotate(mtx, theta): a = math.cos(theta) b = math.sin(theta) rotate_mtx = np.array([[a, -b, 0.0], [b, a, 0.0], [0.0, 0.0, 1.0]], float) return np.dot(rotate_mtx, mtx) def rfast(mtx, theta): a = math.cos(theta) b = math.sin(theta) (xx, xy, x0), (yx, yy, y0), _ = mtx.tolist() # mtx = [[a -b 0], [b a 0], [0 0 1]] * mtx mtx[0, 0] = a * xx - b * yx mtx[0, 1] = a * xy - b * yy mtx[0, 2] = a * x0 - b * y0 mtx[1, 0] = b * xx + a * yx mtx[1, 1] = b * xy + a * yy mtx[1, 2] = b * x0 + a * y0 return mtx %timeit rotate(mtx, theta) %timeit rfast(mtx, theta) ```
rotate
ff120cdc5aef1d609913678b1ac8c26e6f30691e
matplotlib
transforms.py
8
13
https://github.com/matplotlib/matplotlib.git
1
143
0
53
214
Python
{ "docstring": "\n Add a rotation (in radians) to this transform in place.\n\n Returns *self*, so this method can easily be chained with more\n calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`\n and :meth:`scale`.\n ", "language": "en", "n_whitespaces": 64, "n_words": 28, "vocab_size": 26 }
def rotate(self, theta): a = math.cos(theta) b = math.sin(theta) mtx = self._mtx # Operating and assigning one scalar at a time is much faster. (xx, xy, x0), (yx, yy, y0), _ = mtx.tolist() # mtx = [[a -b 0], [b a 0], [0 0 1]] * mtx mtx[0, 0] = a * xx - b * yx mtx[0, 1] = a * xy - b * yy mtx[0, 2] = a * x0 - b * y0 mtx[1, 0] = b * xx + a * yx mtx[1, 1] = b * xy + a * yy mtx[1, 2] = b * x0 + a * y0 self.invalidate() return self
85,834
286,499
1,328
openbb_terminal/parent_classes.py
198
59
def call_load(self, other_args): parser = argparse.ArgumentParser( add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog="load", )
Sdk dates (#3354) * example changes in slopes * change lettering size and side bar capitalization * revert back to Fira * start automatic website generation * this was autogen * add examples to slopes model * generate slopes doc * change to _index.md * allow italic formatting * fix regex * option to regenerate paths * update alt docs * fix generate * update alt * fix generate * update common * target italic only for types * format alt * format italic common * add sig indentation * update sig indent alt * update common ident * add todo * generate docstrings for all menus * fix maxdd * fix returns font size * fix keys docs * fix more docstrings * escape literal symbols * escape literal symbols * reformat keys * format opt * remove literal escape * remove another literal escape * remove another literal escape * unindent returns * update docs return unindent * add comma in last arg * fix funcs without params * fix signature * compact some code * refactor some more code * refactor some code * some final cleanup * write docstrings * change main * move futures paths * generate futures docs * add external axes references * fix typo * revert to double docstring * fix small bug * remove docs folder * generate.py in website folder * add forecast to docs * clear some warnings * fix underscore * remove cite * refresh website docs * fix forecast docstrings * fix po * fix po docs and remove italic * fix more docstrings * remove last warning * codespell * flake8 * exclude website contente from flake * noqa on optimizer * update website * fix mypy * remove setup from mypy * mypy to openbbterminal * update precommit * pylint * try to remove sdk loading issue * fix dates active command * fix crypto.change formats * fix eb formats * nonzero fix * format dates crypto.load * format supply transac * format hr altindex * format load crypto * regenerate docs * format ba trend dates * regenerate docs * format ba trend * candle defaults * fix sentiment test * remove unused import * shopt * shopt again * revert crypto helpers * test shopt * fix some tests * skip trending test * fix alcoin test * helpers * write docs * rewrite helper Co-authored-by: Jeroen Bouma <[email protected]>
call_load
46141766d7250671b7bc75872e2034afe4938374
OpenBBTerminal
parent_classes.py
16
99
https://github.com/OpenBB-finance/OpenBBTerminal.git
7
486
0
141
791
Python
{ "docstring": "Process load command.Load crypto currency to perform analysis on.\n Yahoo Finance is used as default source.\n Other sources can be used such as 'ccxt' or 'cg' with --source.\n If you select 'ccxt', you can then select any exchange with --exchange.\n You can also select a specific interval with --interval.", "language": "en", "n_whitespaces": 92, "n_words": 49, "vocab_size": 40 }
def call_load(self, other_args):
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="load",
        description="""Load crypto currency to perform analysis on.
        Yahoo Finance is used as default source.
        Other sources can be used such as 'ccxt' or 'cg' with --source.
        If you select 'ccxt', you can then select any exchange with --exchange.
        You can also select a specific interval with --interval.""",  # restored from this record's docstring field
    )
    parser.add_argument(
        "-c",
        "--coin",
        help="Coin to get. Must be coin symbol (e.g., btc, eth)",
        dest="coin",
        type=str,
        required="-h" not in other_args,
    )
    parser.add_argument(
        "-s",
        "--start",
        type=valid_date,
        default=(datetime.now() - timedelta(days=1100)).strftime("%Y-%m-%d"),
        dest="start",
        help="The starting date (format YYYY-MM-DD) of the crypto",
    )
    parser.add_argument(
        "--exchange",
        help="Exchange to search",
        dest="exchange",
        type=str,
        default="binance",
        choices=self.exchanges,
    )
    parser.add_argument(
        "-e",
        "--end",
        type=valid_date,
        default=datetime.now().strftime("%Y-%m-%d"),
        dest="end",
        help="The ending date (format YYYY-MM-DD) of the crypto",
    )
    parser.add_argument(
        "-i",
        "--interval",
        action="store",
        dest="interval",
        type=str,
        default="1440",
        choices=["1", "5", "15", "30", "60", "240", "1440", "10080", "43200"],
        help="The interval of the crypto",
    )
    parser.add_argument(
        "--vs",
        help="Quote currency (what to view coin vs). e.g., usdc, usdt, ... if source is ccxt, usd, eur, ... otherwise",  # noqa
        dest="vs",
        default="usdt",
        type=str,
    )
    if other_args and "-" not in other_args[0][0]:
        other_args.insert(0, "-c")
    ns_parser = self.parse_known_args_and_warn(
        parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED
    )
    if ns_parser:
        if ns_parser.source in ("YahooFinance", "CoinGecko"):
            if ns_parser.vs == "usdt":
                ns_parser.vs = "usd"
        self.current_df = cryptocurrency_helpers.load(
            symbol=ns_parser.coin.lower(),
            vs_currency=ns_parser.vs,
            end_date=ns_parser.end.strftime("%Y-%m-%d"),
            start_date=ns_parser.start.strftime("%Y-%m-%d"),
            interval=ns_parser.interval,
            source=ns_parser.source,
            exchange=ns_parser.exchange,
        )
        if not self.current_df.empty:
            self.vs = ns_parser.vs
            self.exchange = ns_parser.exchange
            self.source = ns_parser.source
            self.current_interval = ns_parser.interval
            self.current_currency = ns_parser.vs
            self.symbol = ns_parser.coin.lower()
            cryptocurrency_helpers.show_quick_performance(
                self.current_df,
                self.symbol,
                self.current_currency,
                ns_parser.source,
                ns_parser.exchange,
                self.current_interval,
            )
            export_data(
                ns_parser.export,
                os.path.dirname(os.path.abspath(__file__)),
                "load",
                self.current_df.copy(),
            )
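One detail of `call_load` worth isolating: when the first token is not a flag, it is reinterpreted as the `-c` coin argument, so `load btc` behaves like `load -c btc`. A standalone sketch of that pattern (the parser below is a stand-in, not OpenBB's full one):

```python
import argparse

parser = argparse.ArgumentParser(prog="load", add_help=False)
parser.add_argument("-c", "--coin", dest="coin", type=str)

other_args = ["btc"]              # user typed `load btc`
if other_args and "-" not in other_args[0][0]:
    other_args.insert(0, "-c")    # treat the bare word as the coin flag
print(parser.parse_args(other_args))  # Namespace(coin='btc')
```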
@register.filter('json')
77,734
264,448
123
netbox/utilities/templatetags/builtins/filters.py
72
19
def render_markdown(value):
    schemes = '|'.join(get_config().ALLOWED_URL_SCHEMES)

    # Strip HTML tags
    value = strip_tags(value)

    # Sanitize Markdown links
    pattern = fr'\[([^\]]+)\]\((?!({schemes})).*:(.+)\)'
    value = re.sub(pattern, '[\\1](\\3)', value, flags=re.IGNORECASE)

    # Sanitize Markdown reference links
    pattern = fr'\[(.+)\]:\s*(?!({schemes}))\w*:(.+)'
    value = re.sub(pattern, '[\\1]: \\3', value, flags=re.IGNORECASE)

    # Render Markdown
    html = markdown(value, extensions=
Closes #8600: Document built-in template tags & filters
render_markdown
7c105019d8ae9205051c302e7499b33a455f9176
netbox
filters.py
12
11
https://github.com/netbox-community/netbox.git
2
98
1
50
198
Python
{ "docstring": "\n Render a string as Markdown. This filter is invoked as \"markdown\":\n\n {{ md_source_text|markdown }}\n ", "language": "en", "n_whitespaces": 28, "n_words": 14, "vocab_size": 13 }
def render_markdown(value):
    schemes = '|'.join(get_config().ALLOWED_URL_SCHEMES)

    # Strip HTML tags
    value = strip_tags(value)

    # Sanitize Markdown links
    pattern = fr'\[([^\]]+)\]\((?!({schemes})).*:(.+)\)'
    value = re.sub(pattern, '[\\1](\\3)', value, flags=re.IGNORECASE)

    # Sanitize Markdown reference links
    pattern = fr'\[(.+)\]:\s*(?!({schemes}))\w*:(.+)'
    value = re.sub(pattern, '[\\1]: \\3', value, flags=re.IGNORECASE)

    # Render Markdown
    html = markdown(value, extensions=['fenced_code', 'tables', StrikethroughExtension()])

    # If the string is not empty wrap it in rendered-markdown to style tables
    if html:
        html = f'<div class="rendered-markdown">{html}</div>'

    return mark_safe(html)
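The core of `render_markdown` is scheme allowlisting: Markdown links whose URL scheme is not allowed have the scheme stripped. A self-contained sketch of the first substitution, with a hypothetical two-scheme allowlist standing in for `get_config().ALLOWED_URL_SCHEMES`:

```python
import re

schemes = '|'.join(['http', 'https'])  # hypothetical allowlist
pattern = rf'\[([^\]]+)\]\((?!({schemes})).*:(.+)\)'
text = '[click me](javascript:alert(1))'
print(re.sub(pattern, '[\\1](\\3)', text, flags=re.IGNORECASE))
# -> [click me](alert(1))  (the disallowed scheme is dropped)
```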
42,454
177,588
174
label_studio/projects/functions/next_task.py
62
22
def _try_breadth_first(tasks, user):
    tasks = tasks.annotate(annotations_count=Count(
feat: DEV-469: Skip queue (#1693)

* DEV-469 Skip queue project setting
* DEV-469 review fixes
* Merge migrations (DEV-469)
* Update requirements-test.txt
* Update requirements-test.txt
* Update test_exception.py
* Revert "Update test_exception.py"

This reverts commit b9c686c9bacaf298bafe3a207352cc5260fef737.

* Revert "Update requirements-test.txt"

This reverts commit 3704d29978761089bcd008506f9e1c30a162bb3a.

* Revert "Update requirements-test.txt"

This reverts commit 50273847ae2872b31bccc376d04a3afff0efcf21.

* Recalc is_labeled after skip_queue change (DEV-469)
* Fix migrations (DEV-469)

Co-authored-by: Max Tkachenko <[email protected]>
Co-authored-by: niklub <[email protected]>
Co-authored-by: nik <[email protected]>
_try_breadth_first
074af782e6f351c711f18d8ad6a05aa4f632339c
label-studio
next_task.py
16
17
https://github.com/heartexlabs/label-studio.git
3
104
0
49
174
Python
{ "docstring": "Try to find tasks with maximum amount of annotations, since we are trying to label tasks as fast as possible\n ", "language": "en", "n_whitespaces": 23, "n_words": 20, "vocab_size": 17 }
def _try_breadth_first(tasks, user):
    tasks = tasks.annotate(annotations_count=Count('annotations'))
    max_annotations_count = tasks.aggregate(Max('annotations_count'))['annotations_count__max']
    if max_annotations_count == 0:
        # no labeled tasks found yet
        return

    # find any task with the maximal number of created annotations
    not_solved_tasks_labeling_started = tasks.annotate(
        reach_max_annotations_count=Case(
            When(annotations_count=max_annotations_count, then=Value(True)),
            default=Value(False),
            output_field=BooleanField(),
        )
    )
    not_solved_tasks_labeling_with_max_annotations = not_solved_tasks_labeling_started.filter(
        reach_max_annotations_count=True
    )
    if not_solved_tasks_labeling_with_max_annotations.exists():
        # try to complete tasks that are already in progress
        return _get_random_unlocked(not_solved_tasks_labeling_with_max_annotations, user)
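The ORM logic is easier to see with plain data: among unsolved tasks, prefer those whose annotation count already equals the maximum, so partially labeled tasks get finished first. A toy stand-in (no Django required):

```python
import random

tasks = [
    {"id": 1, "annotations": 2},
    {"id": 2, "annotations": 0},
    {"id": 3, "annotations": 2},
]
max_count = max(t["annotations"] for t in tasks)
if max_count > 0:
    in_progress = [t for t in tasks if t["annotations"] == max_count]
    print(random.choice(in_progress))  # task 1 or 3, never the untouched task 2
```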
33,074
143,838
405
rllib/policy/tests/test_rnn_sequencing.py
66
27
def test_pad_batch_dynamic_max(self):
    view_requirements = {
        "state_in_0": ViewRequirement(
            "state_out_0",
            shift=[-1],
            used_for_training=False,
            used_for_compute_actions=True,
            batch_repeat_value=1,
        )
    }
    max_seq_len = 20
    num_seqs = np.random.randint(1, 20)
    seq_lens = np.random.randint(1, max_seq_len, size=(num_seqs))
    max_len = np.max(seq_lens)
    sum_seq_lens = np.sum(seq_lens)
    s1 = SampleBatch(
        {
            "a": np.arange(sum_seq_lens),
            "b": np.arange(sum_seq_lens),
            "seq_lens": seq_lens,
            "state_in_0": [[0]] * num_seqs,
        },
        _max_seq_len=max_seq_len,
    )
    pad_batch_to_sequences_of_same_size(
        s1,
        max_seq_len=max_seq_len,
        feature_keys=["a", "b"],
        view_requirements=view_requirements,
    )
    check(s1.max_seq_len, max_len)
    check(s1["a"].shape[0], max_len * num_seqs)
    check(s1["b"].shape[0], max_len * num_seqs)
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
test_pad_batch_dynamic_max
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
ray
test_rnn_sequencing.py
13
33
https://github.com/ray-project/ray.git
1
190
0
49
299
Python
{ "docstring": "Test pad_batch_to_sequences_of_same_size when dynamic_max = True", "language": "en", "n_whitespaces": 5, "n_words": 6, "vocab_size": 6 }
def test_pad_batch_dynamic_max(self):
    view_requirements = {
        "state_in_0": ViewRequirement(
            "state_out_0",
            shift=[-1],
            used_for_training=False,
            used_for_compute_actions=True,
            batch_repeat_value=1,
        )
    }
    max_seq_len = 20
    num_seqs = np.random.randint(1, 20)
    seq_lens = np.random.randint(1, max_seq_len, size=(num_seqs))
    max_len = np.max(seq_lens)
    sum_seq_lens = np.sum(seq_lens)
    s1 = SampleBatch(
        {
            "a": np.arange(sum_seq_lens),
            "b": np.arange(sum_seq_lens),
            "seq_lens": seq_lens,
            "state_in_0": [[0]] * num_seqs,
        },
        _max_seq_len=max_seq_len,
    )
    pad_batch_to_sequences_of_same_size(
        s1,
        max_seq_len=max_seq_len,
        feature_keys=["a", "b"],
        view_requirements=view_requirements,
    )
    check(s1.max_seq_len, max_len)
    check(s1["a"].shape[0], max_len * num_seqs)
    check(s1["b"].shape[0], max_len * num_seqs)
45,950
188,975
171
psutil/_pslinux.py
61
31
def sensors_fans():
    ret = collections.defaultdict(list)
    basenames = glob.glob('/sys/class/hwmon/hwmon*/fan*_*')
    if not basenames:
        # CentOS has an intermediate /device directory:
        # https://github.com/giampaolo/psutil/issues/971
        basenames = glob.glob('/sys/class/hwmon/hwmon*/device/fan*_*')
    basenames = sorted(set([x.split('_')[0] for x in basenames]))
    for base in basenames:
        try:
            current = int(bcat(base + '_input'))
        except (IOError, OSError) as err:
            debug(err)
            continue
        unit_name = cat(os.path.join(os.path.dirname(base), 'name'))
        label = cat(base + '_label', fallback='')
        ret[
[Linux] cat/bcat utils refactoring (#2053)
sensors_fans
46cb6c212a870b36bd0af17c48dd29f53468734b
psutil
_pslinux.py
16
16
https://github.com/giampaolo/psutil.git
5
143
0
48
245
Python
{ "docstring": "Return hardware fans info (for CPU and other peripherals) as a\n dict including hardware label and current speed.\n\n Implementation notes:\n - /sys/class/hwmon looks like the most recent interface to\n retrieve this info, and this implementation relies on it\n only (old distros will probably use something else)\n - lm-sensors on Ubuntu 16.04 relies on /sys/class/hwmon\n ", "language": "en", "n_whitespaces": 79, "n_words": 54, "vocab_size": 45 }
def sensors_fans():
    ret = collections.defaultdict(list)
    basenames = glob.glob('/sys/class/hwmon/hwmon*/fan*_*')
    if not basenames:
        # CentOS has an intermediate /device directory:
        # https://github.com/giampaolo/psutil/issues/971
        basenames = glob.glob('/sys/class/hwmon/hwmon*/device/fan*_*')
    basenames = sorted(set([x.split('_')[0] for x in basenames]))
    for base in basenames:
        try:
            current = int(bcat(base + '_input'))
        except (IOError, OSError) as err:
            debug(err)
            continue
        unit_name = cat(os.path.join(os.path.dirname(base), 'name'))
        label = cat(base + '_label', fallback='')
        ret[unit_name].append(_common.sfan(label, current))
    return dict(ret)
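`sensors_fans` is part of psutil's public API on Linux, so the dict-of-lists it builds can be consumed like this (output depends on the machine's hwmon sensors):

```python
import psutil

for unit_name, entries in psutil.sensors_fans().items():
    for entry in entries:
        print(f"{unit_name} {entry.label or '(unlabeled)'}: {entry.current} RPM")
```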
55,257
218,360
31
python3.10.4/Lib/importlib/util.py
10
6
def factory(cls, loader):
    cls.__check_eager_loader(loader)
    return lambda *args, **
add python 3.10.4 for windows
factory
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
util.py
11
3
https://github.com/XX-net/XX-Net.git
1
33
0
10
55
Python
{ "docstring": "Construct a callable which returns the eager loader made lazy.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
def factory(cls, loader):
    cls.__check_eager_loader(loader)
    return lambda *args, **kwargs: cls(loader(*args, **kwargs))
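This is `importlib.util.LazyLoader.factory` from the standard library; the returned callable wraps an eager loader class so module execution is deferred until first attribute access. The documented usage pattern looks roughly like:

```python
import importlib.machinery
import importlib.util

# Wrap the eager SourceFileLoader class in a lazy variant.
lazy_source = importlib.util.LazyLoader.factory(importlib.machinery.SourceFileLoader)

# A finder that yields lazily loaded source modules for a directory.
finder = importlib.machinery.FileFinder(
    '/some/path',  # hypothetical directory of .py files
    (lazy_source, importlib.machinery.SOURCE_SUFFIXES),
)
```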
16,053
73,586
197
wagtail/contrib/table_block/tests.py
40
10
def test_table_block_caption_render(self):
    value = {
        "table_caption": "caption",
        "first_row_is_table_header": False,
        "first_col_is_header": Fals
Reformat with black
test_table_block_caption_render
d10f15e55806c6944827d801cd9c2d53f5da4186
wagtail
tests.py
11
25
https://github.com/wagtail/wagtail.git
1
83
0
31
140
Python
{ "docstring": "\n Test a generic render with caption.\n \n <table>\n <caption>caption</caption>\n <tbody>\n <tr><td>Test 1</td><td>Test 2</td><td>Test 3</td></tr>\n <tr><td></td><td></td><td></td></tr>\n <tr><td></td><td></td><td></td></tr>\n </tbody>\n </table>\n ", "language": "en", "n_whitespaces": 164, "n_words": 17, "vocab_size": 16 }
def test_table_block_caption_render(self):
    value = {
        "table_caption": "caption",
        "first_row_is_table_header": False,
        "first_col_is_header": False,
        "data": [
            ["Test 1", "Test 2", "Test 3"],
            [None, None, None],
            [None, None, None],
        ],
    }
    block = TableBlock()
    result = block.render(value)
    # The expected markup below is restored from this record's docstring field.
    expected = """
        <table>
            <caption>caption</caption>
            <tbody>
                <tr><td>Test 1</td><td>Test 2</td><td>Test 3</td></tr>
                <tr><td></td><td></td><td></td></tr>
                <tr><td></td><td></td><td></td></tr>
            </tbody>
        </table>
    """
    self.assertHTMLEqual(result, expected)
    self.assertIn("Test 2", result)
472
3,409
59
airbyte-integrations/connectors/source-salesforce/unit_tests/unit_test.py
15
1
def stream_config_without_start_date():
Source Salesforce: Deprecate API Type parameter (#9302)

* use BULK for the first sync, REST for incremental sync
* if stream contains compound data or/and base64 use always REST
* fix get stream state from connector state
* fix integration test
* refactor catalog name
* format code
* refactor unit tests
* refactor unit tests 2
* format code 2
* Set additionalProperties to true not to break test temporarily
* fix unit test and remove unnecessary filtering fields
* bump version
* updated spec and def yaml

Co-authored-by: auganbay <[email protected]>
stream_config_without_start_date
0a3713a5a52995dc0dc205d8edfd097bf625899f
airbyte
unit_test.py
8
8
https://github.com/airbytehq/airbyte.git
1
28
0
15
58
Python
{ "docstring": "Generates streams settings for REST logic without start_date", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
def stream_config_without_start_date():
    return {
        "client_id": "fake_client_id",
        "client_secret": "fake_client_secret",
        "refresh_token": "fake_refresh_token",
        "is_sandbox": False,
        "wait_timeout": 15,
    }
35,023
151,476
132
freqtrade/rpc/api_server/ws/channel.py
25
8
async def relay(self):
    while True:
        message = await self.queue.get()
        try:
            await self.send(message)
            self.queue.task_done()
        except RuntimeError:
            # The connection was closed, just exit t
refactor broadcasting to a queue per client
relay
3e8d8fd1b08e28f8ec231de9ee3be57a539b266e
freqtrade
channel.py
12
8
https://github.com/freqtrade/freqtrade.git
3
39
0
24
72
Python
{ "docstring": "\n Relay messages from the channel's queue and send them out. This is started\n as a task.\n ", "language": "en", "n_whitespaces": 38, "n_words": 16, "vocab_size": 16 }
async def relay(self):
    while True:
        message = await self.queue.get()
        try:
            await self.send(message)
            self.queue.task_done()
        except RuntimeError:
            # The connection was closed, just exit the task
            return
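Per the docstring, `relay` runs as a background task draining the channel's queue. A self-contained sketch of that producer/consumer shape (plain asyncio, independent of freqtrade's channel class):

```python
import asyncio


async def fake_send(message):
    print("sent:", message)


async def relay(queue, send):
    # Same shape as the channel method: drain the queue, forward each message.
    while True:
        message = await queue.get()
        try:
            await send(message)
            queue.task_done()
        except RuntimeError:
            return  # peer went away; end the relay task


async def main():
    queue = asyncio.Queue()
    task = asyncio.create_task(relay(queue, fake_send))
    for i in range(3):
        await queue.put(f"msg-{i}")
    await queue.join()  # wait until the relay has drained everything
    task.cancel()
    try:
        await task
    except asyncio.CancelledError:
        pass


asyncio.run(main())
```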
9,909
49,786
98
modules/image/text_to_image/disco_diffusion_cnclip_vitb16/reverse_diffusion/model/gaussian_diffusion.py
33
12
def q_sample(self, x_start, t, noise=None):
    if noise is None:
        # noise = th.randn_like(x_start)
        noise = paddle.randn(x_start.shape, x_start.dtype)
    assert noise.shape == x_start.shape
    return (_extract_into_tensor(self.sqrt_alphas_cumpr
add disco_diffusion_cnclip_vitb16 module
q_sample
f4d6e64cdc132ae868699a0ba442f4ab1d304a14
PaddleHub
gaussian_diffusion.py
11
6
https://github.com/PaddlePaddle/PaddleHub.git
2
73
0
26
109
Python
{ "docstring": "\n Diffuse the data for a given number of diffusion steps.\n\n In other words, sample from q(x_t | x_0).\n\n :param x_start: the initial data batch.\n :param t: the number of diffusion steps (minus 1). Here, 0 means one step.\n :param noise: if specified, the split-out normal noise.\n :return: A noisy version of x_start.\n ", "language": "en", "n_whitespaces": 102, "n_words": 52, "vocab_size": 42 }
def q_sample(self, x_start, t, noise=None):
    if noise is None:
        # noise = th.randn_like(x_start)
        noise = paddle.randn(x_start.shape, x_start.dtype)
    assert noise.shape == x_start.shape
    return (_extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
            + _extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise)
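The return line is the closed-form forward-diffusion sample q(x_t | x_0) = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise. A NumPy sketch with a toy linear beta schedule (the real module precomputes these buffers from its own schedule):

```python
import numpy as np

betas = np.linspace(1e-4, 0.02, 1000)      # toy schedule, 1000 steps
alphas_cumprod = np.cumprod(1.0 - betas)   # alpha_bar_t

x_start = np.random.randn(4)               # pretend data batch
t = 500
noise = np.random.randn(4)
x_t = (np.sqrt(alphas_cumprod[t]) * x_start
       + np.sqrt(1.0 - alphas_cumprod[t]) * noise)
```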
25,222
114,587
47
mindsdb/integrations/postgres_handler/postgres_handler.py
19
5
def get_views(self):
    query = f"SELECT * FROM information_schema.views WHERE table_schema NOT IN ('information_schema', 'pg_catalog')"
    result = self.run_native_query(q
Get tables, views, describe
get_views
7e3da9157508a5eb38dbfabbd7f08ba8fa6c5a88
mindsdb
postgres_handler.py
8
4
https://github.com/mindsdb/mindsdb.git
1
20
0
17
36
Python
{ "docstring": "\n List all views in PostgreSQL without the system views information_schema and pg_catalog\n ", "language": "en", "n_whitespaces": 27, "n_words": 12, "vocab_size": 11 }
def get_views(self):
    query = f"SELECT * FROM information_schema.views WHERE table_schema NOT IN ('information_schema', 'pg_catalog')"
    result = self.run_native_query(query)
    return result
50,640
204,139
1,068
django/contrib/gis/utils/layermapping.py
274
36
def verify_ogr_field(self, ogr_field, model_field):
    if isinstance(ogr_field, OFTString) and isinstance(
        model_field, (models.CharField, models.TextField)
    ):
        if self.encoding and ogr_field.value is not None:
            # The encoding for OGR data sources may be specified here
            # (e.g., 'cp437' for Census Bureau boundary files).
            val = force_str(ogr_field.value, self.encoding)
        else:
            val = ogr_field.value
        if (
            model_field.max_length
            and val is not None
            and len(val) > model_field.max_length
        ):
            raise InvalidString(
                "%s model field maximum string length is %s, given %s characters."
                % (model_field.name, model_field.max_length, len(val))
            )
    elif isinstance(ogr_field, OFTReal) and isinstance(
        model_field, models.DecimalField
    ):
        try:
            # Creating an instance of the Decimal value to use.
            d = Decimal(str(ogr_field.value))
        except DecimalInvalidOperation:
            raise InvalidDecimal(
                "Could not construct decimal from: %s" % ogr_field.value
            )

        # Getting the decimal value as a tuple.
        dtup = d.as_tuple()
        digits = dtup[1]
        d_idx = dtup[2]  # index where the decimal is

        # Maximum amount of precision, or digits to the left of the decimal.
        max_prec = model_field.max_digits - model_field.decimal_places

        # Getting the digits to the left of the decimal place for the
        # given decimal.
        if d_idx < 0:
            n_prec = len(digits[:d_idx])
        else:
            n_prec = len(digits) + d_idx

        # If we have more than the maximum digits allowed, then throw an
        # InvalidDecimal exception.
        if n_prec > max_prec:
            raise InvalidDecimal(
                "A DecimalField with max_digits %d, decimal_places %d must "
                "round to an absolute value less than 10^%d."
                % (model_field.max_digits, model_field.decimal_places, max_prec)
            )
        val = d
    elif isinstance(ogr_field, (OFTReal, OFTString)) and isinstance(
        model_field, models.IntegerField
    ):
        # Attempt to convert any OFTReal and OFTString value to an OFTInteger.
        try:
            val = in
Refs #33476 -- Reformatted code with Black.
verify_ogr_field
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
layermapping.py
16
53
https://github.com/django/django.git
16
278
0
155
451
Python
{ "docstring": "\n Verify if the OGR Field contents are acceptable to the model field. If\n they are, return the verified value, otherwise raise an exception.\n ", "language": "en", "n_whitespaces": 45, "n_words": 23, "vocab_size": 21 }
def verify_ogr_field(self, ogr_field, model_field):
    if isinstance(ogr_field, OFTString) and isinstance(
        model_field, (models.CharField, models.TextField)
    ):
        if self.encoding and ogr_field.value is not None:
            # The encoding for OGR data sources may be specified here
            # (e.g., 'cp437' for Census Bureau boundary files).
            val = force_str(ogr_field.value, self.encoding)
        else:
            val = ogr_field.value
        if (
            model_field.max_length
            and val is not None
            and len(val) > model_field.max_length
        ):
            raise InvalidString(
                "%s model field maximum string length is %s, given %s characters."
                % (model_field.name, model_field.max_length, len(val))
            )
    elif isinstance(ogr_field, OFTReal) and isinstance(
        model_field, models.DecimalField
    ):
        try:
            # Creating an instance of the Decimal value to use.
            d = Decimal(str(ogr_field.value))
        except DecimalInvalidOperation:
            raise InvalidDecimal(
                "Could not construct decimal from: %s" % ogr_field.value
            )

        # Getting the decimal value as a tuple.
        dtup = d.as_tuple()
        digits = dtup[1]
        d_idx = dtup[2]  # index where the decimal is

        # Maximum amount of precision, or digits to the left of the decimal.
        max_prec = model_field.max_digits - model_field.decimal_places

        # Getting the digits to the left of the decimal place for the
        # given decimal.
        if d_idx < 0:
            n_prec = len(digits[:d_idx])
        else:
            n_prec = len(digits) + d_idx

        # If we have more than the maximum digits allowed, then throw an
        # InvalidDecimal exception.
        if n_prec > max_prec:
            raise InvalidDecimal(
                "A DecimalField with max_digits %d, decimal_places %d must "
                "round to an absolute value less than 10^%d."
                % (model_field.max_digits, model_field.decimal_places, max_prec)
            )
        val = d
    elif isinstance(ogr_field, (OFTReal, OFTString)) and isinstance(
        model_field, models.IntegerField
    ):
        # Attempt to convert any OFTReal and OFTString value to an OFTInteger.
        try:
            val = int(ogr_field.value)
        except ValueError:
            raise InvalidInteger(
                "Could not construct integer from: %s" % ogr_field.value
            )
    else:
        val = ogr_field.value
    return val
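The decimal-precision branch is the subtle part: `Decimal.as_tuple()` exposes the digits and exponent, from which the code counts digits left of the decimal point. A worked example:

```python
from decimal import Decimal

d = Decimal("123.45")
sign, digits, d_idx = d.as_tuple()  # (0, (1, 2, 3, 4, 5), -2)
n_prec = len(digits[:d_idx]) if d_idx < 0 else len(digits) + d_idx
print(n_prec)  # 3 -> fits a DecimalField(max_digits=5, decimal_places=2)
```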
2,741
13,717
73
jina/serve/streamer.py
19
8
def get_streamer():
    if 'JINA_STREAMER_ARGS' in os.environ:
        args_dict = json.loads(os.environ['JINA_STREAMER_ARGS'])
        return GatewayStreamer(**args_dict)
feat: add get_streamer helper and inject streamer info (#5472)
get_streamer
b36e6bdb1f5d02a4c5af3131f3a07d7b4ccddced
jina
streamer.py
12
6
https://github.com/jina-ai/jina.git
2
38
0
19
71
Python
{ "docstring": "\n Return a streamer object based on the current environment context.\n The streamer object is contructed using runtime arguments stored in the `JINA_STREAMER_ARGS` environment variable.\n If this method is used outside a Jina context (process not controlled/orchestrated by jina), this method will\n raise an error.\n The streamer object does not have tracing/instrumentation capabilities.\n\n :return: Returns an instance of `GatewayStreamer`\n ", "language": "en", "n_whitespaces": 108, "n_words": 58, "vocab_size": 45 }
def get_streamer():
    if 'JINA_STREAMER_ARGS' in os.environ:
        args_dict = json.loads(os.environ['JINA_STREAMER_ARGS'])
        return GatewayStreamer(**args_dict)
    else:
        raise OSError('JINA_STREAMER_ARGS environment variable is not set')
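Usage follows from the docstring: a Jina-controlled process sets `JINA_STREAMER_ARGS` to a JSON dict of `GatewayStreamer` constructor kwargs. A sketch (the kwargs are a placeholder here, since this record does not show `GatewayStreamer`'s signature):

```python
import json
import os

args_dict = {}  # hypothetical: must match GatewayStreamer(**kwargs)
os.environ['JINA_STREAMER_ARGS'] = json.dumps(args_dict)
streamer = get_streamer()  # raises OSError if the variable is unset
```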