column          dtype          min      max
ast_errors      stringlengths  0        3.2k
d_id            int64          44       121k
id              int64          70       338k
n_whitespaces   int64          3        14k
path            stringlengths  8        134
n_words         int64          4        4.82k
n_identifiers   int64          1        131
random_cut      stringlengths  16       15.8k
commit_message  stringlengths  2        15.3k
fun_name        stringlengths  1        84
commit_id       stringlengths  40       40
repo            stringlengths  3        28
file_name       stringlengths  5        79
ast_levels      int64          6        31
nloc            int64          1        548
url             stringlengths  31       59
complexity      int64          1        66
token_counts    int64          6        2.13k
n_ast_errors    int64          0        28
vocab_size      int64          4        1.11k
n_ast_nodes     int64          15       19.2k
language        stringclasses  1 value
documentation   dict
code            stringlengths  101      62.2k
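Each record that follows repeats these columns in order, one value per line; fields that are empty for a given record (typically ast_errors) are simply omitted. For the stringlengths columns the two bounds are string lengths, so commit_id is always a 40-character commit hash, and the single stringclasses value for language is "Python" in every record shown here. The short sketch below shows how records with these columns might be inspected, assuming they have been exported as JSON Lines keyed by the column names above; the file name records.jsonl, the repo filter, and the numeric thresholds are placeholders rather than anything taken from the dump.

```python
# Minimal sketch, not part of the original dump: assumes the records below have
# been exported as JSON Lines ("records.jsonl" is a placeholder name), one JSON
# object per row, keyed by the column names in the schema above.
import json

import pandas as pd

with open("records.jsonl", encoding="utf-8") as fh:
    df = pd.DataFrame([json.loads(line) for line in fh])

# Cheap sanity checks that only restate bounds visible in the schema:
# commit_id has a fixed length of 40, and language has a single class.
assert df["commit_id"].str.len().eq(40).all()
assert df["language"].eq("Python").all()

# Example query over the numeric columns: short, low-complexity functions
# from one repository (repo name and thresholds are arbitrary placeholders).
small = df[(df["repo"] == "sympy") & (df["complexity"] <= 5) & (df["nloc"] <= 20)]
print(small[["fun_name", "path", "nloc", "complexity"]].head())
```

The assertions are guards against a mangled export rather than real validation; any column listed in the schema can be filtered or aggregated the same way.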
48,786
198,085
618
sympy/functions/elementary/trigonometric.py
122
31
def _pi_coeff(arg, cycles=1): r arg = sympify(arg) if arg is pi: return S.One elif not arg: return S.Zero elif arg.is_Mul: cx = arg.coeff(pi) if cx: c, x = cx.as_coeff_Mul() # pi is not included as coeff if c.is_Float: # recast exact binary fractions to Rationals f = abs(c) % 1 if f != 0: p = -int(round(log(f, 2).evalf())) m = 2**p cm = c*m i = int(cm) if i == cm: c = Rational(i, m) cx = c*x else: c = Rational(int(c)) cx = c*x if x.is_integer: c2 = c % 2 if c2 == 1: return x elif not c2:
replace S.Pi with pi; cache InverseTrigonometric tables
_pi_coeff
dcb6e3c69f4e47f2fdb10a2ef0ede2cc6c8f2e06
sympy
trigonometric.py
24
72
https://github.com/sympy/sympy.git
13
201
0
70
327
Python
{ "docstring": "\n When arg is a Number times $\\pi$ (e.g. $3\\pi/2$) then return the Number\n normalized to be in the range $[0, 2]$, else `None`.\n\n When an even multiple of $\\pi$ is encountered, if it is multiplying\n something with known parity then the multiple is returned as 0 otherwise\n as 2.\n\n Examples\n ========\n\n >>> from sympy.functions.elementary.trigonometric import _pi_coeff\n >>> from sympy import pi, Dummy\n >>> from sympy.abc import x\n >>> _pi_coeff(3*x*pi)\n 3*x\n >>> _pi_coeff(11*pi/7)\n 11/7\n >>> _pi_coeff(-11*pi/7)\n 3/7\n >>> _pi_coeff(4*pi)\n 0\n >>> _pi_coeff(5*pi)\n 1\n >>> _pi_coeff(5.0*pi)\n 1\n >>> _pi_coeff(5.5*pi)\n 3/2\n >>> _pi_coeff(2 + pi)\n\n >>> _pi_coeff(2*Dummy(integer=True)*pi)\n 2\n >>> _pi_coeff(2*Dummy(even=True)*pi)\n 0\n\n ", "language": "en", "n_whitespaces": 189, "n_words": 98, "vocab_size": 68 }
def _pi_coeff(arg, cycles=1): r arg = sympify(arg) if arg is pi: return S.One elif not arg: return S.Zero elif arg.is_Mul: cx = arg.coeff(pi) if cx: c, x = cx.as_coeff_Mul() # pi is not included as coeff if c.is_Float: # recast exact binary fractions to Rationals f = abs(c) % 1 if f != 0: p = -int(round(log(f, 2).evalf())) m = 2**p cm = c*m i = int(cm) if i == cm: c = Rational(i, m) cx = c*x else: c = Rational(int(c)) cx = c*x if x.is_integer: c2 = c % 2 if c2 == 1: return x elif not c2: if x.is_even is not None: # known parity return S.Zero return Integer(2) else: return c2*x return cx elif arg.is_zero: return S.Zero
async def async_test_still(hass, info) -> tuple[dict[str, str], str | None]: """Verify that the still image is valid before we create an entity.""" fmt = None if not (url := info.get(CONF_STILL_IMAGE_URL)): return {}, None if not isinstance(url, template_helper.Template) and url: url = cv.template(url) url.hass = hass try: url = url.async_render(parse_result=False) except TemplateError as err: _LOGGER.error("Error parsing template %s: %s", url, err) return {CONF_STILL_IMAGE_URL: "template_error"}, None verify_ssl = info.get(CONF_VERIFY_SSL) auth = generate_auth(info) try: async_client = get_async_client(hass, verify_ssl=verify_ssl)
93,650
294,616
139
homeassistant/components/generic/config_flow.py
63
27
async def async_test_still(hass, info) -> tuple[dict[str, str], str | None]: fmt = None if not (url := info.get(CONF_STILL_IMAGE_URL)): return {}, None if not isinstance(url, template_helper.Template) and url: url = cv.template(url) url.hass = hass try: url = url.async_render(parse_result=False) except TemplateError as err: _LOGGER.error("Error parsing template %s: %s", url, err) return {CONF_STILL_IMAGE_URL: "template_error"}, None verify_ssl = info.get(CONF_VERIFY_SSL) au
Generic IP Camera configflow 2 (#52360) Co-authored-by: J. Nick Koston <[email protected]>
async_test_still
c1a2be72fc8b76b55cfde1823c5688100e397369
core
config_flow.py
11
40
https://github.com/home-assistant/core.git
8
253
1
50
208
Python
{ "docstring": "Verify that the still image is valid before we create an entity.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
async def async_test_still(hass, info) -> tuple[dict[str, str], str | None]: fmt = None if not (url := info.get(CONF_STILL_IMAGE_URL)): return {}, None if not isinstance(url, template_helper.Template) and url: url = cv.template(url) url.hass = hass try: url = url.async_render(parse_result=False) except TemplateError as err: _LOGGER.error("Error parsing template %s: %s", url, err) return {CONF_STILL_IMAGE_URL: "template_error"}, None verify_ssl = info.get(CONF_VERIFY_SSL) auth = generate_auth(info) try: async_client = get_async_client(hass, verify_ssl=verify_ssl)
55,723
219,698
30
python3.10.4/Lib/_pydecimal.py
9
6
def normalize(self, a): a = _convert_other(a, raiseit=True) return a.norm
add python 3.10.4 for windows
normalize
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
_pydecimal.py
9
3
https://github.com/XX-net/XX-Net.git
1
27
0
9
44
Python
{ "docstring": "normalize reduces an operand to its simplest form.\n\n Essentially a plus operation with all trailing zeros removed from the\n result.\n\n >>> ExtendedContext.normalize(Decimal('2.1'))\n Decimal('2.1')\n >>> ExtendedContext.normalize(Decimal('-2.0'))\n Decimal('-2')\n >>> ExtendedContext.normalize(Decimal('1.200'))\n Decimal('1.2')\n >>> ExtendedContext.normalize(Decimal('-120'))\n Decimal('-1.2E+2')\n >>> ExtendedContext.normalize(Decimal('120.00'))\n Decimal('1.2E+2')\n >>> ExtendedContext.normalize(Decimal('0.00'))\n Decimal('0')\n >>> ExtendedContext.normalize(6)\n Decimal('6')\n ", "language": "en", "n_whitespaces": 160, "n_words": 41, "vocab_size": 35 }
def normalize(self, a): a = _convert_other(a, raiseit=True) return a.normalize(context=self)
25,698
116,214
74
mindsdb/integrations/handlers/druid_handler/druid_handler.py
24
10
def get_tables(self) -> StatusResponse: query = result = self.native_query(query) df = result.data_frame
implemented the get_tables() and get_columns() methods
get_tables
9a0e918bba3439959112a7fd8e5210276b5ac255
mindsdb
druid_handler.py
12
15
https://github.com/mindsdb/mindsdb.git
1
55
0
17
103
Python
{ "docstring": "\n Return list of entities that will be accessible as tables.\n Returns:\n HandlerResponse\n \n SELECT *\n FROM INFORMATION_SCHEMA.TABLES\n ", "language": "en", "n_whitespaces": 79, "n_words": 16, "vocab_size": 16 }
def get_tables(self) -> StatusResponse: query = result = self.native_query(query) df = result.data_frame df = df[['TABLE_NAME' 'TABLE_TYPE']] result.data_frame = df.rename(columns={'TABLE_NAME': 'table_name', 'TABLE_TYPE': 'table_type'}) return result
@require_torch @require_vision
6,261
34,338
487
tests/test_feature_extraction_vilt.py
131
27
def get_expected_values(self, image_inputs, batched=False): if not batched: image = image_inputs[0] if isinstance(image, Image.Image): w, h = image.size else: h, w = image.shape[1], image.shape[2] scale = self.size / min(w, h) if h < w: newh, neww = self.size, scale * w else: newh, neww = scale * h, self.size max_size = int((1333 / 800) * self.size) if max(newh, neww) > max_size: scale = max_size / max(newh, neww) newh = newh * scale neww = neww * scale newh, neww = int(newh + 0.5), int(neww + 0.5) expected_height, expected_width = ( newh // self.size_divisor * self.size_divisor, neww // self.size_divisor * self.size_divisor, ) else: expected_values = [] for image in image_inputs: expecte
Add ViLT (#14895) * First commit * Add conversion script * Make conversion script work for base model * More improvements * Update conversion script, works for vqa * Add indexing argument to meshgrid * Make conversion script work for ViltForPreTraining * Add ViltForPreTraining to docs * Fix device issue * Add processor * Add MinMaxResize to feature extractor * Implement call method of ViltProcessor * Fix tests * Add integration test * Add loss calculation for VQA * Improve tests * Improve some more tests * Debug tests * Small improvements * Add support for attention_mask * Remove mask_it * Add pixel_mask * Add tests for ViltFeatureExtractor * Improve tests * Add ViltForNaturalLanguageVisualReasoning * Add ViltForNaturalLanguageVisualReasoning to conversion script * Minor fixes * Add support for image_embeds, update docstrings to markdown * Update docs to markdown * Improve conversion script * Rename ViltForPreTraining to ViltForMaskedLM * Improve conversion script * Convert docstrings to markdown * Fix code example of retrieval model * Properly convert masked language model * Add integration test for nlvr * Fix code quality * Apply suggestions from code review * Add copied from statements * Fix pretrained_config_archive_map * Fix docs * Add model to README * Apply suggestions from code review Co-authored-by: Sylvain Gugger <[email protected]> * Apply more suggestions from code review * Make code more readable * Add ViltForNaturalLanguageVisualReasoning to the tests * Rename ViltForVisualQuestionAnswering to ViltForQuestionAnswering * Replace pixel_values_2 by single tensor * Add hidden_states and attentions * Fix one more test * Fix all tests * Update year * Fix rebase issues * Fix another rebase issue * Remove ViltForPreTraining from auto mapping * Rename ViltForImageRetrievalTextRetrieval to ViltForImageAndTextRetrieval * Make it possible to use BertTokenizerFast in the processor * Use BertTokenizerFast by default * Rename ViltForNaturalLanguageVisualReasoning, define custom model output Co-authored-by: Sylvain Gugger <[email protected]>
get_expected_values
ac227093e41cecb07c7e0f2fc9a504850907bd06
transformers
test_feature_extraction_vilt.py
15
30
https://github.com/huggingface/transformers.git
6
249
1
69
409
Python
{ "docstring": "\n This function computes the expected height and width when providing images to ViltFeatureExtractor,\n assuming do_resize is set to True with a scalar size and size_divisor.\n ", "language": "en", "n_whitespaces": 47, "n_words": 25, "vocab_size": 23 }
def get_expected_values(self, image_inputs, batched=False): if not batched: image = image_inputs[0] if isinstance(image, Image.Image): w, h = image.size else: h, w = image.shape[1], image.shape[2] scale = self.size / min(w, h) if h < w: newh, neww = self.size, scale * w else: newh, neww = scale * h, self.size max_size = int((1333 / 800) * self.size) if max(newh, neww) > max_size: scale = max_size / max(newh, neww) newh = newh * scale neww = neww * scale newh, neww = int(newh + 0.5), int(neww + 0.5) expected_height, expected_width = ( newh // self.size_divisor * self.size_divisor, neww // self.size_divisor * self.size_divisor, ) else: expected_values = [] for image in image_inputs: expected_height, expected_width = self.get_expected_values([image]) expected_values.append((expected_height, expected_width)) expected_height = max(expected_values, key=lambda item: item[0])[0] expected_width = max(expected_values, key=lambda item: item[1])[1] return expected_height, expected_width @require_torch @require_vision
70,355
244,366
358
mmdet/models/detectors/base.py
90
22
def preprocss_testing_data(self, data): num_augs = len(data[0]['img']) batch_size = len(data) aug_batch_imgs = [] aug_batch_data_samples = [] # adjust `images` and `data_samples` to a list of list # outer list is test-time augmentation and inter list # is batch dimension for aug_index in range(num_augs): batch_imgs = [] batch_data_samples = [] for batch_index in range(batch_size): single_img = data[batch_index]['img'][aug_index] # to gpu and normalize single_img = single_img.to(self.device) if self.to_rgb and single_img[0].size(0) == 3: single_img = single_img[[2, 1, 0], ...] single_img = (single_img - self.pixel_mean) / self.pixel_std batch_imgs.append(single_img) batch_data_samples.append( data[batch_index]
Simplify api of one-stage detector
preprocss_testing_data
9c5b3331ac8edbfa328922fbab45c382380da540
mmdetection
base.py
14
20
https://github.com/open-mmlab/mmdetection.git
5
164
0
61
265
Python
{ "docstring": " Process input data during training and testing phases.\n Args:\n data (list[dict]): The data to be processed, which\n comes from dataloader. The list indicate the batch dimension.\n Each dict contains these keys:\n\n - `img` (list[Tensor]): Image tensor with different test-time\n augmentation.\n - `data_sample` (list[:obj:`GeneralData`]): Meta information\n and annotations under different test-time augmentation.\n\n\n Returns:\n tuple: It should contain 2 items.\n\n - aug_batch_imgs (list[Tensor]): List of batch image\n tensor. The list indicate the test-time augmentations.\n Note that the batch size always is 1\n when do the augtest.\n - aug_batch_data_samples\n (list[list[:obj:`GeneralData`]], Optional):\n The Data Samples. It usually includes information such as\n `gt_instance`. Return None If the input datas does not\n contain `data_sample`. The outer list indicate the\n number of augmentations and inter list indicate the\n batch dimension.\n ", "language": "en", "n_whitespaces": 457, "n_words": 123, "vocab_size": 86 }
def preprocss_testing_data(self, data): num_augs = len(data[0]['img']) batch_size = len(data) aug_batch_imgs = [] aug_batch_data_samples = [] # adjust `images` and `data_samples` to a list of list # outer list is test-time augmentation and inter list # is batch dimension for aug_index in range(num_augs): batch_imgs = [] batch_data_samples = [] for batch_index in range(batch_size): single_img = data[batch_index]['img'][aug_index] # to gpu and normalize single_img = single_img.to(self.device) if self.to_rgb and single_img[0].size(0) == 3: single_img = single_img[[2, 1, 0], ...] single_img = (single_img - self.pixel_mean) / self.pixel_std batch_imgs.append(single_img) batch_data_samples.append( data[batch_index]['data_sample'][aug_index]) aug_batch_imgs.append(stack_batch(batch_imgs)) aug_batch_data_samples.append(batch_data_samples) return aug_batch_imgs, aug_batch_data_samples
27,573
124,307
22
python/ray/widgets/render.py
8
7
def list_templates() -> List[pathlib.Path]: return (pathlib.Path(__file__).parent / "templates").glob("*.html.j2")
[Core] Add HTML reprs for `ClientContext` and `WorkerContext` (#25730)
list_templates
ea47d97a548504bdb6ff1afdb1021b0bc54d5dfa
ray
render.py
12
8
https://github.com/ray-project/ray.git
1
30
0
8
54
Python
{ "docstring": "List the available HTML templates.\n\n Returns:\n List[pathlib.Path]: A list of files with .html.j2 extensions inside\n ./templates/\n ", "language": "en", "n_whitespaces": 56, "n_words": 16, "vocab_size": 16 }
def list_templates() -> List[pathlib.Path]: return (pathlib.Path(__file__).parent / "templates").glob("*.html.j2")
78,599
266,796
21
test/lib/ansible_test/_internal/python_requirements.py
15
6
def usable_pip_file(path): # type: (t.Optional[str
ansible-test - Code cleanup and refactoring. (#77169) * Remove unnecessary PyCharm ignores. * Ignore intentional undefined attribute usage. * Add missing type hints. Fix existing type hints. * Fix docstrings and comments. * Use function to register completion handler. * Pass strings to display functions. * Fix CompositeAction handling of dest argument. * Use consistent types in expressions/assignments. * Use custom function to keep linters happy. * Add missing raise for custom exception. * Clean up key/value type handling in cloud plugins. * Use dataclass instead of dict for results. * Add custom type_guard function to check lists. * Ignore return type that can't be checked (yet). * Avoid changing types on local variables.
usable_pip_file
a06fa496d3f837cca3c437ab6e9858525633d147
ansible
python_requirements.py
11
2
https://github.com/ansible/ansible.git
3
32
0
13
56
Python
{ "docstring": "Return True if the specified pip file is usable, otherwise False.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
def usable_pip_file(path): # type: (t.Optional[str]) -> bool return bool(path) and os.path.exists(path) and bool(os.path.getsize(path)) # Cryptography
@keras_export('keras.metrics.categorical_accuracy') @tf.__internal__.dispatch.add_dispatch_support
79,764
268,903
26
keras/metrics/metrics.py
23
16
def binary_accuracy(y_true, y_pred, threshold=0.5): y_pred = tf.convert_to_tensor(y_pred) threshold = tf.cast(threshold, y_pred.dtype) y_pred = tf.cast(y_pred > threshol
reverting binary accuracy to original
binary_accuracy
8bb1b365ca6bb21b32a1ee1654eecb02570970ac
keras
metrics.py
9
5
https://github.com/keras-team/keras.git
1
67
1
19
123
Python
{ "docstring": "Calculates how often predictions match binary labels.\n\n Standalone usage:\n >>> y_true = [[1], [1], [0], [0]]\n >>> y_pred = [[1], [1], [0], [0]]\n >>> m = tf.keras.metrics.binary_accuracy(y_true, y_pred)\n >>> assert m.shape == (4,)\n >>> m.numpy()\n array([1., 1., 1., 1.], dtype=float32)\n\n Args:\n y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.\n y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.\n threshold: (Optional) Float representing the threshold for deciding whether\n prediction values are 1 or 0.\n\n Returns:\n Binary accuracy values. shape = `[batch_size, d0, .. dN-1]`\n ", "language": "en", "n_whitespaces": 113, "n_words": 86, "vocab_size": 61 }
def binary_accuracy(y_true, y_pred, threshold=0.5): y_pred = tf.convert_to_tensor(y_pred) threshold = tf.cast(threshold, y_pred.dtype) y_pred = tf.cast(y_pred > threshold, y_pred.dtype) return backend.mean(tf.equal(y_true, y_pred), axis=-1) @keras_export('keras.metrics.categorical_accuracy') @tf.__internal__.dispatch.add_dispatch_support
267
2,224
100
packages/syft/src/syft/core/node/common/node_service/oblv/oblv_messages.py
13
9
def _object2proto(self) -> SyftOblvClient_PB: return SyftOblvClient_PB( token=self.token, oblivious_user_id=self.oblivious_user_id, cookies=self.cookies, h
Changes for publishing data to enclave
_object2proto
fd3b9772cb97127f9f356c1e854dc3b4a436402d
PySyft
oblv_messages.py
9
20
https://github.com/OpenMined/PySyft.git
1
48
0
13
71
Python
{ "docstring": "Returns a protobuf serialization of self.\n As a requirement of all objects which inherit from Serializable,\n this method transforms the current object into the corresponding\n Protobuf object so that it can be further serialized.\n :return: returns a protobuf object\n :rtype: SyftOblvClient_PB\n .. note::\n This method is purely an internal method. Please use serialize(object) or one of\n the other public serialization methods if you wish to serialize an\n object.\n ", "language": "en", "n_whitespaces": 150, "n_words": 68, "vocab_size": 56 }
def _object2proto(self) -> SyftOblvClient_PB: return SyftOblvClient_PB( token=self.token, oblivious_user_id=self.oblivious_user_id, cookies=self.cookies, headers=self.headers, timeout=self.timeout, verify_ssl=self.verify_ssl, )
17,696
83,633
189
zerver/tests/test_digest.py
50
28
def test_bulk_handle_digest_email_skips_deactivated_users(self) -> None: realm = get_realm("zulip") hamlet = self.example_user("hamlet") user_ids = list( UserProfile.objects.filter(is_bot=False, realm=realm).values_list("id", flat=True) ) do_deactivate_us
digest: Don't send emails to deactivated users, even if queued.
test_bulk_handle_digest_email_skips_deactivated_users
fcf82bf0477d7b5c6fe6d26f2458a5acef43dae2
zulip
test_digest.py
13
21
https://github.com/zulip/zulip.git
4
129
0
41
213
Python
{ "docstring": "\n A user id may be added to the queue before the user is deactivated. In such a case,\n the function responsible for sending the email should correctly skip them.\n ", "language": "en", "n_whitespaces": 51, "n_words": 29, "vocab_size": 25 }
def test_bulk_handle_digest_email_skips_deactivated_users(self) -> None: realm = get_realm("zulip") hamlet = self.example_user("hamlet") user_ids = list( UserProfile.objects.filter(is_bot=False, realm=realm).values_list("id", flat=True) ) do_deactivate_user(hamlet, acting_user=None) with mock.patch("zerver.lib.digest.enough_traffic", return_value=True), mock.patch( "zerver.lib.digest.send_future_email" ) as mock_send_email: bulk_handle_digest_email(user_ids, 1) emailed_user_ids = [ call_args[1]["to_user_ids"][0] for call_args in mock_send_email.call_args_list ] self.assertEqual( set(emailed_user_ids), set(user_id for user_id in user_ids if user_id != hamlet.id) )
43,388
181,599
263
tests/driver_tests.py
62
15
def test_driver_4(): args_list = [ 'tests/tests.csv', '-is', ',', '-target', 'class', '-g', '1', '-p', '2', '-cv', '3', '-s', '42', '-config', 'TPOT light', '-v', '3' ] args = _get_arg_parser
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
test_driver_4
388616b6247ca4ea8de4e2f340d6206aee523541
tpot
driver_tests.py
17
23
https://github.com/EpistasisLab/tpot.git
2
123
0
51
229
Python
{ "docstring": "Assert that the tpot_driver() in TPOT driver outputs normal result with verbosity = 3.", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 14 }
def test_driver_4(): args_list = [ 'tests/tests.csv', '-is', ',', '-target', 'class', '-g', '1', '-p', '2', '-cv', '3', '-s', '42', '-config', 'TPOT light', '-v', '3' ] args = _get_arg_parser().parse_args(args_list) with captured_output() as (out, err): tpot_driver(args) ret_stdout = out.getvalue() assert "TPOT settings" in ret_stdout assert "Final Pareto front testing scores" in ret_stdout try: ret_val = float(ret_stdout.split('\n')[-2].split('\t')[1]) except Exception: ret_val = -float('inf') assert ret_val > 0.0
73,181
249,884
150
tests/handlers/test_sso.py
55
18
async def test_set_avatar(self) -> None: handler = self.hs.get_sso_handler() # Create a new user to set avatar for reg_handler = s
Add support for handling avatar with SSO login (#13917) This commit adds support for handling a provided avatar picture URL when logging in via SSO. Signed-off-by: Ashish Kumar <[email protected]> Fixes #9357.
test_set_avatar
09de2aecb05cb46e0513396e2675b24c8beedb68
synapse
test_sso.py
12
11
https://github.com/matrix-org/synapse.git
1
92
0
45
158
Python
{ "docstring": "Tests successfully setting the avatar of a newly created user", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
async def test_set_avatar(self) -> None: handler = self.hs.get_sso_handler() # Create a new user to set avatar for reg_handler = self.hs.get_registration_handler() user_id = self.get_success(reg_handler.register_user(approved=True)) self.assertTrue( self.get_success(handler.set_avatar(user_id, "http://my.server/me.png")) ) # Ensure avatar is set on this newly created user, # so no need to compare for the exact image profile_handler = self.hs.get_profile_handler() profile = self.get_success(profile_handler.get_profile(user_id)) self.assertIsNot(profile["avatar_url"], None)
27,945
125,674
130
python/ray/tune/examples/wandb_example.py
28
14
def tune_decorated(api_key_file): tuner = tune.Tuner( decorated_train_function, tune_config=tune.TuneConfig( metric="loss", mode="min",
[air/tuner/docs] Update docs for Tuner() API 2a: Tune examples (non-docs) (#26931) Splitting up #26884: This PR includes changes to use Tuner() instead of tune.run() for all examples included in python/ray/tune/examples Signed-off-by: xwjiang2010 <[email protected]> Signed-off-by: Kai Fricke <[email protected]> Co-authored-by: xwjiang2010 <[email protected]> Co-authored-by: Richard Liaw <[email protected]>
tune_decorated
8d7b865614f3635def12c42b653f8acd8b4ae56a
ray
wandb_example.py
14
14
https://github.com/ray-project/ray.git
1
87
0
28
136
Python
{ "docstring": "Example for using the @wandb_mixin decorator with the function API", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 9 }
def tune_decorated(api_key_file): tuner = tune.Tuner( decorated_train_function, tune_config=tune.TuneConfig( metric="loss", mode="min", ), param_space={ "mean": tune.grid_search([1, 2, 3, 4, 5]), "sd": tune.uniform(0.2, 0.8), "wandb": {"api_key_file": api_key_file, "project": "Wandb_example"}, }, ) tuner.fit()
12,200
60,540
45
.venv/lib/python3.8/site-packages/pip/_internal/cli/parser.py
16
8
def format_usage(self, usage): # type: (str) -> str msg = "\nUsage: {}\n".format(self.indent_lines(text
upd; format
format_usage
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
transferlearning
parser.py
12
3
https://github.com/jindongwang/transferlearning.git
1
30
0
15
56
Python
{ "docstring": "\n Ensure there is only one newline between usage and the first heading\n if there is no description.\n ", "language": "en", "n_whitespaces": 39, "n_words": 17, "vocab_size": 15 }
def format_usage(self, usage): # type: (str) -> str msg = "\nUsage: {}\n".format(self.indent_lines(textwrap.dedent(usage), " ")) return msg
48,817
198,195
112
sympy/matrices/expressions/matexpr.py
35
16
def from_index_summation(expr, first_index=None, last_index=None, dimensions=None): r from sympy.tensor.array.expressions.from_indexed_to_array import convert_index
Rename files for array expression conversions in order to avoid naming conflicts in TAB-completion of the corresponding functions
from_index_summation
a69c49bec6caf2cb460dc4eedf0fec184db92f0e
sympy
matexpr.py
9
50
https://github.com/sympy/sympy.git
3
86
0
28
126
Python
{ "docstring": "\n Parse expression of matrices with explicitly summed indices into a\n matrix expression without indices, if possible.\n\n This transformation expressed in mathematical notation:\n\n `\\sum_{j=0}^{N-1} A_{i,j} B_{j,k} \\Longrightarrow \\mathbf{A}\\cdot \\mathbf{B}`\n\n Optional parameter ``first_index``: specify which free index to use as\n the index starting the expression.\n\n Examples\n ========\n\n >>> from sympy import MatrixSymbol, MatrixExpr, Sum\n >>> from sympy.abc import i, j, k, l, N\n >>> A = MatrixSymbol(\"A\", N, N)\n >>> B = MatrixSymbol(\"B\", N, N)\n >>> expr = Sum(A[i, j]*B[j, k], (j, 0, N-1))\n >>> MatrixExpr.from_index_summation(expr)\n A*B\n\n Transposition is detected:\n\n >>> expr = Sum(A[j, i]*B[j, k], (j, 0, N-1))\n >>> MatrixExpr.from_index_summation(expr)\n A.T*B\n\n Detect the trace:\n\n >>> expr = Sum(A[i, i], (i, 0, N-1))\n >>> MatrixExpr.from_index_summation(expr)\n Trace(A)\n\n More complicated expressions:\n\n >>> expr = Sum(A[i, j]*B[k, j]*A[l, k], (j, 0, N-1), (k, 0, N-1))\n >>> MatrixExpr.from_index_summation(expr)\n A*B.T*A.T\n ", "language": "en", "n_whitespaces": 330, "n_words": 133, "vocab_size": 90 }
def from_index_summation(expr, first_index=None, last_index=None, dimensions=None): r from sympy.tensor.array.expressions.from_indexed_to_array import convert_indexed_to_array from sympy.tensor.array.expressions.from_array_to_matrix import convert_array_to_matrix first_indices = [] if first_index is not None: first_indices.append(first_index) if last_index is not None: first_indices.append(last_index) arr = convert_indexed_to_array(expr, first_indices=first_indices) return convert_array_to_matrix(arr)
25,253
114,699
46
mindsdb/integrations/mysql_handler/mysql_handler.py
18
6
def get_views(self):
Update mysql handler
get_views
5c2ce68a8eb8b992ab841db3d3a6b4694ecd244b
mindsdb
mysql_handler.py
9
4
https://github.com/mindsdb/mindsdb.git
1
20
0
16
43
Python
{ "docstring": "\n Get more information about specific database views\n ", "language": "en", "n_whitespaces": 22, "n_words": 7, "vocab_size": 7 }
def get_views(self): q = f"SHOW FULL TABLES IN {self.database} WHERE TABLE_TYPE LIKE 'VIEW';" result = self.native_query(q) return result
30,118
133,814
93
rllib/agents/qmix/qmix_policy.py
55
18
def _mac(model, obs, h): B, n_agents = obs.size(0), obs.size(1) if not isinstance(obs, dict): obs = {"obs": obs} obs_agents_as_batches = {k: _drop_agent_dim(v) for k, v in obs.items()} h_flat = [s.reshape([B * n_agents, -1]) for s in h] q_flat, h_flat = model(obs_agents_as_batches, h_flat, None) return q_flat.reshape([B, n_agents, -1]), [ s.reshape([B, n_agents, -1]) for s in h_fl
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
_mac
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
ray
qmix_policy.py
11
10
https://github.com/ray-project/ray.git
5
130
0
41
198
Python
{ "docstring": "Forward pass of the multi-agent controller.\n\n Args:\n model: TorchModelV2 class\n obs: Tensor of shape [B, n_agents, obs_size]\n h: List of tensors of shape [B, n_agents, h_size]\n\n Returns:\n q_vals: Tensor of shape [B, n_agents, n_actions]\n h: Tensor of shape [B, n_agents, h_size]\n ", "language": "en", "n_whitespaces": 85, "n_words": 41, "vocab_size": 23 }
def _mac(model, obs, h): B, n_agents = obs.size(0), obs.size(1) if not isinstance(obs, dict): obs = {"obs": obs} obs_agents_as_batches = {k: _drop_agent_dim(v) for k, v in obs.items()} h_flat = [s.reshape([B * n_agents, -1]) for s in h] q_flat, h_flat = model(obs_agents_as_batches, h_flat, None) return q_flat.reshape([B, n_agents, -1]), [ s.reshape([B, n_agents, -1]) for s in h_flat ]
19,794
100,294
66
tools/alignments/jobs.py
31
12
def _get_count(self): has_meta = all(val is not None for val in self._alignments.video_meta_data.values()) retval = len(self._alignments.video_meta_data["pts_time"]) if has_meta else None logger.debug("Frame count from alignments file: (has
alignments tool - Don't re-analyze video if metadata in alignments
_get_count
30872ef265c0fc29465f4c3a0778d0049f8c3897
faceswap
jobs.py
13
5
https://github.com/deepfakes/faceswap.git
3
56
0
27
91
Python
{ "docstring": " If the alignments file has been run through the manual tool, then it will hold video\n meta information, meaning that the count of frames in the alignment file can be relied\n on to be accurate.\n\n Returns\n -------\n int or ``None``\n For video input which contain video meta-data in the alignments file then the count of\n frames is returned. In all other cases ``None`` is returned\n ", "language": "en", "n_whitespaces": 122, "n_words": 65, "vocab_size": 47 }
def _get_count(self): has_meta = all(val is not None for val in self._alignments.video_meta_data.values()) retval = len(self._alignments.video_meta_data["pts_time"]) if has_meta else None logger.debug("Frame count from alignments file: (has_meta: %s, %s", has_meta, retval) return retval
43,447
181,659
38
tests/one_hot_encoder_tests.py
8
6
def test_sparse1_with_non_sparse_components(): fit_then_transform(
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
test_sparse1_with_non_sparse_components
388616b6247ca4ea8de4e2f340d6206aee523541
tpot
one_hot_encoder_tests.py
9
6
https://github.com/EpistasisLab/tpot.git
1
23
0
8
38
Python
{ "docstring": "Test fit_transform a sparse matrix with specifying categorical_features.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
def test_sparse1_with_non_sparse_components(): fit_then_transform( sparse1_paratial_1h.todense(), sparse1, categorical_features=[True, False] )
38,532
160,160
78
numpy/f2py/tests/test_f2py2e.py
41
21
def test_mod_gen_f77(capfd, hello_world_f90, monkeypatch): MNAME = "hi" foutl = get_io_paths(hello_world_f90, mname=MNAME) ipath = foutl.f90inp monkeypatch.setattr(sys, "argv", f'f2py {ipath} -m {MNAME}'.split()) with util.switchdir(ipath.parent): f2pycli() # Always generate C module assert Path.exists(fou
TST: Initialize f2py2e tests of the F2PY CLI (#20668) Increases F2PY coverage by around 15 percent. For the CLI itself it covers the major features (around 70 percent), with the exception of mostly numpy.distutils stuff. More importantly, sets the groundwork for #20056, in that passing the same testsuite should indicate feature parity.
test_mod_gen_f77
729ad4f92420231e2a7009b3223c6c7620b8b808
numpy
test_f2py2e.py
11
9
https://github.com/numpy/numpy.git
1
74
0
37
134
Python
{ "docstring": "Checks the generation of files based on a module name\n CLI :: -m\n ", "language": "en", "n_whitespaces": 19, "n_words": 13, "vocab_size": 13 }
def test_mod_gen_f77(capfd, hello_world_f90, monkeypatch): MNAME = "hi" foutl = get_io_paths(hello_world_f90, mname=MNAME) ipath = foutl.f90inp monkeypatch.setattr(sys, "argv", f'f2py {ipath} -m {MNAME}'.split()) with util.switchdir(ipath.parent): f2pycli() # Always generate C module assert Path.exists(foutl.cmodf) # File contains a function, check for F77 wrappers assert Path.exists(foutl.wrap77)
78,577
266,774
217
test/lib/ansible_test/_internal/delegation.py
51
31
def delegate(args, host_state, exclude, require): # type: (CommonConfig, HostState, t.List[str], t.List[str]) -> None assert isinstance(args, EnvironmentConfig) with delegation_context(args, host_state): if isinstance(args, TestConfig): args.metadata.ci_provider = get_ci_provider().code make_dirs(ResultType.TMP.path) with tempfile.NamedTemporaryFile(prefix='metadata-', suffix='.json', dir=ResultType.TMP.path) as metadata_fd: args.metadata_path = os.path.join(ResultType.TMP.relative_path, os.path.basename(metadata_fd.name)) args.metadata.to_file(args.metadata_path) try: delegate_command(args, host_state, exclude, require) finally: args.metadata_path = None else: delegate_command(args, host_state, exclude, require)
ansible-test - Code cleanup and refactoring. (#77169) * Remove unnecessary PyCharm ignores. * Ignore intentional undefined attribute usage. * Add missing type hints. Fix existing type hints. * Fix docstrings and comments. * Use function to register completion handler. * Pass strings to display functions. * Fix CompositeAction handling of dest argument. * Use consistent types in expressions/assignments. * Use custom function to keep linters happy. * Add missing raise for custom exception. * Clean up key/value type handling in cloud plugins. * Use dataclass instead of dict for results. * Add custom type_guard function to check lists. * Ignore return type that can't be checked (yet). * Avoid changing types on local variables.
delegate
a06fa496d3f837cca3c437ab6e9858525633d147
ansible
delegation.py
17
15
https://github.com/ansible/ansible.git
3
146
0
39
232
Python
{ "docstring": "Delegate execution of ansible-test to another environment.", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
def delegate(args, host_state, exclude, require): # type: (CommonConfig, HostState, t.List[str], t.List[str]) -> None assert isinstance(args, EnvironmentConfig) with delegation_context(args, host_state): if isinstance(args, TestConfig): args.metadata.ci_provider = get_ci_provider().code make_dirs(ResultType.TMP.path) with tempfile.NamedTemporaryFile(prefix='metadata-', suffix='.json', dir=ResultType.TMP.path) as metadata_fd: args.metadata_path = os.path.join(ResultType.TMP.relative_path, os.path.basename(metadata_fd.name)) args.metadata.to_file(args.metadata_path) try: delegate_command(args, host_state, exclude, require) finally: args.metadata_path = None else: delegate_command(args, host_state, exclude, require)
6,819
37,514
40
src/transformers/testing_utils.py
21
9
def require_torch_non_multi_gpu(test_case): if not is_torch_available(): return unittest.skip("test requires PyTorch")(test_case) import torch return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_cas
Update all require decorators to use skipUnless when possible (#16999)
require_torch_non_multi_gpu
57e6464ac9a31156f1c93e59107323e6ec01309e
transformers
testing_utils.py
12
5
https://github.com/huggingface/transformers.git
2
44
0
19
79
Python
{ "docstring": "\n Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).\n ", "language": "en", "n_whitespaces": 20, "n_words": 13, "vocab_size": 13 }
def require_torch_non_multi_gpu(test_case): if not is_torch_available(): return unittest.skip("test requires PyTorch")(test_case) import torch return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
53,412
212,800
91
PySimpleGUI.py
27
9
def _ReturnKeyHandler(self, event): # if the element is disabled, ignore the
If an element is disabled, then don't generate events for it (specifically for Input element in this case)
_ReturnKeyHandler
47047700dd76c40c4471635a7de5f770d5c23c02
PySimpleGUI
PySimpleGUI.py
9
7
https://github.com/PySimpleGUI/PySimpleGUI.git
3
38
0
21
65
Python
{ "docstring": "\n Internal callback for the ENTER / RETURN key. Results in calling the ButtonCallBack for element that has the return key bound to it, just as if button was clicked.\n\n :param event:\n :type event:\n\n ", "language": "en", "n_whitespaces": 62, "n_words": 33, "vocab_size": 29 }
def _ReturnKeyHandler(self, event): # if the element is disabled, ignore the event if self.Disabled: return MyForm = self.ParentForm button_element = self._FindReturnKeyBoundButton(MyForm) if button_element is not None: button_element.ButtonCallBack()
@pytest.fixture
39,556
164,367
32
pandas/tests/frame/conftest.py
17
8
def uint64_frame(): return DataFrame( {
⬆️ UPGRADE: Autoupdate pre-commit config (#45752) Co-authored-by: MarcoGorelli <[email protected]>
uint64_frame
419331c598a097896edae40bc0687e4127f97b6b
pandas
conftest.py
12
4
https://github.com/pandas-dev/pandas.git
1
45
1
15
80
Python
{ "docstring": "\n Fixture for DataFrame with uint64 values\n\n Columns are ['A', 'B']\n ", "language": "en", "n_whitespaces": 20, "n_words": 10, "vocab_size": 10 }
def uint64_frame(): return DataFrame( {"A": np.arange(3), "B": [2**63, 2**63 + 5, 2**63 + 10]}, dtype=np.uint64 ) @pytest.fixture
49,839
200,995
48
tests/annotations/tests.py
9
12
def test_null_annotation(self):
Refs #33476 -- Reformatted code with Black.
test_null_annotation
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
tests.py
16
5
https://github.com/django/django.git
1
39
0
9
66
Python
{ "docstring": "\n Annotating None onto a model round-trips\n ", "language": "en", "n_whitespaces": 21, "n_words": 6, "vocab_size": 6 }
def test_null_annotation(self): book = Book.objects.annotate( no_value=Value(None, output_field=IntegerField()) ).first() self.assertIsNone(book.no_value)
41,731
176,161
233
networkx/generators/small.py
51
5
def icosahedral_graph(create_using=None): description = [ "adjacencylist", "Platonic Icosahedral Graph", 12, [ [2, 6, 8, 9, 12], [3, 6, 7, 9], [4, 7, 9, 10], [5, 7, 10, 11], [
Docstrings for the small.py module (#5240) * added description for the first 5 small graphs * modified descriptions based on comment and added description for two more functions * added doctrings to all the functions * Minor touchups. Co-authored-by: Ross Barnowski <[email protected]>
icosahedral_graph
dec723f072eb997a497a159dbe8674cd39999ee9
networkx
small.py
9
22
https://github.com/networkx/networkx.git
1
117
0
37
150
Python
{ "docstring": "\n Returns the Platonic Icosahedral graph.\n\n The icosahedral graph has 12 nodes and 30 edges. It is a Platonic graph\n whose nodes have the connectivity of the icosahedron. It is undirected,\n regular and Hamiltonian [1]_.\n\n Parameters\n ----------\n create_using : NetworkX graph constructor, optional (default=nx.Graph)\n Graph type to create. If graph instance, then cleared before populated.\n\n Returns\n -------\n G : networkx Graph\n Icosahedral graph with 12 nodes and 30 edges.\n\n References\n ----------\n .. [1] https://mathworld.wolfram.com/IcosahedralGraph.html\n ", "language": "en", "n_whitespaces": 129, "n_words": 73, "vocab_size": 52 }
def icosahedral_graph(create_using=None): description = [ "adjacencylist", "Platonic Icosahedral Graph", 12, [ [2, 6, 8, 9, 12], [3, 6, 7, 9], [4, 7, 9, 10], [5, 7, 10, 11], [6, 7, 11, 12], [7, 12], [], [9, 10, 11, 12], [10], [11], [12], [], ], ] G = make_small_undirected_graph(description, create_using) return G
118,342
323,029
274
examples/biomedical/cblue/train_spo.py
93
44
def evaluate(model, criterion, metric, data_loader): model.eval() metric.reset() losses = [] for batch in tqdm(data_loader): input_ids, token_type_ids, position_ids, masks, ent_label, spo_label = batch max_batch_len = input_ids.shape[-1] ent_mask = paddle.unsqueeze(masks, axis=2) spo_mask = paddle.matmul(ent_mask, ent_mask, transpose_y=True) spo_mask = paddle.unsqueeze(spo_mask, axis=1) logits = model(input_ids, token_type_ids, position_ids) ent_loss = criterion( logits[0], ent_label[0], weight=ent_mask, reduction='sum') spo_loss = criterion( logits[1], spo_label[0], weight=spo_mask, reduction='sum') loss = ent_loss + spo_loss losses.append(loss.numpy()) lengths = paddle.sum(masks, axis=-1) correct = metric.compute(lengths, logits[0], logits[1], ent_label[1], spo_label[1]) metric.update(correct) results = metric.accumulate() print('eval loss: %.5f, entity f1: %.5f, spo f1: %.5f' % (np.mean(losses), results['entity'][2], results['spo'][2])) model.train() metric.reset()
[ehealth] fix problems for dynamic2static
evaluate
aa82dc06668ddca275e3a350d4c2793e4961086c
PaddleNLP
train_spo.py
12
26
https://github.com/PaddlePaddle/PaddleNLP.git
2
256
0
69
389
Python
{ "docstring": "\n Given a dataset, it evals model and compute the metric.\n Args:\n model(obj:`paddle.nn.Layer`): A model to classify texts.\n dataloader(obj:`paddle.io.DataLoader`): The dataset loader which generates batches.\n criterion(`paddle.nn.functional`): It can compute the loss.\n metric(obj:`paddle.metric.Metric`): The evaluation metric.\n ", "language": "en", "n_whitespaces": 72, "n_words": 34, "vocab_size": 29 }
def evaluate(model, criterion, metric, data_loader): model.eval() metric.reset() losses = [] for batch in tqdm(data_loader): input_ids, token_type_ids, position_ids, masks, ent_label, spo_label = batch max_batch_len = input_ids.shape[-1] ent_mask = paddle.unsqueeze(masks, axis=2) spo_mask = paddle.matmul(ent_mask, ent_mask, transpose_y=True) spo_mask = paddle.unsqueeze(spo_mask, axis=1) logits = model(input_ids, token_type_ids, position_ids) ent_loss = criterion( logits[0], ent_label[0], weight=ent_mask, reduction='sum') spo_loss = criterion( logits[1], spo_label[0], weight=spo_mask, reduction='sum') loss = ent_loss + spo_loss losses.append(loss.numpy()) lengths = paddle.sum(masks, axis=-1) correct = metric.compute(lengths, logits[0], logits[1], ent_label[1], spo_label[1]) metric.update(correct) results = metric.accumulate() print('eval loss: %.5f, entity f1: %.5f, spo f1: %.5f' % (np.mean(losses), results['entity'][2], results['spo'][2])) model.train() metric.reset()
51,174
205,717
66
django/db/models/options.py
16
6
def get_fields(self, include_parents=True, include_hidden=False): if include_parents is False: include_parents = PROXY_PARENTS return self._get_fields(
Refs #33476 -- Reformatted code with Black.
get_fields
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
options.py
8
6
https://github.com/django/django.git
2
35
0
15
55
Python
{ "docstring": "\n Return a list of fields associated to the model. By default, include\n forward and reverse fields, fields derived from inheritance, but not\n hidden fields. The returned fields can be changed using the parameters:\n\n - include_parents: include fields derived from inheritance\n - include_hidden: include fields that have a related_name that\n starts with a \"+\"\n ", "language": "en", "n_whitespaces": 123, "n_words": 53, "vocab_size": 40 }
def get_fields(self, include_parents=True, include_hidden=False): if include_parents is False: include_parents = PROXY_PARENTS return self._get_fields( include_parents=include_parents, include_hidden=include_hidden )
50,690
204,309
119
django/contrib/sessions/backends/file.py
48
13
def _key_to_file(self, session_key=None): if session_key is None: session_key
Refs #33476 -- Reformatted code with Black.
_key_to_file
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
file.py
10
6
https://github.com/django/django.git
3
56
0
41
96
Python
{ "docstring": "\n Get the file associated with this session key.\n ", "language": "en", "n_whitespaces": 23, "n_words": 8, "vocab_size": 8 }
def _key_to_file(self, session_key=None): if session_key is None: session_key = self._get_or_create_session_key() # Make sure we're not vulnerable to directory traversal. Session keys # should always be md5s, so they should never contain directory # components. if not set(session_key).issubset(VALID_KEY_CHARS): raise InvalidSessionKey("Invalid characters in session key") return os.path.join(self.storage_path, self.file_prefix + session_key)
12,436
61,197
200
.venv/lib/python3.8/site-packages/pip/_internal/utils/hashes.py
47
20
def check_against_chunks(self, chunks): # type: (Iterator[bytes]) -> None gots = {} for hash_name in self._allowed.keys(): try: gots[hash_name] = hashlib.new(hash_name) except (ValueError, TypeError): raise Installati
upd; format
check_against_chunks
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
transferlearning
hashes.py
14
14
https://github.com/jindongwang/transferlearning.git
7
101
0
38
167
Python
{ "docstring": "Check good hashes against ones built from iterable of chunks of\n data.\n\n Raise HashMismatch if none match.\n\n ", "language": "en", "n_whitespaces": 38, "n_words": 17, "vocab_size": 16 }
def check_against_chunks(self, chunks): # type: (Iterator[bytes]) -> None gots = {} for hash_name in self._allowed.keys(): try: gots[hash_name] = hashlib.new(hash_name) except (ValueError, TypeError): raise InstallationError(f"Unknown hash name: {hash_name}") for chunk in chunks: for hash in gots.values(): hash.update(chunk) for hash_name, got in gots.items(): if got.hexdigest() in self._allowed[hash_name]: return self._raise(gots)
70,252
244,125
169
mmdet/models/losses/cross_entropy_loss.py
61
19
def _expand_onehot_labels(labels, label_weights, label_channels, ignore_index): bin_labels = labels.new_full((labels.size(0), label_channels), 0) valid_mask = (labels >= 0) & (labels != ignore_index) inds = torch.nonzero( valid_mask & (labels < label_channels), as_tuple=False) if inds.numel() > 0: bin_labels[inds, labels[inds]] = 1 valid_mask = valid_mask.view(-1, 1).expand(
[Fix] Fix reduction=mean in CELoss. (#7449) * [Fix] Fix ignore in CELoss. * add ut * fix and add comments * add avg_non_ignore option * bce avg * fix lint
_expand_onehot_labels
3b2e9655631a2edd28bb94c640bd6a74c0bfad55
mmdetection
cross_entropy_loss.py
14
15
https://github.com/open-mmlab/mmdetection.git
3
147
0
42
223
Python
{ "docstring": "Expand onehot labels to match the size of prediction.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
def _expand_onehot_labels(labels, label_weights, label_channels, ignore_index): bin_labels = labels.new_full((labels.size(0), label_channels), 0) valid_mask = (labels >= 0) & (labels != ignore_index) inds = torch.nonzero( valid_mask & (labels < label_channels), as_tuple=False) if inds.numel() > 0: bin_labels[inds, labels[inds]] = 1 valid_mask = valid_mask.view(-1, 1).expand(labels.size(0), label_channels).float() if label_weights is None: bin_label_weights = valid_mask else: bin_label_weights = label_weights.view(-1, 1).repeat(1, label_channels) bin_label_weights *= valid_mask return bin_labels, bin_label_weights, valid_mask
80,852
271,751
227
keras/engine/training_test.py
55
24
def test_sequence_input_types(self, input_type): if not tf.executing_eagerly(): self.skipTest("Improved checking is only present in data_adapter.") xy_function, x_function = self._make_sequence_input_functions( input_type ) fit_kwargs, evaluate_kwargs, predict_kwargs = {}, {}, {} if input_type == "generator": fit_kwargs["steps_per_epoch"] = 4 evaluate_kwargs["steps"] = 4 predict_kwargs["steps"] = 4 model = test_utils.get_small_mlp(1, 1, 1) model.compile( loss="mse", optimizer="sgd", run_eagerly=test_utils.should_run_eagerly(), ) model.fit(xy_function(use_namedtuple=
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
test_sequence_input_types
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
training_test.py
10
20
https://github.com/keras-team/keras.git
3
144
0
44
239
Python
{ "docstring": "Ensure that namedtuples and tuples are plumbed identically.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
def test_sequence_input_types(self, input_type): if not tf.executing_eagerly(): self.skipTest("Improved checking is only present in data_adapter.") xy_function, x_function = self._make_sequence_input_functions( input_type ) fit_kwargs, evaluate_kwargs, predict_kwargs = {}, {}, {} if input_type == "generator": fit_kwargs["steps_per_epoch"] = 4 evaluate_kwargs["steps"] = 4 predict_kwargs["steps"] = 4 model = test_utils.get_small_mlp(1, 1, 1) model.compile( loss="mse", optimizer="sgd", run_eagerly=test_utils.should_run_eagerly(), ) model.fit(xy_function(use_namedtuple=False), **fit_kwargs) model.evaluate(xy_function(use_namedtuple=False), **evaluate_kwargs) model.predict(x_function(use_namedtuple=False), **predict_kwargs)
@frappe.whitelist()
14,704
68,000
70
erpnext/stock/utils.py
100
25
def get_stock_value_on(warehouse=None, posting_date=None, item_code=None): if not posting_date: posting_date = nowdate() values, condition = [posting_date], "" if warehouse: lft, rgt, is_group = frappe.db.get_value("Warehouse", w
style: format code with black
get_stock_value_on
494bd9ef78313436f0424b918f200dab8fc7c20b
erpnext
utils.py
13
35
https://github.com/frappe/erpnext.git
7
172
1
75
294
Python
{ "docstring": "\n\t\tSELECT item_code, stock_value, name, warehouse\n\t\tFROM `tabStock Ledger Entry` sle\n\t\tWHERE posting_date <= %s {0}\n\t\t\tand is_cancelled = 0\n\t\tORDER BY timestamp(posting_date, posting_time) DESC, creation DESC\n\t", "language": "en", "n_whitespaces": 21, "n_words": 26, "vocab_size": 26 }
def get_stock_value_on(warehouse=None, posting_date=None, item_code=None): if not posting_date: posting_date = nowdate() values, condition = [posting_date], "" if warehouse: lft, rgt, is_group = frappe.db.get_value("Warehouse", warehouse, ["lft", "rgt", "is_group"]) if is_group: values.extend([lft, rgt]) condition += "and exists (\ select name from `tabWarehouse` wh where wh.name = sle.warehouse\ and wh.lft >= %s and wh.rgt <= %s)" else: values.append(warehouse) condition += " AND warehouse = %s" if item_code: values.append(item_code) condition += " AND item_code = %s" stock_ledger_entries = frappe.db.sql( .format( condition ), values, as_dict=1, ) sle_map = {} for sle in stock_ledger_entries: if not (sle.item_code, sle.warehouse) in sle_map: sle_map[(sle.item_code, sle.warehouse)] = flt(sle.stock_value) return sum(sle_map.values()) @frappe.whitelist()
53,802
215,084
25
salt/modules/aixpkg.py
13
6
def _is_installed_rpm(name): log.debug(f"_is_installed_rpm '{name}'") cmd = ["/usr/bin/rpm", "-q", name] return __sal
Working tests for install
_is_installed_rpm
f1c37893caf90738288e789c3233ab934630254f
salt
aixpkg.py
9
4
https://github.com/saltstack/salt.git
1
32
0
13
62
Python
{ "docstring": "\n Returns True if the rpm package is installed. Otherwise returns False.\n ", "language": "en", "n_whitespaces": 18, "n_words": 11, "vocab_size": 11 }
def _is_installed_rpm(name): log.debug(f"_is_installed_rpm '{name}'") cmd = ["/usr/bin/rpm", "-q", name] return __salt__["cmd.retcode"](cmd) == 0
34,219
148,283
35
python/ray/_private/thirdparty/pathspec/util.py
45
14
def iter_tree_files(root, on_error=None, follow_links=None): if on_error is not None and not callable(on_error): raise TypeError("on_error:{!r} is not callable.".format(on_error)) if follow_links is None: follow_links = True for entry in _iter_tree_entries_next(os.path.abspath(root), '', {}, on_error, follow_links): if not entry.is_dir(follow_links): yield entry.path # Alias `iter_tree_files()` as `iter_tree()`. iter_tree = iter_tree_files
[Bugfix] fix invalid excluding of Black (#24042) - We should use `--force-exclude` when we pass code path explicitly https://black.readthedocs.io/en/stable/usage_and_configuration/the_basics.html?highlight=--force-exclude#command-line-options - Recover the files in `python/ray/_private/thirdparty` which has been formatted in the PR https://github.com/ray-project/ray/pull/21975 by mistake.
iter_tree_files
0e6c042e29cbbe429d81c9c1af3c75c261f00980
ray
util.py
12
8
https://github.com/ray-project/ray.git
6
81
0
36
136
Python
{ "docstring": "\n\tWalks the specified directory for all files.\n\n\t*root* (:class:`str`) is the root directory to search for files.\n\n\t*on_error* (:class:`~collections.abc.Callable` or :data:`None`)\n\toptionally is the error handler for file-system exceptions. It will be\n\tcalled with the exception (:exc:`OSError`). Reraise the exception to\n\tabort the walk. Default is :data:`None` to ignore file-system\n\texceptions.\n\n\t*follow_links* (:class:`bool` or :data:`None`) optionally is whether\n\tto walk symbolic links that resolve to directories. Default is\n\t:data:`None` for :data:`True`.\n\n\tRaises :exc:`RecursionError` if recursion is detected.\n\n\tReturns an :class:`~collections.abc.Iterable` yielding the path to\n\teach file (:class:`str`) relative to *root*.\n\t", "language": "en", "n_whitespaces": 77, "n_words": 90, "vocab_size": 59 }
def iter_tree_files(root, on_error=None, follow_links=None): if on_error is not None and not callable(on_error): raise TypeError("on_error:{!r} is not callable.".format(on_error)) if follow_links is None: follow_links = True for entry in _iter_tree_entries_next(os.path.abspath(root), '', {}, on_error, follow_links): if not entry.is_dir(follow_links): yield entry.path # Alias `iter_tree_files()` as `iter_tree()`. iter_tree = iter_tree_files
12,362
60,970
240
.venv/lib/python3.8/site-packages/pip/_internal/req/constructors.py
100
16
def _get_url_from_path(path, name): # type: (str, str) -> Optional[str] if _looks_like_path(name) and os.path.isdir(path): if is_installable_dir(path): return path_to_url(path) raise InstallationError( f"Directory {name!r} is not installable. Neither 'setup.py' " "nor 'pyproject.toml' found." ) if not is_archive_file(path): return None if os.path.isfile(path): return path_to_url(path) urlreq_parts = name.split('@', 1)
upd; format
_get_url_from_path
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
transferlearning
constructors.py
12
21
https://github.com/jindongwang/transferlearning.git
8
108
0
73
191
Python
{ "docstring": "\n First, it checks whether a provided path is an installable directory\n (e.g. it has a setup.py). If it is, returns the path.\n\n If false, check if the path is an archive file (such as a .whl).\n The function checks if the path is a file. If false, if the path has\n an @, it will treat it as a PEP 440 URL requirement and return the path.\n ", "language": "en", "n_whitespaces": 86, "n_words": 67, "vocab_size": 39 }
def _get_url_from_path(path, name): # type: (str, str) -> Optional[str] if _looks_like_path(name) and os.path.isdir(path): if is_installable_dir(path): return path_to_url(path) raise InstallationError( f"Directory {name!r} is not installable. Neither 'setup.py' " "nor 'pyproject.toml' found." ) if not is_archive_file(path): return None if os.path.isfile(path): return path_to_url(path) urlreq_parts = name.split('@', 1) if len(urlreq_parts) >= 2 and not _looks_like_path(urlreq_parts[0]): # If the path contains '@' and the part before it does not look # like a path, try to treat it as a PEP 440 URL req instead. return None logger.warning( 'Requirement %r looks like a filename, but the ' 'file does not exist', name ) return path_to_url(path)
53,808
215,091
228
tests/pytests/unit/modules/test_aixpkg.py
61
18
def test_install_fileset_with_bff_extension(): installp_call = MagicMock(return_value={"retcode": 0, "stdout": ""}) fileset_pkg_name = ( "/cecc/repos/aix72/TL3/BASE/installp/ppc/bos.rte.printers_7.2.2.0.bff" ) list_pkgs_mock = MagicMock( side_effect=[{"bos.rte.printers": "7.1.6.0"}, {"bos.rte.printers": "7.2.4.0"}] ) with patch("pathlib.Path.is_file", return_value=True): with patch.dict( aixpkg.__salt__, {"cmd.run_all": installp_call, "config.get": MagicMock(return_value=False)}, ), patch.object(aixpkg, "list_pkgs", list_pkgs_mock): result = aixpkg.install(fileset_pkg_name) assert installp_call.call_count ==
Working tests for install
test_install_fileset_with_bff_extension
f1c37893caf90738288e789c3233ab934630254f
salt
test_aixpkg.py
16
21
https://github.com/saltstack/salt.git
1
137
0
49
248
Python
{ "docstring": "\n Test install of fileset with bff extension\n ", "language": "en", "n_whitespaces": 14, "n_words": 7, "vocab_size": 7 }
def test_install_fileset_with_bff_extension(): installp_call = MagicMock(return_value={"retcode": 0, "stdout": ""}) fileset_pkg_name = ( "/cecc/repos/aix72/TL3/BASE/installp/ppc/bos.rte.printers_7.2.2.0.bff" ) list_pkgs_mock = MagicMock( side_effect=[{"bos.rte.printers": "7.1.6.0"}, {"bos.rte.printers": "7.2.4.0"}] ) with patch("pathlib.Path.is_file", return_value=True): with patch.dict( aixpkg.__salt__, {"cmd.run_all": installp_call, "config.get": MagicMock(return_value=False)}, ), patch.object(aixpkg, "list_pkgs", list_pkgs_mock): result = aixpkg.install(fileset_pkg_name) assert installp_call.call_count == 1 installp_call.assert_any_call( "/usr/sbin/installp -acYXg -d /cecc/repos/aix72/TL3/BASE/installp/ppc bos.rte.printers_7.2.2.0.bff", python_shell=False, ) expected = {"bos.rte.printers": {"old": "7.1.6.0", "new": "7.2.4.0"}} assert result == expected
14,123
66,174
4
erpnext/hr/doctype/leave_block_list/leave_block_list.py
9
7
def is_user_in_allow_list(block_list): return frappe.ses
style: format code with black
is_user_in_allow_list
494bd9ef78313436f0424b918f200dab8fc7c20b
erpnext
leave_block_list.py
9
6
https://github.com/frappe/erpnext.git
1
23
0
9
37
Python
{ "docstring": "select allow_user\n\t\tfrom `tabLeave Block List Allow` where parent=%s", "language": "en", "n_whitespaces": 7, "n_words": 9, "vocab_size": 9 }
def is_user_in_allow_list(block_list): return frappe.session.user in frappe.db.sql_list( """select allow_user from `tabLeave Block List Allow` where parent=%s""", block_list, )
54,495
216,276
91
tests/pytests/functional/transport/server/test_req_channel.py
34
15
def test_normalization(push_channel): types = { "list": list, } msgs = [ {"list": tuple([1, 2, 3])}, ] for msg in msgs: ret = push_channel.send(msg, timeout=5, tries=1)
Fix minion unit tests, specifically .../tests/pytests/test_minion.py
test_normalization
3c7e1ec1f08abd7cd1ba78ad7880acb6ba6fdce7
salt
test_req_channel.py
12
11
https://github.com/saltstack/salt.git
3
78
0
30
124
Python
{ "docstring": "\n Since we use msgpack, we need to test that list types are converted to lists\n ", "language": "en", "n_whitespaces": 22, "n_words": 15, "vocab_size": 13 }
def test_normalization(push_channel): types = { "list": list, } msgs = [ {"list": tuple([1, 2, 3])}, ] for msg in msgs: ret = push_channel.send(msg, timeout=5, tries=1) for key, value in ret["load"].items(): assert types[key] == type(value)
40,230
168,206
219
pandas/core/arrays/datetimes.py
73
24
def to_perioddelta(self, freq) -> TimedeltaArray: # Deprecaation GH#34853 warnings.warn( "to_perioddelta is deprecated and will be removed in a " "future version. " "Use `dtindex - dtindex.to_period(freq).to_timestamp()` instead.", FutureWarning, # stacklevel chosen to be correct for when called from DatetimeIndex stacklevel=find_stack_level(inspect.curre
PERF cache find_stack_level (#48023) cache stacklevel
to_perioddelta
2f8d0a36703e81e4dca52ca9fe4f58c910c1b304
pandas
datetimes.py
12
27
https://github.com/pandas-dev/pandas.git
2
87
0
63
152
Python
{ "docstring": "\n Calculate deltas between self values and self converted to Periods at a freq.\n\n Used for vectorized offsets.\n\n Parameters\n ----------\n freq : Period frequency\n\n Returns\n -------\n TimedeltaArray/Index\n ", "language": "en", "n_whitespaces": 90, "n_words": 26, "vocab_size": 25 }
def to_perioddelta(self, freq) -> TimedeltaArray: # Deprecation GH#34853 warnings.warn( "to_perioddelta is deprecated and will be removed in a " "future version. " "Use `dtindex - dtindex.to_period(freq).to_timestamp()` instead.", FutureWarning, # stacklevel chosen to be correct for when called from DatetimeIndex stacklevel=find_stack_level(inspect.currentframe()), ) from pandas.core.arrays.timedeltas import TimedeltaArray if self._ndarray.dtype != "M8[ns]": raise NotImplementedError("Only supported for nanosecond resolution.") i8delta = self.asi8 - self.to_period(freq).to_timestamp().asi8 m8delta = i8delta.view("m8[ns]") return TimedeltaArray(m8delta) # ----------------------------------------------------------------- # Properties - Vectorized Timestamp Properties/Methods
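Because the entry above only deprecates `to_perioddelta`, a minimal sketch of the replacement spelled out in its own warning message may help; the timestamps are arbitrary sample values.

import pandas as pd

dtindex = pd.date_range("2021-01-01 06:00", periods=3, freq="H")
# Deprecated spelling: dtindex.to_perioddelta("D")
# Replacement recommended by the warning above:
deltas = dtindex - dtindex.to_period("D").to_timestamp()
print(deltas)  # TimedeltaIndex of offsets from the start of each day (6h, 7h, 8h here)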
4,407
22,678
59
linear-algebra-python/src/lib.py
16
6
def set(self, components): if len(components) > 0:
refactor: clean code Signed-off-by: slowy07 <[email protected]>
set
f0af0c43340763724f139fa68aa1e5a9ffe458b4
Python
lib.py
11
5
https://github.com/geekcomputers/Python.git
2
28
0
16
50
Python
{ "docstring": "\n input: new components\n changes the components of the vector.\n replace the components with newer one.\n ", "language": "en", "n_whitespaces": 44, "n_words": 15, "vocab_size": 11 }
def set(self, components): if len(components) > 0: self.__components = components else: raise Exception("please give any vector")
24,742
112,742
123
nni/algorithms/compression/v2/pytorch/pruning/tools/base.py
43
24
def get_best_result(self) -> Optional[Tuple[Union[int, str], Module, Dict[str, Dict[str, Tensor]], Optional[float], List[Dict]]]: if self._best_task_id is not None: compact_model = torch.load(Path(self._log_dir_root, 'best_result', 'model.pth')) compact_model_masks = torch.load(Path(self._log_dir_root, 'best_result', 'masks.pth')) with Path(self._log_dir_root, 'best_result', 'config_list.json').open('r') as f: config_list = json_tricks.load(f) return self._best_task_id,
[Compression] fix typehints (#4800)
get_best_result
cbac2c5c0f7606aca8ccf08fbd418ffe3adfe427
nni
base.py
15
15
https://github.com/microsoft/nni.git
2
128
0
35
199
Python
{ "docstring": "\n Returns\n -------\n Optional[Tuple[int, Module, Dict[str, Dict[str, Tensor]], float, List[Dict]]]\n If self._best_task_id is not None,\n return best task id, best compact model, masks on the compact model, score, config list used in this task.\n ", "language": "en", "n_whitespaces": 84, "n_words": 33, "vocab_size": 29 }
def get_best_result(self) -> Optional[Tuple[Union[int, str], Module, Dict[str, Dict[str, Tensor]], Optional[float], List[Dict]]]: if self._best_task_id is not None: compact_model = torch.load(Path(self._log_dir_root, 'best_result', 'model.pth')) compact_model_masks = torch.load(Path(self._log_dir_root, 'best_result', 'masks.pth')) with Path(self._log_dir_root, 'best_result', 'config_list.json').open('r') as f: config_list = json_tricks.load(f) return self._best_task_id, compact_model, compact_model_masks, self._best_score, config_list return None
70,113
243,757
75
src/PIL/ImageFont.py
21
9
def set_variation_by_axes(self, axes): try: self.font.setvaraxes(
Improve exception traceback readability
set_variation_by_axes
2ae55ccbdad9c842929fb238ea1eb81d1f999024
Pillow
ImageFont.py
10
6
https://github.com/python-pillow/Pillow.git
2
33
0
21
58
Python
{ "docstring": "\n :param axes: A list of values for each axis.\n :exception OSError: If the font is not a variation font.\n ", "language": "en", "n_whitespaces": 41, "n_words": 19, "vocab_size": 19 }
def set_variation_by_axes(self, axes): try: self.font.setvaraxes(axes) except AttributeError as e: msg = "FreeType 2.9.1 or greater is required" raise NotImplementedError(msg) from e
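A minimal usage sketch for the Pillow method above; the font file name is a placeholder for any variable (variation) font on disk, and FreeType 2.9.1 or newer is required, as the error message in the code states.

from PIL import ImageFont

font = ImageFont.truetype("MyVariableFont.ttf", 36)  # hypothetical variable font file
print(font.get_variation_axes())  # inspect the available axes, e.g. weight/width
font.set_variation_by_axes([700])  # one value per axis, in the order reported above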
56,214
221,111
57
python3.10.4/Lib/bdb.py
14
9
def dispatch_line(self, frame): if self.stop_here(frame) or self.break_here(frame): self.user_line(frame) if self.quitting: raise BdbQuit return self.trace_dispatch
add python 3.10.4 for windows
dispatch_line
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
bdb.py
9
5
https://github.com/XX-net/XX-Net.git
4
40
0
13
66
Python
{ "docstring": "Invoke user function and return trace function for line event.\n\n If the debugger stops on the current line, invoke\n self.user_line(). Raise BdbQuit if self.quitting is set.\n Return self.trace_dispatch to continue tracing in this scope.\n ", "language": "en", "n_whitespaces": 62, "n_words": 34, "vocab_size": 32 }
def dispatch_line(self, frame): if self.stop_here(frame) or self.break_here(frame): self.user_line(frame) if self.quitting: raise BdbQuit return self.trace_dispatch
31,440
138,497
110
python/ray/data/impl/plan.py
36
7
def clear(self) -> None: self._in_blocks.clear() self._snapshot_blocks = None self._snapshot_stats = None #
[Datasets] [Out-of-Band Serialization: 2/3] Refactor `ExecutionPlan` to maintain complete lineage and eagerly unlink block references. (#23931) This PR refactors ExecutionPlan to maintain complete stage lineage, even for eagerly computed datasets, while ensuring that block references are unlinked as early as possible in order to more eagerly release block memory. This PR is the final precursor to adding the actual out-of-band serialization APIs (PR 3/3). The fully lineage has to be maintained, even for eagerly computed datasets, since the lineage is needed for out-of-band serialization of datasets.
clear
9ee24530abf1b5e3239869b5257dd7b678337b90
ray
plan.py
9
11
https://github.com/ray-project/ray.git
1
44
0
28
76
Python
{ "docstring": "Clear all cached block references of this plan, including input blocks.\n\n This will render the plan un-executable unless the root is a LazyBlockList.", "language": "en", "n_whitespaces": 29, "n_words": 23, "vocab_size": 22 }
def clear(self) -> None: self._in_blocks.clear() self._snapshot_blocks = None self._snapshot_stats = None # We're erasing the snapshot, so put all stages into the "after snapshot" # bucket. self._stages_after_snapshot = ( self._stages_before_snapshot + self._stages_after_snapshot ) self._stages_before_snapshot = []
76,190
260,331
446
sklearn/cluster/_birch.py
131
25
def _global_clustering(self, X=None): clusterer = self.n_clusters centroids = self.subcluster_centers_ compute_labels = (X is not None) and self.compute_la
MAINT validate parameters in Birch (#23593) Co-authored-by: Guillaume Lemaitre <[email protected]> Co-authored-by: jeremiedbb <[email protected]>
_global_clustering
24c2448cc7687fbacbc3a9af13f47a935dfcbeeb
scikit-learn
_birch.py
15
23
https://github.com/scikit-learn/scikit-learn.git
8
151
0
88
249
Python
{ "docstring": "\n Global clustering for the subclusters obtained after fitting\n ", "language": "en", "n_whitespaces": 23, "n_words": 8, "vocab_size": 8 }
def _global_clustering(self, X=None): clusterer = self.n_clusters centroids = self.subcluster_centers_ compute_labels = (X is not None) and self.compute_labels # Preprocessing for the global clustering. not_enough_centroids = False if isinstance(clusterer, Integral): clusterer = AgglomerativeClustering(n_clusters=self.n_clusters) # There is no need to perform the global clustering step. if len(centroids) < self.n_clusters: not_enough_centroids = True # To use in predict to avoid recalculation. self._subcluster_norms = row_norms(self.subcluster_centers_, squared=True) if clusterer is None or not_enough_centroids: self.subcluster_labels_ = np.arange(len(centroids)) if not_enough_centroids: warnings.warn( "Number of subclusters found (%d) by BIRCH is less " "than (%d). Decrease the threshold." % (len(centroids), self.n_clusters), ConvergenceWarning, ) else: # The global clustering step that clusters the subclusters of # the leaves. It assumes the centroids of the subclusters as # samples and finds the final centroids. self.subcluster_labels_ = clusterer.fit_predict(self.subcluster_centers_) if compute_labels: self.labels_ = self._predict(X)
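`_global_clustering` is internal to scikit-learn's Birch estimator, so a small sketch of how it is reached through the public API (on synthetic data) is probably more useful than calling it directly; the data and cluster count are arbitrary.

import numpy as np
from sklearn.cluster import Birch
from sklearn.datasets import make_blobs

X, _ = make_blobs(n_samples=300, centers=3, random_state=0)
# Passing an integer n_clusters makes fit() run the global clustering step over the subclusters.
model = Birch(n_clusters=3).fit(X)
print(model.subcluster_centers_.shape)
print(np.unique(model.labels_))  # three final cluster labels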
@pytest.mark.parametrize('field_name', ['extra__test__custom_field', 'custom_field']) @mock.patch('airflow.utils.module_loading.import_string') @mock.patch('airflow.providers_manager.ProvidersManager.hooks', new_callable=PropertyMock)
9,302
47,930
65
tests/www/views/test_views_connection.py
41
23
def test_prefill_form_backcompat(extras, expected): mock_form = mock.Mock() mock_form.data = {"conn_id": "test", "extra": json.dumps(extras), "conn_type": "test"} cmv = ConnectionModelView() cmv.extra_fields = ['extra__test__my_param'] # this is set by `lazy_add_provider_discovered_options_to_connection_form` cmv.extra_field_name
Enable use of custom conn extra fields without prefix (#22607) Previously, connection "extra" fields which were added as custom fields in the webserver connection form had to be named with prefix `extra__<conn_type>__`. This was because custom fields are registered globally on the connection view model, so the prefix was necessary to prevent collisions. But the prefix is ugly and cumbersome in the `extra` field. So now what we do is add this prefix when defining the field internally in the model, and strip it when saving the connection. This doesn't change any providers -- each of those will have to be updated in order to use no-prefix custom fields, with special care to handle backcompat.
test_prefill_form_backcompat
1dfae80412377eef0a38637535d6a1d3393cc4fe
airflow
test_views_connection.py
10
8
https://github.com/apache/airflow.git
1
77
1
37
197
Python
{ "docstring": "\n When populating custom fields in the connection form we should first check for the non-prefixed\n value (since prefixes in extra are deprecated) and then fallback to the prefixed value.\n\n Either way, the field is known internally to the model view as the prefixed value.\n ", "language": "en", "n_whitespaces": 57, "n_words": 44, "vocab_size": 35 }
def test_prefill_form_backcompat(extras, expected): mock_form = mock.Mock() mock_form.data = {"conn_id": "test", "extra": json.dumps(extras), "conn_type": "test"} cmv = ConnectionModelView() cmv.extra_fields = ['extra__test__my_param'] # this is set by `lazy_add_provider_discovered_options_to_connection_form` cmv.extra_field_name_mapping['extra__test__my_param'] = 'my_param' cmv.prefill_form(form=mock_form, pk=1) assert mock_form.extra__test__my_param.data == expected @pytest.mark.parametrize('field_name', ['extra__test__custom_field', 'custom_field']) @mock.patch('airflow.utils.module_loading.import_string') @mock.patch('airflow.providers_manager.ProvidersManager.hooks', new_callable=PropertyMock)
18,967
93,030
313
src/sentry/search/utils.py
98
24
def tokenize_query(query): result = defaultdict(list) query_params = defaultdict(list) tokens = split_query_into_tokens(query) for token in tokens: if token.upper() in ["OR", "AND"] or token.strip("()") == "": continue state = "query" for idx, char in enumerate(token): next_char = token[idx + 1] if idx < len(token) - 1 else None if idx == 0 and char in ('"', "'", ":"): break if char == ":": if next_char in (":", " "): state = "query"
ref: replace legacy compat.map with list comprehensions (#36372)
tokenize_query
522d6f27c28dc5fd4d996ed605865c42fbda0da8
sentry
utils.py
16
25
https://github.com/getsentry/sentry.git
13
185
0
63
322
Python
{ "docstring": "\n Tokenizes a standard Sentry search query.\n\n Example:\n >>> query = 'is:resolved foo bar tag:value'\n >>> tokenize_query(query)\n {\n 'is': ['resolved'],\n 'query': ['foo', 'bar'],\n 'tag': ['value'],\n }\n\n Has a companion implementation in static/app/utils/tokenizeSearch.tsx\n ", "language": "en", "n_whitespaces": 77, "n_words": 31, "vocab_size": 29 }
def tokenize_query(query): result = defaultdict(list) query_params = defaultdict(list) tokens = split_query_into_tokens(query) for token in tokens: if token.upper() in ["OR", "AND"] or token.strip("()") == "": continue state = "query" for idx, char in enumerate(token): next_char = token[idx + 1] if idx < len(token) - 1 else None if idx == 0 and char in ('"', "'", ":"): break if char == ":": if next_char in (":", " "): state = "query" else: state = "tags" break query_params[state].append(token) if "query" in query_params: result["query"] = [format_query(query) for query in query_params["query"]] for tag in query_params["tags"]: key, value = format_tag(tag) result[key].append(value) return dict(result)
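Restating the docstring's own example as a runnable sketch; the import path is assumed from the record's `src/sentry/search/utils.py` path.

from sentry.search.utils import tokenize_query  # assumed import path

result = tokenize_query("is:resolved foo bar tag:value")
assert result == {
    "is": ["resolved"],
    "query": ["foo", "bar"],
    "tag": ["value"],
}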
@keras_export("keras.models.load_model")
83,344
280,311
410
keras/saving/saving_api.py
110
21
def save_model(model, filepath, overwrite=True, save_format=None, **kwargs): save_format = get_save_format(filepath, save_format) if save_format not in ("keras", "tf", "h5", "keras_v3"): raise ValueError( "Unknown `save_format` argument. Expecte
Prepare public API surface for v3 saving. PiperOrigin-RevId: 484397600
save_model
c9068087d9142bab573e0c300bf9874a957accff
keras
saving_api.py
18
33
https://github.com/keras-team/keras.git
10
144
1
80
273
Python
{ "docstring": "Saves a model as a TensorFlow SavedModel or HDF5 file.\n\n See the [Serialization and Saving guide](\n https://keras.io/guides/serialization_and_saving/) for details.\n\n Args:\n model: Keras model instance to be saved.\n filepath: `str` or `pathlib.Path` object. Path where to save the model.\n overwrite: Whether we should overwrite any existing model at the target\n location, or instead ask the user via an interactive prompt.\n save_format: Either `\"keras\"`, `\"tf\"`, `\"h5\"`,\n indicating whether to save the model\n in the native Keras format (`.keras`),\n in the TensorFlow SavedModel format (referred to as \"SavedModel\"\n below), or in the legacy HDF5 format (`.h5`).\n Defaults to `\"tf\"` in TF 2.X, and `\"h5\"` in TF 1.X.\n\n SavedModel format arguments:\n include_optimizer: Only applied to SavedModel and legacy HDF5 formats.\n If False, do not save the optimizer state. Defaults to True.\n signatures: Only applies to SavedModel format. Signatures to save\n with the SavedModel. See the `signatures` argument in\n `tf.saved_model.save` for details.\n options: Only applies to SavedModel format.\n `tf.saved_model.SaveOptions` object that specifies SavedModel\n saving options.\n save_traces: Only applies to SavedModel format. When enabled, the\n SavedModel will store the function traces for each layer. This\n can be disabled, so that only the configs of each layer are stored.\n Defaults to `True`. Disabling this will decrease serialization time\n and reduce file size, but it requires that all custom layers/models\n implement a `get_config()` method.\n\n Example:\n\n ```python\n model = tf.keras.Sequential([\n tf.keras.layers.Dense(5, input_shape=(3,)),\n tf.keras.layers.Softmax()])\n model.save(\"model.keras\")\n loaded_model = tf.keras.models.load_model(\"model.keras\")\n x = tf.random.uniform((10, 3))\n assert np.allclose(model.predict(x), loaded_model.predict(x))\n ```\n\n Note that `model.save()` is an alias for `tf.keras.models.save_model()`.\n\n The SavedModel or HDF5 file contains:\n\n - The model's configuration (architecture)\n - The model's weights\n - The model's optimizer's state (if any)\n\n Thus models can be reinstantiated in the exact same state, without any of\n the code used for model definition or training.\n\n Note that the model weights may have different scoped names after being\n loaded. Scoped names include the model/layer names, such as\n `\"dense_1/kernel:0\"`. It is recommended that you use the layer properties to\n access specific variables, e.g. `model.get_layer(\"dense_1\").kernel`.\n\n __SavedModel serialization format__\n\n With `save_format=\"tf\"`, the model and all trackable objects attached\n to the it (e.g. layers and variables) are saved as a TensorFlow SavedModel.\n The model config, weights, and optimizer are included in the SavedModel.\n Additionally, for every Keras layer attached to the model, the SavedModel\n stores:\n\n * The config and metadata -- e.g. name, dtype, trainable status\n * Traced call and loss functions, which are stored as TensorFlow\n subgraphs.\n\n The traced functions allow the SavedModel format to save and load custom\n layers without the original class definition.\n\n You can choose to not save the traced functions by disabling the\n `save_traces` option. This will decrease the time it takes to save the model\n and the amount of disk space occupied by the output SavedModel. If you\n enable this option, then you _must_ provide all custom class definitions\n when loading the model. 
See the `custom_objects` argument in\n `tf.keras.models.load_model`.\n ", "language": "en", "n_whitespaces": 847, "n_words": 472, "vocab_size": 267 }
def save_model(model, filepath, overwrite=True, save_format=None, **kwargs): save_format = get_save_format(filepath, save_format) if save_format not in ("keras", "tf", "h5", "keras_v3"): raise ValueError( "Unknown `save_format` argument. Expected one of " "'keras', 'tf', or 'h5'. " f"Received: save_format{save_format}" ) if save_format == "keras_v3" or ( saving_lib.saving_v3_enabled() and save_format == "keras" ): # If file exists and should not be overwritten. try: exists = os.path.exists(filepath) except TypeError: exists = False if exists and not overwrite: proceed = io_utils.ask_to_proceed_with_overwrite(filepath) if not proceed: return if kwargs: raise ValueError( "The following argument(s) are not supported " f"with the native Keras format: {list(kwargs.keys())}" ) saving_lib.save_model(model, filepath) else: # Legacy case return legacy_sm_saving_lib.save_model( model, filepath, overwrite=overwrite, save_format=save_format, **kwargs, ) @keras_export("keras.models.load_model")
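Condensing the long docstring above, here is its own end-to-end example as a runnable sketch of saving and reloading in the native Keras format.

import numpy as np
import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.Dense(5, input_shape=(3,)),
    tf.keras.layers.Softmax(),
])
model.save("model.keras")  # model.save() is an alias that routes through save_model() above
loaded_model = tf.keras.models.load_model("model.keras")

x = tf.random.uniform((10, 3))
assert np.allclose(model.predict(x), loaded_model.predict(x))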
@pytest.mark.slow
42,077
176,745
193
networkx/algorithms/tree/tests/test_mst.py
78
22
def test_random_spanning_tree_additive_small(): pytest.importorskip("numpy") edges = { (0, 1): 1, (0, 2): 1, (0, 5): 3, (1, 2): 2, (1, 4): 3, (2, 3): 3, (5, 3): 4, (5, 4): 5, (4, 3): 4, } # Build the graph G = nx.Graph() for u, v in edges: G.add_edge(u, v, weight=edges[(u, v)]) solution_edges = [(0, 2), (1, 2), (2, 3), (3, 4), (3, 5)] solution = nx.Graph() solution.add_edges_from(solution_edges) sampled_tree = nx.random_spanning_tree( G, weight="weight", multiplicative=False, seed=37 ) assert nx.utils.edge
Moved random_spanning_tree to public API (#5656) Adds two new functions random_spanning_tree and total_spanning_tree_weight to public networkx API, accessible from the main namespace. These functions had previously been defined, tested, and used internally in the TSP package, but have now been added to the public API as they are generally applicable. Co-authored-by: Dan Schult <[email protected]> Co-authored-by: Ross Barnowski <[email protected]>
test_random_spanning_tree_additive_small
99d31932bd7388aadfa54305c116ca0c9261a67e
networkx
test_mst.py
12
23
https://github.com/networkx/networkx.git
2
201
1
57
295
Python
{ "docstring": "\n Sample a single spanning tree from the additive method.\n ", "language": "en", "n_whitespaces": 16, "n_words": 9, "vocab_size": 9 }
def test_random_spanning_tree_additive_small(): pytest.importorskip("numpy") edges = { (0, 1): 1, (0, 2): 1, (0, 5): 3, (1, 2): 2, (1, 4): 3, (2, 3): 3, (5, 3): 4, (5, 4): 5, (4, 3): 4, } # Build the graph G = nx.Graph() for u, v in edges: G.add_edge(u, v, weight=edges[(u, v)]) solution_edges = [(0, 2), (1, 2), (2, 3), (3, 4), (3, 5)] solution = nx.Graph() solution.add_edges_from(solution_edges) sampled_tree = nx.random_spanning_tree( G, weight="weight", multiplicative=False, seed=37 ) assert nx.utils.edges_equal(solution.edges, sampled_tree.edges) @pytest.mark.slow
46,922
192,979
133
references/optical_flow/utils.py
86
24
def sequence_loss(flow_preds, flow_gt, valid_flow_mask, gamma=0.8, max_flow=400): if gamma > 1: raise ValueError(f"Gamma should be < 1, got {gamma}.") # exlude invalid pixels and extremely large diplacements flow_norm = torch.sum(flow_gt**2, dim=1).sqrt() valid_flow_mask = valid_flow_mask & (flow_norm < max_flow) valid_flow_mask = valid_flow_mask[:, None, :, :] flow_preds = torch.stack(flow_preds) # shape = (num_flow_updates, batch_size, 2, H, W) abs_diff = (flow_preds - flow_gt).abs() abs_diff = (abs_diff * valid_flow_mask).mean(axis=(1, 2, 3, 4)) num_predictions = flow_preds.shape[0] weights = gamma ** torch.arange(num_predictions - 1, -1, -1).to(flow_gt.
Upgrade usort to `1.0.2` and black to 22.3.0 (#5106) * upgrade usort to * Also update black * Actually use 1.0.2 * Apply pre-commit Co-authored-by: Nicolas Hug <[email protected]>
sequence_loss
6ca9c76adb6daf2695d603ad623a9cf1c4f4806f
vision
utils.py
12
13
https://github.com/pytorch/vision.git
2
157
0
65
244
Python
{ "docstring": "Loss function defined over sequence of flow predictions", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
def sequence_loss(flow_preds, flow_gt, valid_flow_mask, gamma=0.8, max_flow=400): if gamma > 1: raise ValueError(f"Gamma should be < 1, got {gamma}.") # exclude invalid pixels and extremely large displacements flow_norm = torch.sum(flow_gt**2, dim=1).sqrt() valid_flow_mask = valid_flow_mask & (flow_norm < max_flow) valid_flow_mask = valid_flow_mask[:, None, :, :] flow_preds = torch.stack(flow_preds) # shape = (num_flow_updates, batch_size, 2, H, W) abs_diff = (flow_preds - flow_gt).abs() abs_diff = (abs_diff * valid_flow_mask).mean(axis=(1, 2, 3, 4)) num_predictions = flow_preds.shape[0] weights = gamma ** torch.arange(num_predictions - 1, -1, -1).to(flow_gt.device) flow_loss = (abs_diff * weights).sum() return flow_loss
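A shape-only sketch of calling `sequence_loss` above with dummy tensors, to make the expected input layout explicit; the sizes are arbitrary and the all-zero values are only there to keep the example deterministic.

import torch

num_updates, batch, h, w = 3, 2, 8, 8
flow_preds = [torch.zeros(batch, 2, h, w) for _ in range(num_updates)]  # one prediction per refinement step
flow_gt = torch.zeros(batch, 2, h, w)                                   # ground-truth flow
valid = torch.ones(batch, h, w, dtype=torch.bool)                       # per-pixel validity mask

loss = sequence_loss(flow_preds, flow_gt, valid, gamma=0.8, max_flow=400)
print(loss.item())  # 0.0 for identical (all-zero) predictions and ground truth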
6,324
34,756
347
src/transformers/modeling_tf_utils.py
108
13
def booleans_processing(config, **kwargs): final_booleans = {} if tf.executing_eagerly(): final_booleans["output_attentions"] = ( kwargs["output_attentions"] if kwargs["output_attentions"] is not None else config.output_attentions ) final_booleans["output_hidden_states"] = ( kwargs["output_hidden_states"] if kwargs["output_hidden_states"] is not None else config.output_hidden_states ) final_booleans["return_dict"] =
Misfiring tf warnings (#15442) * Fix spurious warning in TF TokenClassification models * Fixing one last spurious warning * Removing outdated warning altogether
booleans_processing
09f9d07271297e97f5a0495fcf7e9cc107fedbdd
transformers
modeling_tf_utils.py
15
38
https://github.com/huggingface/transformers.git
13
247
0
51
322
Python
{ "docstring": "\n Process the input booleans of each model in order to be sure they are compliant with the execution mode (eager or\n graph)\n\n Args:\n config ([`PretrainedConfig`]):\n The config of the running model.\n **kwargs:\n The boolean parameters\n\n Returns:\n A dictionary with the proper values for each boolean\n ", "language": "en", "n_whitespaces": 104, "n_words": 45, "vocab_size": 36 }
def booleans_processing(config, **kwargs): final_booleans = {} if tf.executing_eagerly(): final_booleans["output_attentions"] = ( kwargs["output_attentions"] if kwargs["output_attentions"] is not None else config.output_attentions ) final_booleans["output_hidden_states"] = ( kwargs["output_hidden_states"] if kwargs["output_hidden_states"] is not None else config.output_hidden_states ) final_booleans["return_dict"] = ( kwargs["return_dict"] if kwargs["return_dict"] is not None else config.return_dict ) if "use_cache" in kwargs: final_booleans["use_cache"] = ( kwargs["use_cache"] if kwargs["use_cache"] is not None else getattr(config, "use_cache", None) ) else: final_booleans["output_attentions"] = config.output_attentions final_booleans["output_hidden_states"] = config.output_hidden_states if kwargs.get("return_dict", None) not in (None, True): tf_logger.warning( "The parameter `return_dict` cannot be set in graph mode and will always be set to `True`." ) final_booleans["return_dict"] = True if "use_cache" in kwargs: final_booleans["use_cache"] = getattr(config, "use_cache", None) return final_booleans
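`booleans_processing` only reads a handful of attributes from its config argument, so a sketch with a stand-in object (not a real `PretrainedConfig`) is enough to show the eager-mode fallback behaviour; it assumes the function is importable from `transformers.modeling_tf_utils` and that TF2's default eager execution is active.

from types import SimpleNamespace

from transformers.modeling_tf_utils import booleans_processing  # assumed import path

# Stand-in config exposing only the attributes read by the function above.
config = SimpleNamespace(
    output_attentions=False,
    output_hidden_states=False,
    return_dict=True,
    use_cache=True,
)

flags = booleans_processing(
    config,
    output_attentions=None,     # None falls back to config.output_attentions
    output_hidden_states=True,  # an explicit value wins over the config
    return_dict=None,
    use_cache=None,
)
# flags == {"output_attentions": False, "output_hidden_states": True,
#           "return_dict": True, "use_cache": True}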
9,045
46,950
259
tests/jobs/test_scheduler_job.py
84
33
def test_dagrun_root_fail_unfinished(self): # TODO: this should live in test_dagrun.py # Run both the failed and successful tasks dag_id = 'test_dagrun_states_root_fail_unfinished' dag = self.dagbag.get_
Fixed backfill interference with scheduler (#22701) Co-authored-by: Dmirty Suvorov <[email protected]>
test_dagrun_root_fail_unfinished
9769a65c20f6028d640061efacbc5bfeb5ebaf3d
airflow
test_scheduler_job.py
11
17
https://github.com/apache/airflow.git
1
128
0
70
215
Python
{ "docstring": "\n DagRuns with one unfinished and one failed root task -> RUNNING\n ", "language": "en", "n_whitespaces": 26, "n_words": 11, "vocab_size": 10 }
def test_dagrun_root_fail_unfinished(self): # TODO: this should live in test_dagrun.py # Run both the failed and successful tasks dag_id = 'test_dagrun_states_root_fail_unfinished' dag = self.dagbag.get_dag(dag_id) dr = dag.create_dagrun( run_type=DagRunType.SCHEDULED, execution_date=DEFAULT_DATE, state=None, ) self.null_exec.mock_task_fail(dag_id, 'test_dagrun_fail', dr.run_id) with pytest.raises(AirflowException): dag.run(start_date=dr.execution_date, end_date=dr.execution_date, executor=self.null_exec) # Mark the successful task as never having run since we want to see if the # dagrun will be in a running state despite having an unfinished task. with create_session() as session: ti = dr.get_task_instance('test_dagrun_unfinished', session=session) ti.state = State.NONE session.commit() dr.update_state() assert dr.state == State.RUNNING
40,287
168,328
1,553
pandas/plotting/_core.py
266
38
def _get_call_args(backend_name, data, args, kwargs): if isinstance(data, ABCSeries): arg_def = [ ("kind", "line"), ("ax", None), ("figsize", None), ("use_index", True), ("title", None), ("grid", None), ("legend", False), ("style", None), ("logx", False), ("logy", False), ("loglog", False), ("xticks", None), ("yticks", None), ("xlim", None), ("ylim", None), ("rot", None), ("fontsize", None), ("colormap", None), ("table", False), ("yerr", None), ("xerr", None), ("label", None), ("secondary_y", False), ("xlabel", None), ("ylabel", None), ] elif isinstance(data, ABCDataFrame): arg_def = [ ("x", None), ("y", None), ("kind", "line"), ("ax", None), ("subplots", False), ("sharex", None), ("sharey", False), ("layout", None), ("figsize", None), ("use_index", True), ("title", None), ("grid", None), ("legend", True), ("style", None), ("logx", False), ("logy", False), ("loglog", False), ("xticks", None), ("yticks", None), ("xlim", None), ("ylim", None), ("rot", None), ("fontsize", None), ("colormap", None), ("table", False), ("yerr", None), ("xerr", None), ("secondary_y", False), ("sort_columns", False), ("xlabel", None), ("ylabel", None), ] else: raise TypeError( f"Called plot accessor for type {type(data).__name__}, " "expected Series or DataFrame" ) if "sort_columns" in itertools.chain(args, kwargs.keys()): warnings.warn( "`sort_columns` is deprecated and will be removed in a future " "version.", FutureWarning, stacklevel=find_stack_level(inspect.currentframe()), ) if args and isinstance(data, ABCSeries): positional_args = str(args)[1:-1] keyword_args = ", ".join( [f"{name}={repr(value)}" for (name, _), value in zip(arg_def, args)] ) msg = ( "`Series.plot()` should not be called with positional " "arguments, only keyword arguments. The order of "
DEPR: `sort_columns` in `plot` (#47563) (#48073)
_get_call_args
8b72297c8799725e98cb2c6aee664325b752194f
pandas
_core.py
16
97
https://github.com/pandas-dev/pandas.git
9
570
0
142
924
Python
{ "docstring": "\n This function makes calls to this accessor `__call__` method compatible\n with the previous `SeriesPlotMethods.__call__` and\n `DataFramePlotMethods.__call__`. Those had slightly different\n signatures, since `DataFramePlotMethods` accepted `x` and `y`\n parameters.\n ", "language": "en", "n_whitespaces": 71, "n_words": 28, "vocab_size": 27 }
def _get_call_args(backend_name, data, args, kwargs): if isinstance(data, ABCSeries): arg_def = [ ("kind", "line"), ("ax", None), ("figsize", None), ("use_index", True), ("title", None), ("grid", None), ("legend", False), ("style", None), ("logx", False), ("logy", False), ("loglog", False), ("xticks", None), ("yticks", None), ("xlim", None), ("ylim", None), ("rot", None), ("fontsize", None), ("colormap", None), ("table", False), ("yerr", None), ("xerr", None), ("label", None), ("secondary_y", False), ("xlabel", None), ("ylabel", None), ] elif isinstance(data, ABCDataFrame): arg_def = [ ("x", None), ("y", None), ("kind", "line"), ("ax", None), ("subplots", False), ("sharex", None), ("sharey", False), ("layout", None), ("figsize", None), ("use_index", True), ("title", None), ("grid", None), ("legend", True), ("style", None), ("logx", False), ("logy", False), ("loglog", False), ("xticks", None), ("yticks", None), ("xlim", None), ("ylim", None), ("rot", None), ("fontsize", None), ("colormap", None), ("table", False), ("yerr", None), ("xerr", None), ("secondary_y", False), ("sort_columns", False), ("xlabel", None), ("ylabel", None), ] else: raise TypeError( f"Called plot accessor for type {type(data).__name__}, " "expected Series or DataFrame" ) if "sort_columns" in itertools.chain(args, kwargs.keys()): warnings.warn( "`sort_columns` is deprecated and will be removed in a future " "version.", FutureWarning, stacklevel=find_stack_level(inspect.currentframe()), ) if args and isinstance(data, ABCSeries): positional_args = str(args)[1:-1] keyword_args = ", ".join( [f"{name}={repr(value)}" for (name, _), value in zip(arg_def, args)] ) msg = ( "`Series.plot()` should not be called with positional " "arguments, only keyword arguments. The order of " "positional arguments will change in the future. " f"Use `Series.plot({keyword_args})` instead of " f"`Series.plot({positional_args})`." ) raise TypeError(msg) pos_args = {name: value for (name, _), value in zip(arg_def, args)} if backend_name == "pandas.plotting._matplotlib": kwargs = dict(arg_def, **pos_args, **kwargs) else: kwargs = dict(pos_args, **kwargs) x = kwargs.pop("x", None) y = kwargs.pop("y", None) kind = kwargs.pop("kind", "line") return x, y, kind, kwargs
50,048
202,094
40
tests/cache/tests_async.py
12
6
async def test_ahas_key(self): await cache.aset("hello1", "goodbye1") self.assertIs(await cache.ahas_key("hello1"), False) self.assertIs(await cache.ahas_key("goodbye1"), False)
Refs #33476 -- Reformatted code with Black.
test_ahas_key
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
tests_async.py
11
4
https://github.com/django/django.git
1
43
0
10
80
Python
{ "docstring": "ahas_key() doesn't ever return True for the dummy cache backend.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
async def test_ahas_key(self): await cache.aset("hello1", "goodbye1") self.assertIs(await cache.ahas_key("hello1"), False) self.assertIs(await cache.ahas_key("goodbye1"), False)
96,070
297,101
44
homeassistant/components/gree/climate.py
12
8
def min_temp(self) -> float: if self.temperature_unit == UnitOfTemperature.CELSIUS: return TEMP_MIN return TEMP_MIN_F
Use UnitOfTemperature in climate entities [g-l] (#83127) * Use UnitOfTemperature in climate entities [g-l] * Adjust gree * Adjust honeywell
min_temp
68e454712dae5b65599ef12a025bc4446f7e3e6e
core
climate.py
7
5
https://github.com/home-assistant/core.git
2
21
0
11
36
Python
{ "docstring": "Return the minimum temperature supported by the device.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 7 }
def min_temp(self) -> float: if self.temperature_unit == UnitOfTemperature.CELSIUS: return TEMP_MIN return TEMP_MIN_F
47,462
195,878
111
sympy/core/expr.py
49
11
def nseries(self, x=None, x0=0, n=6, dir='+', logx=None, cdir=0): if x and x not in self.free_symbols: return self if x is None or x0 or dir != '+': # {see XPOS above} or (x.is_positive == x.is_negative == None): return self.series(x, x0, n, dir, cdir=cdir) else: return self._eval_nseries(x, n=n, logx=logx, cdir=cdir)
Fixed failing doctest
nseries
46ba104ee0f9cb35b54c2f5f5591cfabb26d0301
sympy
expr.py
11
7
https://github.com/sympy/sympy.git
6
91
0
40
135
Python
{ "docstring": "\n Wrapper to _eval_nseries if assumptions allow, else to series.\n\n If x is given, x0 is 0, dir='+', and self has x, then _eval_nseries is\n called. This calculates \"n\" terms in the innermost expressions and\n then builds up the final series just by \"cross-multiplying\" everything\n out.\n\n The optional ``logx`` parameter can be used to replace any log(x) in the\n returned series with a symbolic value to avoid evaluating log(x) at 0. A\n symbol to use in place of log(x) should be provided.\n\n Advantage -- it's fast, because we do not have to determine how many\n terms we need to calculate in advance.\n\n Disadvantage -- you may end up with less terms than you may have\n expected, but the O(x**n) term appended will always be correct and\n so the result, though perhaps shorter, will also be correct.\n\n If any of those assumptions is not met, this is treated like a\n wrapper to series which will try harder to return the correct\n number of terms.\n\n See also lseries().\n\n Examples\n ========\n\n >>> from sympy import sin, log, Symbol\n >>> from sympy.abc import x, y\n >>> sin(x).nseries(x, 0, 6)\n x - x**3/6 + x**5/120 + O(x**6)\n >>> log(x+1).nseries(x, 0, 5)\n x - x**2/2 + x**3/3 - x**4/4 + O(x**5)\n\n Handling of the ``logx`` parameter --- in the following example the\n expansion fails since ``sin`` does not have an asymptotic expansion\n at -oo (the limit of log(x) as x approaches 0):\n\n >>> e = sin(log(x))\n >>> e.nseries(x, 0, 6)\n Traceback (most recent call last):\n ...\n PoleError: ...\n ...\n >>> logx = Symbol('logx')\n >>> e.nseries(x, 0, 6, logx=logx)\n sin(logx)\n\n In the following example, the expansion works but only returns self\n unless the ``logx`` parameter is used:\n\n >>> e = x**y\n >>> e.nseries(x, 0, 2)\n x**y\n >>> e.nseries(x, 0, 2, logx=logx)\n exp(logx*y)\n\n ", "language": "en", "n_whitespaces": 610, "n_words": 294, "vocab_size": 182 }
def nseries(self, x=None, x0=0, n=6, dir='+', logx=None, cdir=0): if x and x not in self.free_symbols: return self if x is None or x0 or dir != '+': # {see XPOS above} or (x.is_positive == x.is_negative == None): return self.series(x, x0, n, dir, cdir=cdir) else: return self._eval_nseries(x, n=n, logx=logx, cdir=cdir)
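The docstring above already carries worked examples; two of them restated as a compact runnable sketch:

from sympy import Symbol, sin
from sympy.abc import x, y

print(sin(x).nseries(x, 0, 6))  # x - x**3/6 + x**5/120 + O(x**6)

logx = Symbol('logx')
print((x**y).nseries(x, 0, 2, logx=logx))  # exp(logx*y)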
45,750
187,229
46
tests/test_api_validate.py
15
14
def test_getitem_error(self, exception): container = self.Container(exception("failure")) with pytest.raises(validate.ValidationError) as cm:
plugin.api.validate: rewrite tests Completely rewrite tests using pytest, with full coverage
test_getitem_error
d09112ab1f6db6aa605650fe1ff6a3028344f90d
streamlink
test_api_validate.py
13
10
https://github.com/streamlink/streamlink.git
1
55
0
15
96
Python
{ "docstring": "\n ValidationError(GetItemSchema):\n Could not get key 'foo' from object Container\n Context:\n failure\n ", "language": "en", "n_whitespaces": 71, "n_words": 11, "vocab_size": 11 }
def test_getitem_error(self, exception): container = self.Container(exception("failure")) with pytest.raises(validate.ValidationError) as cm: validate.validate(validate.get("foo", default="default"), container) assert_validationerror(cm.value, """ ValidationError(GetItemSchema): Could not get key 'foo' from object Container Context: failure """)
70,935
245,986
24
mmdet/models/task_modules/prior_generators/point_generator.py
10
8
def num_base_priors(self) -> List[int]:
[Doc]: Add typehint and update docstring for task modules (#9468) * part 11 * part 11 * part 11 * part 11
num_base_priors
92e2eb355bc964af5e798281bcb4cb9179fdaecc
mmdetection
point_generator.py
12
4
https://github.com/open-mmlab/mmdetection.git
2
27
0
10
44
Python
{ "docstring": "list[int]: The number of priors (points) at a point\n on the feature grid", "language": "en", "n_whitespaces": 19, "n_words": 13, "vocab_size": 13 }
def num_base_priors(self) -> List[int]: return [1 for _ in range(len(self.strides))]
51,961
207,420
243
tests/admin_utils/test_logentry.py
43
24
def test_logentry_change_message_localized_datetime_input(self): post_data = { "site": self.site.pk, "title": "Changed", "hist": "Some content", "created_0": "12/03/2008", "created_1": "11:54", } with translation.override("fr"): change_url = reverse( "admin:admin_utils_article_change", args=[quote(self.a1.pk)] ) response = self.client.post(change_url, post_data) self.assertRedirects( response, reverse("admin:admin_utils_article_changelist") ) logentry = LogEntry.objects.filter( content_type__model__iexact="article" ).latest("id") self.assertEqual(logentry.get_change_message(), "Changed Tit
Refs #33476 -- Reformatted code with Black.
test_logentry_change_message_localized_datetime_input
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
test_logentry.py
16
20
https://github.com/django/django.git
1
113
0
39
206
Python
{ "docstring": "\n Localized date/time inputs shouldn't affect changed form data detection.\n ", "language": "en", "n_whitespaces": 24, "n_words": 9, "vocab_size": 9 }
def test_logentry_change_message_localized_datetime_input(self): post_data = { "site": self.site.pk, "title": "Changed", "hist": "Some content", "created_0": "12/03/2008", "created_1": "11:54", } with translation.override("fr"): change_url = reverse( "admin:admin_utils_article_change", args=[quote(self.a1.pk)] ) response = self.client.post(change_url, post_data) self.assertRedirects( response, reverse("admin:admin_utils_article_changelist") ) logentry = LogEntry.objects.filter( content_type__model__iexact="article" ).latest("id") self.assertEqual(logentry.get_change_message(), "Changed Title and History.")
else:
77,523
263,954
97
PyInstaller/depend/bytecode.py
66
5
def _cleanup_code(code): return code # Nothing to do here # language=PythonVerboseRegExp _call_function_bytecode = bytecode_regex( rb ) else: # Starting with python 3.11, the bytecode is peppered with CACHE instructions (which dis module conveniently hides # unless show_caches=True is used). Dealing with these CACHE instructions in regex rules is goin
depend: adjust bytecode scanner for python 3.11 Python 3.11 removed CALL_FUNCTION and CALL_METHOD opcodes, replacing them with PRECALL + CALL. For our purposes, it should be enough to match PRECALL only (as both opcodes have same parameter, i.e., the argument count). In addition, the bytecode is now peppered with CACHE instructions, which we filter out in pre-processing phase to avoid complicating the regex rules. Furthermore, the low bit of argument to LOAD_GLOBAL opcode must now be ignored.
_cleanup_code
8ee5afa1ea56906b30ba2ea4578082c61a1f94e2
pyinstaller
bytecode.py
7
2
https://github.com/pyinstaller/pyinstaller.git
1
7
1
52
32
Python
{ "docstring": "\n # Matches `global_function('some', 'constant', 'arguments')`.\n\n # Load the global function. In code with >256 of names, this may require extended name references.\n ((?:`EXTENDED_ARG`.)*\n (?:`LOAD_NAME`|`LOAD_GLOBAL`|`LOAD_FAST`).)\n\n # For foo.bar.whizz(), the above is the 'foo', below is the 'bar.whizz'.\n ((?:(?:`EXTENDED_ARG`.)*\n (?:`LOAD_METHOD`|`LOAD_ATTR`).)*)\n\n # Load however many arguments it takes. These (for now) must all be constants.\n # Again, code with >256 constants may need extended enumeration.\n ((?:(?:`EXTENDED_ARG`.)*\n `LOAD_CONST`.)*)\n\n # Call the function. The parameter is the argument count (which may also be >256) if CALL_FUNCTION or\n # CALL_METHOD are used. For CALL_FUNCTION_EX, the parameter are flags.\n ((?:`EXTENDED_ARG`.)*\n (?:`CALL_FUNCTION`|`CALL_METHOD`|`CALL_FUNCTION_EX`).)\n ", "language": "en", "n_whitespaces": 207, "n_words": 94, "vocab_size": 66 }
def _cleanup_code(code): return code # Nothing to do here # language=PythonVerboseRegExp _call_function_bytecode = bytecode_regex( rb ) else: # Starting with python 3.11, the bytecode is peppered with CACHE instructions (which dis module conveniently hides # unless show_caches=True is used). Dealing with these CACHE instructions in regex rules is going to render them # unreadable, so instead we pre-process the bytecode and filter the offending opcodes out.
3,858
21,469
104
pipenv/patched/notpip/_vendor/distlib/_backport/tarfile.py
26
9
def filemode(mode): perm = [] for table in filemode_table
Vendor in pip 22.1.2
filemode
c69d55f7c82d5ae2cce542bcfb98d043ca4836a0
pipenv
tarfile.py
13
10
https://github.com/pypa/pipenv.git
4
51
0
24
89
Python
{ "docstring": "Convert a file's mode to a string of the form\n -rwxrwxrwx.\n Used by TarFile.list()\n ", "language": "en", "n_whitespaces": 29, "n_words": 14, "vocab_size": 13 }
def filemode(mode): perm = [] for table in filemode_table: for bit, char in table: if mode & bit == bit: perm.append(char) break else: perm.append("-") return "".join(perm)
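A quick sanity check of `filemode` above, assuming the function and the `filemode_table` it iterates over (mirroring the stdlib `tarfile` module this backport follows) are in scope; the mode values are ordinary examples.

import stat

print(filemode(0o100644))              # expected '-rw-r--r--' for a regular file
print(filemode(stat.S_IFDIR | 0o755))  # expected 'drwxr-xr-x' for a directory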
1,612
9,415
35
reconstruction/ostec/external/stylegan2/dnnlib/tflib/ops/upfirdn_2d.py
25
12
def filter_2d(x, k, gain=1, data_format='NCHW', impl='cuda'): r k = _setup_kernel(k) * gain
initialize ostec
filter_2d
7375ee364e0df2a417f92593e09557f1b2a3575a
insightface
upfirdn_2d.py
11
21
https://github.com/deepinsight/insightface.git
1
54
0
23
105
Python
{ "docstring": "Filter a batch of 2D images with the given FIR filter.\n\n Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]`\n and filters each image with the given filter. The filter is normalized so that\n if the input pixels are constant, they will be scaled by the specified `gain`.\n Pixels outside the image are assumed to be zero.\n\n Args:\n x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`.\n k: FIR filter of the shape `[firH, firW]` or `[firN]` (separable).\n gain: Scaling factor for signal magnitude (default: 1.0).\n data_format: `'NCHW'` or `'NHWC'` (default: `'NCHW'`).\n impl: Name of the implementation to use. Can be `\"ref\"` or `\"cuda\"` (default).\n\n Returns:\n Tensor of the same shape and datatype as `x`.\n ", "language": "en", "n_whitespaces": 232, "n_words": 130, "vocab_size": 83 }
def filter_2d(x, k, gain=1, data_format='NCHW', impl='cuda'): k = _setup_kernel(k) * gain p = k.shape[0] - 1 return _simple_upfirdn_2d(x, k, pad0=(p+1)//2, pad1=p//2, data_format=data_format, impl=impl) #----------------------------------------------------------------------------
14,198
66,513
17
erpnext/patches/v10_0/set_currency_in_pricing_rule.py
26
13
def execute(): frappe.reload_doctype("Pricing Rule") currency = frappe.db.get_default("currency") for doc in frappe.get_all("Pricing Rule", fields=["company", "name"]): if doc.company: currenc
style: format code with black
execute
494bd9ef78313436f0424b918f200dab8fc7c20b
erpnext
set_currency_in_pricing_rule.py
13
9
https://github.com/frappe/erpnext.git
3
73
0
24
126
Python
{ "docstring": "update `tabPricing Rule` set currency = %s where name = %s", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 9 }
def execute(): frappe.reload_doctype("Pricing Rule") currency = frappe.db.get_default("currency") for doc in frappe.get_all("Pricing Rule", fields=["company", "name"]): if doc.company: currency = frappe.get_cached_value("Company", doc.company, "default_currency") frappe.db.sql( , (currency, doc.name) )
10,437
51,941
341
modules/image/Image_editing/super_resolution/swinir_l_real_sr_x4/swinir.py
131
27
def forward(self, x, mask=None): B_, N, C = x.shape qkv = self.qkv(x).reshape((B_, N, 3, self.num_heads, C // self.num_heads)).transpose((2, 0, 3, 1, 4)) q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) q = q * self.scale attn = (q @ k.transpose((0, 1, 3, 2))) relative_position_bias = self.relative_position_bias_table[self.relative_position_index.reshape( (-1, ))].reshape((self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1)) # Wh*Ww,Wh*Ww,nH relative_position_bias = relative_position_bias.transpose((2, 0, 1)) # nH, Wh*Ww, Wh*Ww attn = attn + relative_position_bias.unsqueeze(0) if mask is not None: nW = mask.shape[0] attn = attn.reshape((B_ // nW, nW, self.num_heads, N, N)) + mask.unsqueeze(1).unsqueeze(0)
add swinir_l_real_sr_x4 (#2076) * git add swinir_l_real_sr_x4 * fix typo * fix typo Co-authored-by: chenjian <[email protected]>
forward
2e373966a7fd3119c205350fb14d0b7bfe74185d
PaddleHub
swinir.py
13
23
https://github.com/PaddlePaddle/PaddleHub.git
2
288
0
80
490
Python
{ "docstring": "\n Args:\n x: input features with shape of (num_windows*B, N, C)\n mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None\n ", "language": "en", "n_whitespaces": 58, "n_words": 21, "vocab_size": 18 }
def forward(self, x, mask=None): B_, N, C = x.shape qkv = self.qkv(x).reshape((B_, N, 3, self.num_heads, C // self.num_heads)).transpose((2, 0, 3, 1, 4)) q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) q = q * self.scale attn = (q @ k.transpose((0, 1, 3, 2))) relative_position_bias = self.relative_position_bias_table[self.relative_position_index.reshape( (-1, ))].reshape((self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1)) # Wh*Ww,Wh*Ww,nH relative_position_bias = relative_position_bias.transpose((2, 0, 1)) # nH, Wh*Ww, Wh*Ww attn = attn + relative_position_bias.unsqueeze(0) if mask is not None: nW = mask.shape[0] attn = attn.reshape((B_ // nW, nW, self.num_heads, N, N)) + mask.unsqueeze(1).unsqueeze(0) attn = attn.reshape((-1, self.num_heads, N, N)) attn = self.softmax(attn) else: attn = self.softmax(attn) attn = self.attn_drop(attn) x = (attn @ v).transpose((0, 2, 1, 3)).reshape((B_, N, C)) x = self.proj(x) x = self.proj_drop(x) return x
72,338
248,546
780
tests/test_event_auth.py
154
17
def test_join_rules_invite(self): creator = "@creator:example.com" pleb = "@joiner:example.com" auth_events = { ("m.room.create", ""): _create_event(RoomVersions.V6, creator), ("m.room.member", creator): _join_event(RoomVersions.V6, creator), ("m.room.join_rules", ""): _join_rules_event(
EventAuthTestCase: build events for the right room version In practice, when we run the auth rules, all of the events have the right room version. Let's stop building Room V1 events for these tests and use the right version.
test_join_rules_invite
2959184a42398277ff916206235b844a8f7be5d7
synapse
test_event_auth.py
12
56
https://github.com/matrix-org/synapse.git
1
325
0
69
518
Python
{ "docstring": "\n Test joining an invite only room.\n ", "language": "en", "n_whitespaces": 21, "n_words": 6, "vocab_size": 6 }
def test_join_rules_invite(self): creator = "@creator:example.com" pleb = "@joiner:example.com" auth_events = { ("m.room.create", ""): _create_event(RoomVersions.V6, creator), ("m.room.member", creator): _join_event(RoomVersions.V6, creator), ("m.room.join_rules", ""): _join_rules_event( RoomVersions.V6, creator, "invite" ), } # A join without an invite is rejected. with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( RoomVersions.V6, _join_event(RoomVersions.V6, pleb), auth_events.values(), ) # A user cannot be force-joined to a room. with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( RoomVersions.V6, _member_event(RoomVersions.V6, pleb, "join", sender=creator), auth_events.values(), ) # Banned should be rejected. auth_events[("m.room.member", pleb)] = _member_event( RoomVersions.V6, pleb, "ban" ) with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( RoomVersions.V6, _join_event(RoomVersions.V6, pleb), auth_events.values(), ) # A user who left cannot re-join. auth_events[("m.room.member", pleb)] = _member_event( RoomVersions.V6, pleb, "leave" ) with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( RoomVersions.V6, _join_event(RoomVersions.V6, pleb), auth_events.values(), ) # A user can send a join if they're in the room. auth_events[("m.room.member", pleb)] = _member_event( RoomVersions.V6, pleb, "join" ) event_auth.check_auth_rules_for_event( RoomVersions.V6, _join_event(RoomVersions.V6, pleb), auth_events.values(), ) # A user can accept an invite. auth_events[("m.room.member", pleb)] = _member_event( RoomVersions.V6, pleb, "invite", sender=creator ) event_auth.check_auth_rules_for_event( RoomVersions.V6, _join_event(RoomVersions.V6, pleb), auth_events.values(), )
47,065
194,765
103
parlai/crowdsourcing/tasks/pairwise_per_turn_eval/worlds.py
53
11
def validate_onboarding(data): logging.info(f"Validating onboarding data {data}") messages = data['outputs']['messages'] if len(messages) == 0: return False status_message = messages[-2] if status_message is None: return False submitted_data = status_message.get('data') if submitted_data is None: return False final_status = submitted_data.get('final_status') return final_sta
Create Per-Turn Evaluation Folder in ParlAI (#4323) * Auto fixes * Remove worker blocklists * Add __init__.py * Add __init__.py * Lint fix * Rename task configs * More lint error fixes * Update Per Turn Eval README with link to paper * Add configs to example * Remove commented out lines * More README improvements * Add bibtex to README * address comment
validate_onboarding
2d062907bcf416150e36879a2246218babad28b1
ParlAI
worlds.py
9
13
https://github.com/facebookresearch/ParlAI.git
4
73
0
37
132
Python
{ "docstring": "\n Check the contents of the data to ensure they are valid.\n ", "language": "en", "n_whitespaces": 18, "n_words": 11, "vocab_size": 10 }
def validate_onboarding(data): logging.info(f"Validating onboarding data {data}") messages = data['outputs']['messages'] if len(messages) == 0: return False status_message = messages[-2] if status_message is None: return False submitted_data = status_message.get('data') if submitted_data is None: return False final_status = submitted_data.get('final_status') return final_status == ONBOARD_SUCCESS # TODO: find a better way to avoid duplicating this from model_chat world.py
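A sketch of the payload shape `validate_onboarding` above expects, reconstructed from its key accesses (`outputs` -> `messages`, with the status read from `messages[-2]`); the import path and message contents are assumptions made only for the illustration.

from parlai.crowdsourcing.tasks.pairwise_per_turn_eval.worlds import (  # assumed import path
    ONBOARD_SUCCESS,
    validate_onboarding,
)

data = {
    "outputs": {
        "messages": [
            {"text": "onboarding response"},              # hypothetical earlier message
            {"data": {"final_status": ONBOARD_SUCCESS}},  # status message read as messages[-2]
            {"text": "closing acknowledgement"},          # hypothetical final message
        ]
    }
}
assert validate_onboarding(data)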
99,723
300,869
58
tests/helpers/test_event.py
23
8
async def test_async_track_entity_registry_updated_event_with_empty_list(hass): unsub_single = async_track_entity_registry_updated_event( hass, [], ha.callback(lambda event: None) ) unsub_single2 = async_track_entity_registry_updated_event( hass, [], ha.callback(lamb
Clean up accessing event helpers via hass (#72011)
test_async_track_entity_registry_updated_event_with_empty_list
8f4caf414124f380a8f5e1d54aedb54a8f6c5c05
core
test_event.py
12
9
https://github.com/home-assistant/core.git
1
50
0
15
84
Python
{ "docstring": "Test async_track_entity_registry_updated_event passing an empty list of entities.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
async def test_async_track_entity_registry_updated_event_with_empty_list(hass): unsub_single = async_track_entity_registry_updated_event( hass, [], ha.callback(lambda event: None) ) unsub_single2 = async_track_entity_registry_updated_event( hass, [], ha.callback(lambda event: None) ) unsub_single2() unsub_single()
29,220
130,296
484
python/ray/_private/tls_utils.py
167
67
def generate_self_signed_tls_certs(): try: from cryptography import x509 from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import hashes, serialization from cryptography.hazmat.primitives.asymmetric import rsa from cryptography.x509.oid import NameOID except ImportError: raise ImportError( "Using `Security.temporary` requires `cryptography`, please " "install it using either pip or conda" ) key = rsa.generate_private_key( public_exponent=65537, key_size=2048, backend=default_backend() ) key_contents = key.private_bytes( encoding=serialization.Encoding.PEM, format=serialization.PrivateFormat.PKCS8, enc
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
generate_self_signed_tls_certs
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
ray
tls_utils.py
25
49
https://github.com/ray-project/ray.git
2
324
0
132
522
Python
{ "docstring": "Create self-signed key/cert pair for testing.\n\n This method requires the library ``cryptography`` be installed.\n ", "language": "en", "n_whitespaces": 20, "n_words": 14, "vocab_size": 14 }
def generate_self_signed_tls_certs(): try: from cryptography import x509 from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import hashes, serialization from cryptography.hazmat.primitives.asymmetric import rsa from cryptography.x509.oid import NameOID except ImportError: raise ImportError( "Using `Security.temporary` requires `cryptography`, please " "install it using either pip or conda" ) key = rsa.generate_private_key( public_exponent=65537, key_size=2048, backend=default_backend() ) key_contents = key.private_bytes( encoding=serialization.Encoding.PEM, format=serialization.PrivateFormat.PKCS8, encryption_algorithm=serialization.NoEncryption(), ).decode() ray_interal = x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, "ray-internal")]) # This is the same logic used by the GCS server to acquire a # private/interal IP address to listen on. If we just use localhost + # 127.0.0.1 then we won't be able to connect to the GCS and will get # an error like "No match found for server name: 192.168.X.Y" s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) s.connect(("8.8.8.8", 80)) private_ip_address = s.getsockname()[0] s.close() altnames = x509.SubjectAlternativeName( [ x509.DNSName( socket.gethostbyname(socket.gethostname()) ), # Probably 127.0.0.1 x509.DNSName("127.0.0.1"), x509.DNSName(private_ip_address), # 192.168.*.* x509.DNSName("localhost"), ] ) now = datetime.datetime.utcnow() cert = ( x509.CertificateBuilder() .subject_name(ray_interal) .issuer_name(ray_interal) .add_extension(altnames, critical=False) .public_key(key.public_key()) .serial_number(x509.random_serial_number()) .not_valid_before(now) .not_valid_after(now + datetime.timedelta(days=365)) .sign(key, hashes.SHA256(), default_backend()) ) cert_contents = cert.public_bytes(serialization.Encoding.PEM).decode() return cert_contents, key_contents
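A short sketch of consuming the helper above: it returns two PEM strings (certificate first, then key), so persisting them is a plain file write; the output paths are arbitrary and the `cryptography` package must be installed.

from ray._private.tls_utils import generate_self_signed_tls_certs  # assumed import path from the record above

cert_pem, key_pem = generate_self_signed_tls_certs()

with open("test_cert.pem", "w") as f:  # arbitrary output path
    f.write(cert_pem)
with open("test_key.pem", "w") as f:   # arbitrary output path
    f.write(key_pem)

print(cert_pem.splitlines()[0])  # -----BEGIN CERTIFICATE-----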
84,835
284,585
60
openbb_terminal/stocks/government/gov_controller.py
25
10
def print_help(self): has_ticker_start = "[unvl]" if not self.ticker else "" has_ticker_end = "[/unvl]" if not self.ticker else ""
Bounty Hunter mood: 11 bugs fixed (#1853) * fix #1850 * fix #1831 * add extra check to Reddit API keys * ignore warning message to update praw api * improve OpenBB links * fix quick performance only on stocks class because I'm James bitch * fix quick performance only on stocks class because I'm James bitch * fix #1829 * fix #1821 * add messari to keys - fix #1819 * example of multiple oclumns to check on options/chains * minor improvement in xlabel re. #1814 * remove repeated command * fix #1698 * fix line too long * fix #1814 fr now * fix tests
print_help
a6f7e111e68346aeab315985b3704c2710693b38
OpenBBTerminal
gov_controller.py
10
24
https://github.com/OpenBB-finance/OpenBBTerminal.git
3
42
0
18
96
Python
{ "docstring": "Print help\n[src][QuiverQuant][/src]\n\n[info]Explore:[/info][cmds]\n lasttrades last trades\n topbuys show most purchased stocks\n topsells show most sold stocks\n lastcontracts show last government contracts given out\n qtrcontracts quarterly government contracts analysis\n toplobbying top corporate lobbying tickers\n\n load load a specific ticker for analysis[/cmds]\n\n[param]Ticker: [/param]{self.ticker or None}{has_ticker_start}[cmds]\n\n gtrades show government trades for ticker\n contracts show government contracts for ticker\n histcont show historical quarterly government contracts for ticker\n lobbying corporate lobbying details for ticker[/cmds]{has_ticker_end}\n ", "language": "en", "n_whitespaces": 191, "n_words": 71, "vocab_size": 42 }
def print_help(self): has_ticker_start = "[unvl]" if not self.ticker else "" has_ticker_end = "[/unvl]" if not self.ticker else "" help_text = f console.print(text=help_text, menu="Stocks - Government")
14,639
67,844
38
erpnext/stock/reorder_item.py
60
18
def get_item_warehouse_projected_qty(items_to_consider): item_warehouse_projected_qty = {} for item_code, warehouse, projected_qty in frappe.db.sql( .format( ", ".join(["%s"] * le
style: format code with black
get_item_warehouse_projected_qty
494bd9ef78313436f0424b918f200dab8fc7c20b
erpnext
reorder_item.py
16
24
https://github.com/frappe/erpnext.git
6
166
0
44
265
Python
{ "docstring": "select item_code, warehouse, projected_qty\n\t\tfrom tabBin where item_code in ({0})\n\t\t\tand (warehouse != \"\" and warehouse is not null)", "language": "en", "n_whitespaces": 16, "n_words": 19, "vocab_size": 18 }
def get_item_warehouse_projected_qty(items_to_consider): item_warehouse_projected_qty = {} for item_code, warehouse, projected_qty in frappe.db.sql( .format( ", ".join(["%s"] * len(items_to_consider)) ), items_to_consider, ): if item_code not in item_warehouse_projected_qty: item_warehouse_projected_qty.setdefault(item_code, {}) if warehouse not in item_warehouse_projected_qty.get(item_code): item_warehouse_projected_qty[item_code][warehouse] = flt(projected_qty) warehouse_doc = frappe.get_doc("Warehouse", warehouse) while warehouse_doc.parent_warehouse: if not item_warehouse_projected_qty.get(item_code, {}).get(warehouse_doc.parent_warehouse): item_warehouse_projected_qty.setdefault(item_code, {})[warehouse_doc.parent_warehouse] = flt( projected_qty ) else: item_warehouse_projected_qty[item_code][warehouse_doc.parent_warehouse] += flt(projected_qty) warehouse_doc = frappe.get_doc("Warehouse", warehouse_doc.parent_warehouse) return item_warehouse_projected_qty
3,959
21,619
225
pipenv/patched/notpip/_vendor/typing_extensions.py
132
27
def _collect_type_vars(types, typevar_types=None): if typevar_types is None: typevar_types = typing.TypeVar tvars = [] for t in types: if ( isinstance(t, typevar_types) and t not in tvars and not _is_unpack(t) ): tvars.append(t) if _should_collect_from_parameters(t): tvars.extend([t for t in t.__parameters__ if t not in tvars]) return tuple(tvars) NoReturn = typing.NoReturn # Some unconstrained type variables. These are used by the container types. # (These are not for export.) T = typing.TypeVar('T') # Any type. KT = typing.TypeVar('KT') # Key type. VT = typing.TypeVar('VT') # Value type. T_co = typing.TypeVar('T_co', covariant=True) # Any type covariant containers. T_contra = typing.TypeVar('T_contra', contravariant=True) # Ditto contravariant. ClassVar = typing.ClassVar # On older versions of typing there is an internal class named "Final". # 3.8+ if hasattr(typing, 'Final') and sys.version_info[:2] >= (3, 7): Final = typing.Final # 3.7 else:
Vendor in pip 22.1.2
_collect_type_vars
c69d55f7c82d5ae2cce542bcfb98d043ca4836a0
pipenv
typing_extensions.py
14
14
https://github.com/pypa/pipenv.git
9
86
0
89
293
Python
{ "docstring": "Collect all type variable contained in types in order of\n first appearance (lexicographic order). For example::\n\n _collect_type_vars((T, List[S, T])) == (T, S)\n ", "language": "en", "n_whitespaces": 35, "n_words": 22, "vocab_size": 21 }
def _collect_type_vars(types, typevar_types=None): if typevar_types is None: typevar_types = typing.TypeVar tvars = [] for t in types: if ( isinstance(t, typevar_types) and t not in tvars and not _is_unpack(t) ): tvars.append(t) if _should_collect_from_parameters(t): tvars.extend([t for t in t.__parameters__ if t not in tvars]) return tuple(tvars) NoReturn = typing.NoReturn # Some unconstrained type variables. These are used by the container types. # (These are not for export.) T = typing.TypeVar('T') # Any type. KT = typing.TypeVar('KT') # Key type. VT = typing.TypeVar('VT') # Value type. T_co = typing.TypeVar('T_co', covariant=True) # Any type covariant containers. T_contra = typing.TypeVar('T_contra', contravariant=True) # Ditto contravariant. ClassVar = typing.ClassVar # On older versions of typing there is an internal class named "Final". # 3.8+ if hasattr(typing, 'Final') and sys.version_info[:2] >= (3, 7): Final = typing.Final # 3.7 else:
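A small sketch of the collection order described in the docstring above, assuming the private helper is importable from the vendored module (`Tuple` is used instead of `List` so the two-parameter subscription is valid):

```python
import typing

T = typing.TypeVar("T")
S = typing.TypeVar("S")

# Type variables are collected in order of first appearance, without duplicates.
assert _collect_type_vars((T, typing.Tuple[S, T])) == (T, S)
assert _collect_type_vars((typing.Dict[S, S], T)) == (S, T)
```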
39,794
166,238
46
pandas/core/exchange/from_dataframe.py
20
10
def from_dataframe(df, allow_copy=True): if isinstance(df, pd.DataFrame): return df if not hasattr(df, "__dataframe__"):
ENH: Implement DataFrame interchange protocol (#46141)
from_dataframe
90140f055892a46f473bd26affab88a7f171e394
pandas
from_dataframe.py
10
6
https://github.com/pandas-dev/pandas.git
3
48
0
17
81
Python
{ "docstring": "\n Build a ``pd.DataFrame`` from any DataFrame supporting the interchange protocol.\n\n Parameters\n ----------\n df : DataFrameXchg\n Object supporting the exchange protocol, i.e. `__dataframe__` method.\n allow_copy : bool, default: True\n Whether to allow copying the memory to perform the conversion\n (if false then zero-copy approach is requested).\n\n Returns\n -------\n pd.DataFrame\n ", "language": "en", "n_whitespaces": 97, "n_words": 48, "vocab_size": 42 }
def from_dataframe(df, allow_copy=True): if isinstance(df, pd.DataFrame): return df if not hasattr(df, "__dataframe__"): raise ValueError("`df` does not support __dataframe__") return _from_dataframe(df.__dataframe__(allow_copy=allow_copy))
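A short usage sketch for the entry point above. Only what the snippet itself guarantees is shown: a pandas input is returned unchanged, and any other object must expose `__dataframe__` (the non-pandas branch is only hinted at, since it delegates to `_from_dataframe`):

```python
import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": ["x", "y", "z"]})

# A pandas DataFrame short-circuits and is handed back as-is.
assert from_dataframe(df) is df

# Any other library's frame works as long as it implements __dataframe__;
# e.g. (assumption) a pyarrow/vaex/polars table would be rebuilt as a
# pd.DataFrame via _from_dataframe(obj.__dataframe__(allow_copy=True)).
```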
12,522
61,340
85
.venv/lib/python3.8/site-packages/pip/_internal/utils/wheel.py
39
14
def parse_wheel(wheel_zip, name): # type: (ZipFile, str) -> Tuple[str, Message] try: info_dir = wheel_dist_info_dir(wheel_zip, name) metadata = wheel
upd; format
parse_wheel
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
transferlearning
wheel.py
14
9
https://github.com/jindongwang/transferlearning.git
2
62
0
35
103
Python
{ "docstring": "Extract information from the provided wheel, ensuring it meets basic\n standards.\n\n Returns the name of the .dist-info directory and the parsed WHEEL metadata.\n ", "language": "en", "n_whitespaces": 32, "n_words": 23, "vocab_size": 20 }
def parse_wheel(wheel_zip, name): # type: (ZipFile, str) -> Tuple[str, Message] try: info_dir = wheel_dist_info_dir(wheel_zip, name) metadata = wheel_metadata(wheel_zip, info_dir) version = wheel_version(metadata) except UnsupportedWheel as e: raise UnsupportedWheel("{} has an invalid wheel, {}".format(name, str(e))) check_compatibility(version, name) return info_dir, metadata
6,207
34,177
114
utils/style_doc.py
70
17
def style_docstrings_in_code(code, max_len=119): # fmt: off splits = code.split('\"\"\"') splits = [ (s if i % 2 == 0 or _re_doc_ignore.search(splits[i - 1]) is not None else style_docstring(s, max_len=max_len)) for i, s in enumerate(splits) ] black_errors = "\n\n".join([s[1] for s in splits if isinstance(s, tuple) and len(s[1]) > 0]) splits = [s[0] if isinstance(s, tuple) else s for s in splits] clean_code = '\"\"\"'.join(splits) # fmt: on return clean_code, black_errors
Copies and docstring styling (#15202) * Style docstrings when making/checking copies * Polish
style_docstrings_in_code
1144d336b689d1710534b245697e41be7a168075
transformers
style_doc.py
15
10
https://github.com/huggingface/transformers.git
9
131
0
48
212
Python
{ "docstring": "\n Style all docstrings in some code.\n\n Args:\n code (`str`): The code in which we want to style the docstrings.\n max_len (`int`): The maximum number of characters per line.\n\n Returns:\n `Tuple[str, str]`: A tuple with the clean code and the black errors (if any)\n ", "language": "en", "n_whitespaces": 77, "n_words": 43, "vocab_size": 37 }
def style_docstrings_in_code(code, max_len=119): # fmt: off splits = code.split('\"\"\"') splits = [ (s if i % 2 == 0 or _re_doc_ignore.search(splits[i - 1]) is not None else style_docstring(s, max_len=max_len)) for i, s in enumerate(splits) ] black_errors = "\n\n".join([s[1] for s in splits if isinstance(s, tuple) and len(s[1]) > 0]) splits = [s[0] if isinstance(s, tuple) else s for s in splits] clean_code = '\"\"\"'.join(splits) # fmt: on return clean_code, black_errors
34,179
148,116
208
python/ray/_private/utils.py
73
12
def check_version_info(cluster_metadata): cluster_version_info = ( cluster_metadata["ray_version"], cluster_metadata["python_version"], ) version_info = compute_version_info() if version_info != cluster_version_info: node_ip_address = ray._private.services.get_node_ip_address() error_message = ( "Version mismatch: The cluster was started with:\n" " Ray: " + cluster_version_info[0] + "\n" " Python: " + cluster_version_info[1] + "\n" "This process on node " + node_ip_address + " was started with:" + "\n" " Ray: " + version_info[0] + "\n" " Python: " + version_info[1]
[Core] Add a utility to check GCS / Ray cluster health (#23382) * Provide a utility to ping a Ray cluster and verify it has the same Ray version. This is useful to check if a Ray cluster is available at a given address, without connecting to the cluster with the more heavyweight ray.init(). This utility is integrated with ray memory to provide a better error message when the Ray cluster is unavailable. There seem to be user demand for exposing this as an API as well. * Improve the error message when the address provided to Ray does not contain port.
check_version_info
d5d2ef424965b2cfdc62a97674dbca7abe3af34b
ray
utils.py
22
17
https://github.com/ray-project/ray.git
2
90
0
39
176
Python
{ "docstring": "Check if the Python and Ray versions stored in GCS matches this process.\n Args:\n cluster_metadata: Ray cluster metadata from GCS.\n\n Raises:\n Exception: An exception is raised if there is a version mismatch.\n ", "language": "en", "n_whitespaces": 55, "n_words": 32, "vocab_size": 29 }
def check_version_info(cluster_metadata): cluster_version_info = ( cluster_metadata["ray_version"], cluster_metadata["python_version"], ) version_info = compute_version_info() if version_info != cluster_version_info: node_ip_address = ray._private.services.get_node_ip_address() error_message = ( "Version mismatch: The cluster was started with:\n" " Ray: " + cluster_version_info[0] + "\n" " Python: " + cluster_version_info[1] + "\n" "This process on node " + node_ip_address + " was started with:" + "\n" " Ray: " + version_info[0] + "\n" " Python: " + version_info[1] + "\n" ) raise RuntimeError(error_message)
3,481
20,690
45
pipenv/patched/notpip/_vendor/rich/__init__.py
19
4
def get_console() -> "Console":
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
get_console
f3166e673fe8d40277b804d35d77dcdb760fc3b3
pipenv
__init__.py
10
12
https://github.com/pypa/pipenv.git
2
26
0
16
50
Python
{ "docstring": "Get a global :class:`~rich.console.Console` instance. This function is used when Rich requires a Console,\n and hasn't been explicitly given one.\n\n Returns:\n Console: A console instance.\n ", "language": "en", "n_whitespaces": 41, "n_words": 25, "vocab_size": 23 }
def get_console() -> "Console": global _console if _console is None: from .console import Console _console = Console() return _console
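The same accessor is exposed at the top level of the upstream `rich` package, so a usage sketch looks like this (shown against the public package rather than pip's vendored copy):

```python
from rich import get_console

console = get_console()              # created lazily on first use
console.print("[bold green]ready[/bold green]")

# Later callers get the exact same global instance.
assert console is get_console()
```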
12,300
60,852
111
.venv/lib/python3.8/site-packages/pip/_internal/models/link.py
52
7
def is_hash_allowed(self, hashes): # type: (Optional[Hashes]) -> bool if hashes is None or not self.has_hash: return False # Assert non-None so mypy knows self.hash_name and self.hash are str. assert self.hash_name is not None assert self.h
upd; format
is_hash_allowed
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
transferlearning
link.py
9
6
https://github.com/jindongwang/transferlearning.git
3
49
0
40
79
Python
{ "docstring": "\n Return True if the link has a hash and it is allowed.\n ", "language": "en", "n_whitespaces": 27, "n_words": 12, "vocab_size": 12 }
def is_hash_allowed(self, hashes): # type: (Optional[Hashes]) -> bool if hashes is None or not self.has_hash: return False # Assert non-None so mypy knows self.hash_name and self.hash are str. assert self.hash_name is not None assert self.hash is not None return hashes.is_hash_allowed(self.hash_name, hex_digest=self.hash) # TODO: Relax this comparison logic to ignore, for example, fragments.
76,126
260,218
110
sklearn/metrics/_ranking.py
60
28
def coverage_error(y_true, y_score, *, sample_weight=None): y_true = check_array(y_true, ensure_2d=True) y_score = check_array(y_score, ensure_2d=True) check_consistent_length(y_true, y_score, sample_weight) y_type = type_of_target(y_true, input_name="y_true") if y_type != "multilabel-indicator": raise ValueError("{0} format is not supported".format(y_type)) if y_true.shape != y_score.shape: raise ValueError("y_true and y_score have different shape") y_score_mask = np.ma.masked_array(y_score, mask=np.logical_not(y_true)) y_min_relevant = y_score_mask.min(axis=1).reshape((-1, 1)) coverage = (y_score >= y_min_relevant).sum(axis=1) coverage = coverage.filled(0) return np.average(coverage, weights=sample_weight)
FIX Ensure correct sklearn.metrics.coverage_error error message for 1D array (#23548) * Change input array to ensure_2d=True * Reshape input list to 2D if metric is coverage_error * Add test for error message with 1D array on coverage_error * Modify 1D error message test * Use parametrize to test different 1d arrays * Explain why reshape in test_regression_thresholded_inf_nan_input * Add changelog entry for this fix * Add test comments to sklearn/metrics/tests/test_ranking.py Co-authored-by: Julien Jerphanion <[email protected]> Co-authored-by: Julien Jerphanion <[email protected]>
coverage_error
e98e0353787f87ce10d6d47e643bbefe9b6a8ddd
scikit-learn
_ranking.py
12
14
https://github.com/scikit-learn/scikit-learn.git
3
153
0
46
244
Python
{ "docstring": "Coverage error measure.\n\n Compute how far we need to go through the ranked scores to cover all\n true labels. The best value is equal to the average number\n of labels in ``y_true`` per sample.\n\n Ties in ``y_scores`` are broken by giving maximal rank that would have\n been assigned to all tied values.\n\n Note: Our implementation's score is 1 greater than the one given in\n Tsoumakas et al., 2010. This extends it to handle the degenerate case\n in which an instance has 0 true labels.\n\n Read more in the :ref:`User Guide <coverage_error>`.\n\n Parameters\n ----------\n y_true : ndarray of shape (n_samples, n_labels)\n True binary labels in binary indicator format.\n\n y_score : ndarray of shape (n_samples, n_labels)\n Target scores, can either be probability estimates of the positive\n class, confidence values, or non-thresholded measure of decisions\n (as returned by \"decision_function\" on some classifiers).\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights.\n\n Returns\n -------\n coverage_error : float\n\n References\n ----------\n .. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).\n Mining multi-label data. In Data mining and knowledge discovery\n handbook (pp. 667-685). Springer US.\n\n ", "language": "en", "n_whitespaces": 297, "n_words": 179, "vocab_size": 144 }
def coverage_error(y_true, y_score, *, sample_weight=None): y_true = check_array(y_true, ensure_2d=True) y_score = check_array(y_score, ensure_2d=True) check_consistent_length(y_true, y_score, sample_weight) y_type = type_of_target(y_true, input_name="y_true") if y_type != "multilabel-indicator": raise ValueError("{0} format is not supported".format(y_type)) if y_true.shape != y_score.shape: raise ValueError("y_true and y_score have different shape") y_score_mask = np.ma.masked_array(y_score, mask=np.logical_not(y_true)) y_min_relevant = y_score_mask.min(axis=1).reshape((-1, 1)) coverage = (y_score >= y_min_relevant).sum(axis=1) coverage = coverage.filled(0) return np.average(coverage, weights=sample_weight)
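A worked example of the metric above, using the public `sklearn.metrics.coverage_error` wrapper: the first sample needs the top two ranked labels to cover its single true label (its score of 0.75 is beaten only by 1.0), the second needs all three, so the average is 2.5.

```python
import numpy as np
from sklearn.metrics import coverage_error

y_true = np.array([[1, 0, 0],
                   [0, 0, 1]])
y_score = np.array([[0.75, 0.5, 1.0],
                    [1.0, 0.2, 0.1]])

print(coverage_error(y_true, y_score))  # 2.5
```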
69,654
241,685
20
pytorch_lightning/callbacks/progress/base.py
6
6
def total_predict_batches(self) -> int: return sum(self.trainer.num_predict_batches)
Integrate progress tracking into the progress bar (#11213)
total_predict_batches
8a549a550cb10189ff1db382f546a40cd1c6c5b3
lightning
base.py
9
7
https://github.com/Lightning-AI/lightning.git
1
17
0
6
30
Python
{ "docstring": "The total number of prediction batches, which may change from epoch to epoch.\n\n Use this to set the total number of iterations in the progress bar. Can return ``inf`` if the predict dataloader\n is of infinite size.\n ", "language": "en", "n_whitespaces": 58, "n_words": 37, "vocab_size": 30 }
def total_predict_batches(self) -> int: return sum(self.trainer.num_predict_batches)
21,975
104,793
35
src/datasets/dataset_dict.py
14
10
def shape(self) -> Dict[str, Tuple[int]]: self._check_values_type() return {k: datas
Add code examples for DatasetDict (#4245) * 📝 add code examples for DatasetDict * 🖍 apply quentin review
shape
1904d0c0a3a96330d9b870cdca3e9a3a137f2977
datasets
dataset_dict.py
9
14
https://github.com/huggingface/datasets.git
2
39
0
14
62
Python
{ "docstring": "Shape of each split of the dataset (number of columns, number of rows).\n\n Example:\n\n ```py\n >>> from datasets import load_dataset\n >>> ds = load_dataset(\"rotten_tomatoes\")\n >>> ds.shape\n {'test': (1066, 2), 'train': (8530, 2), 'validation': (1066, 2)}\n ```\n ", "language": "en", "n_whitespaces": 92, "n_words": 36, "vocab_size": 29 }
def shape(self) -> Dict[str, Tuple[int]]: self._check_values_type() return {k: dataset.shape for k, dataset in self.items()}
6,560
36,018
71
src/transformers/onnx/config.py
17
8
def is_torch_support_available(self) -> bool: if is_torch_available(): from transformers.file_utils import t
Add ONNX export for ViT (#15658) * Add ONNX support for ViT * Refactor to use generic preprocessor * Add vision dep to tests * Extend ONNX slow tests to ViT * Add dummy image generator * Use model_type to determine modality * Add deprecation warnings for tokenizer argument * Add warning when overwriting the preprocessor * Add optional args to docstrings * Add minimum PyTorch version to OnnxConfig * Refactor OnnxConfig class variables from CONSTANT_NAME to snake_case * Add reasonable value for default atol Co-authored-by: Sylvain Gugger <[email protected]>
is_torch_support_available
50dd314d939a86f3a81e19af01459f449fbaeeca
transformers
config.py
9
12
https://github.com/huggingface/transformers.git
2
29
0
15
50
Python
{ "docstring": "\n The minimum PyTorch version required to export the model.\n\n Returns:\n `bool`: Whether the installed version of PyTorch is compatible with the model.\n ", "language": "en", "n_whitespaces": 55, "n_words": 22, "vocab_size": 17 }
def is_torch_support_available(self) -> bool: if is_torch_available(): from transformers.file_utils import torch_version return torch_version >= self.torch_onnx_minimum_version else: return False
589
3,887
106
airbyte-integrations/connectors/source-orb/source_orb/source.py
31
20
def check_connection(self, logger, config) -> Tuple[bool, any]: auth_header = TokenAuthenticator(token=config["api_key"]).get_auth_header() ping_url = ORB_API_BASE_URL + "ping" ping_response = requests.get(ping_url, headers=
🎉 New Source: Orb (#9985) * V1 of source_orb connector * add boostrap.md file * add clause on Pagination to bootstrap.md * add SUMMARY documentation * add lookback_window_days connector parameter * Add support for start_date parameter * Add ability to transform record in order to un-nest IDs * Add support for extracting event properties based on connector configuration
check_connection
1e0ac30ebdcfce55a5644bcd486044da45c93dd6
airbyte
source.py
13
13
https://github.com/airbytehq/airbyte.git
2
69
0
28
114
Python
{ "docstring": "\n Makes a request to the /ping endpoint, which validates that the authentication credentials are appropriate.\n API Docs: https://docs.withorb.com/reference/ping\n ", "language": "en", "n_whitespaces": 40, "n_words": 18, "vocab_size": 17 }
def check_connection(self, logger, config) -> Tuple[bool, any]: auth_header = TokenAuthenticator(token=config["api_key"]).get_auth_header() ping_url = ORB_API_BASE_URL + "ping" ping_response = requests.get(ping_url, headers=auth_header) try: ping_response.raise_for_status() return True, None except Exception as e: return False, e
16,416
75,517
555
wagtail/search/backends/database/postgres/postgres.py
112
37
def add_items_upsert(self, content_type_pk, indexers): compiler = InsertQuery(IndexEntry).get_compiler(connection=self.connection) title_sql = [] autocomplete_sql
Reformat with black
add_items_upsert
d10f15e55806c6944827d801cd9c2d53f5da4186
wagtail
postgres.py
13
47
https://github.com/wagtail/wagtail.git
3
260
0
67
417
Python
{ "docstring": "\n INSERT INTO %s (content_type_id, object_id, title, autocomplete, body, title_norm)\n (VALUES %s)\n ON CONFLICT (content_type_id, object_id)\n DO UPDATE SET title = EXCLUDED.title,\n title_norm = 1.0,\n autocomplete = EXCLUDED.autocomplete,\n body = EXCLUDED.body\n ", "language": "en", "n_whitespaces": 193, "n_words": 30, "vocab_size": 26 }
def add_items_upsert(self, content_type_pk, indexers): compiler = InsertQuery(IndexEntry).get_compiler(connection=self.connection) title_sql = [] autocomplete_sql = [] body_sql = [] data_params = [] for indexer in indexers: data_params.extend((content_type_pk, indexer.id)) # Compile title value value = compiler.prepare_value( IndexEntry._meta.get_field("title"), indexer.title ) sql, params = value.as_sql(compiler, self.connection) title_sql.append(sql) data_params.extend(params) # Compile autocomplete value value = compiler.prepare_value( IndexEntry._meta.get_field("autocomplete"), indexer.autocomplete ) sql, params = value.as_sql(compiler, self.connection) autocomplete_sql.append(sql) data_params.extend(params) # Compile body value value = compiler.prepare_value( IndexEntry._meta.get_field("body"), indexer.body ) sql, params = value.as_sql(compiler, self.connection) body_sql.append(sql) data_params.extend(params) data_sql = ", ".join( [ "(%%s, %%s, %s, %s, %s, 1.0)" % (a, b, c) for a, b, c in zip(title_sql, autocomplete_sql, body_sql) ] ) with self.connection.cursor() as cursor: cursor.execute( % (IndexEntry._meta.db_table, data_sql), data_params, ) self._refresh_title_norms()
19,097
94,500
216
tests/sentry/sentry_metrics/test_all_indexers.py
108
27
def test_already_created_plus_written_results(indexer, indexer_cache) -> None: org_id = 1234 raw_indexer = indexer indexer = CachingIndexer(indexer_cache, indexer) v0 = raw_indexer.record(use_case_id, org_id, "v1.2.0") v1 = raw_indexer.record(use_case_id, org_id, "v1.2.1") v2 = raw_indexer.record(use_case_id, org_id, "v1.2.2") expected_mapping = {"v1.2.0": v0, "v1.2.1": v1, "v1.2.2": v2} results = indexer.bulk_record( use_case_id=use_case_id, org_strings={org_id: {"v1.2.0", "v1.2.1", "v1.2.2"}} ) assert len(results[org_id]) == len(expected_mapping) == 3 for string, id in results[org_id].items(): assert expected_mapping[string] == id results = indexer.bulk_record( use_case_
ref(metrics): Split caching out of indexers, random test refactoring [sns-1606] (#37714)
test_already_created_plus_written_results
7bbb85a0d95d23620228a02bb4401fc09658f5f1
sentry
test_all_indexers.py
13
32
https://github.com/getsentry/sentry.git
3
257
0
62
411
Python
{ "docstring": "\n Test that we correctly combine db read results with db write results\n for the same organization.\n ", "language": "en", "n_whitespaces": 26, "n_words": 16, "vocab_size": 14 }
def test_already_created_plus_written_results(indexer, indexer_cache) -> None: org_id = 1234 raw_indexer = indexer indexer = CachingIndexer(indexer_cache, indexer) v0 = raw_indexer.record(use_case_id, org_id, "v1.2.0") v1 = raw_indexer.record(use_case_id, org_id, "v1.2.1") v2 = raw_indexer.record(use_case_id, org_id, "v1.2.2") expected_mapping = {"v1.2.0": v0, "v1.2.1": v1, "v1.2.2": v2} results = indexer.bulk_record( use_case_id=use_case_id, org_strings={org_id: {"v1.2.0", "v1.2.1", "v1.2.2"}} ) assert len(results[org_id]) == len(expected_mapping) == 3 for string, id in results[org_id].items(): assert expected_mapping[string] == id results = indexer.bulk_record( use_case_id=use_case_id, org_strings={org_id: {"v1.2.0", "v1.2.1", "v1.2.2", "v1.2.3"}}, ) v3 = raw_indexer.resolve(use_case_id, org_id, "v1.2.3") expected_mapping["v1.2.3"] = v3 assert len(results[org_id]) == len(expected_mapping) == 4 for string, id in results[org_id].items(): assert expected_mapping[string] == id fetch_meta = results.get_fetch_metadata() assert_fetch_type_for_tag_string_set( fetch_meta[org_id], FetchType.CACHE_HIT, {"v1.2.0", "v1.2.1", "v1.2.2"} ) assert_fetch_type_for_tag_string_set(fetch_meta[org_id], FetchType.FIRST_SEEN, {"v1.2.3"})
55,634
219,596
740
python3.10.4/Lib/_osx_support.py
268
32
def compiler_fixup(compiler_so, cc_args): stripArch = stripSysroot = False compiler_so = list(compiler_so) if not _supports_universal_builds(): # OSX before 10.4.0, these don't support -arch and -isysroot at # all. stripArch = stripSysroot = True else: stripArch = '-arch' in cc_args stripSysroot = any(arg for arg in cc_args if arg.startswith('-isysroot')) if stripArch or 'ARCHFLAGS' in os.environ: while True: try: index = compiler_so.index('-arch') # Strip this argument and the next one: del compiler_so[index:index+2] except ValueError: break elif not _supports_arm64_builds(): # Look for "-arch arm64" and drop that for idx in reversed(range(len(compiler_so))): if compiler_so[idx] == '-arch' and compiler_so[idx+1] == "arm64": del compiler_so[idx:idx+2] if 'ARCHFLAGS' in os.environ and not stripArch: # User specified different -arch flags in the environ, # see also distutils.sysconfig compiler_so = compiler_so + os.environ['ARCHFLAGS'].split() if stripSysroot: while True: indices = [i for i,x in enumerate(compiler_so) if x.startswith('-isysroot')] if not indices: break index = indices[0] if compiler_so[index] == '-isysroot': # Strip this argument and the next one: del compiler_so[index:index+2] else: # It's '-isysroot/some/path' in one arg del compiler_so[index:index+1] # Check if the SDK that is used during compilation actually exists, # the universal build requires the usage of a universal SDK and not all # users have that install
add python 3.10.4 for windows
compiler_fixup
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
_osx_support.py
16
49
https://github.com/XX-net/XX-Net.git
29
357
0
135
613
Python
{ "docstring": "\n This function will strip '-isysroot PATH' and '-arch ARCH' from the\n compile flags if the user has specified one them in extra_compile_flags.\n\n This is needed because '-arch ARCH' adds another architecture to the\n build, without a way to remove an architecture. Furthermore GCC will\n barf if multiple '-isysroot' arguments are present.\n ", "language": "en", "n_whitespaces": 70, "n_words": 51, "vocab_size": 43 }
def compiler_fixup(compiler_so, cc_args): stripArch = stripSysroot = False compiler_so = list(compiler_so) if not _supports_universal_builds(): # OSX before 10.4.0, these don't support -arch and -isysroot at # all. stripArch = stripSysroot = True else: stripArch = '-arch' in cc_args stripSysroot = any(arg for arg in cc_args if arg.startswith('-isysroot')) if stripArch or 'ARCHFLAGS' in os.environ: while True: try: index = compiler_so.index('-arch') # Strip this argument and the next one: del compiler_so[index:index+2] except ValueError: break elif not _supports_arm64_builds(): # Look for "-arch arm64" and drop that for idx in reversed(range(len(compiler_so))): if compiler_so[idx] == '-arch' and compiler_so[idx+1] == "arm64": del compiler_so[idx:idx+2] if 'ARCHFLAGS' in os.environ and not stripArch: # User specified different -arch flags in the environ, # see also distutils.sysconfig compiler_so = compiler_so + os.environ['ARCHFLAGS'].split() if stripSysroot: while True: indices = [i for i,x in enumerate(compiler_so) if x.startswith('-isysroot')] if not indices: break index = indices[0] if compiler_so[index] == '-isysroot': # Strip this argument and the next one: del compiler_so[index:index+2] else: # It's '-isysroot/some/path' in one arg del compiler_so[index:index+1] # Check if the SDK that is used during compilation actually exists, # the universal build requires the usage of a universal SDK and not all # users have that installed by default. sysroot = None argvar = cc_args indices = [i for i,x in enumerate(cc_args) if x.startswith('-isysroot')] if not indices: argvar = compiler_so indices = [i for i,x in enumerate(compiler_so) if x.startswith('-isysroot')] for idx in indices: if argvar[idx] == '-isysroot': sysroot = argvar[idx+1] break else: sysroot = argvar[idx][len('-isysroot'):] break if sysroot and not os.path.isdir(sysroot): sys.stderr.write(f"Compiling with an SDK that doesn't seem to exist: {sysroot}\n") sys.stderr.write("Please check your Xcode installation\n") sys.stderr.flush() return compiler_so
16,243
74,277
889
wagtail/core/tests/test_page_model.py
197
22
def test_copy_page_with_excluded_parental_and_child_relations(self): try: # modify excluded fields for this test EventPage.exclude_fields_in_copy = [ "advert_placements", "categories", "signup_link", ] # set up data christmas_event = EventPage.objects.get(url_path="/home/events/christmas/") summer_category = EventCategory.objects.create(name="Summer") holiday_category = EventCategory.objects.create(name="Holidays") # add URL (to test excluding a basic field) christmas_event.signup_link = "https://christmas-is-awesome.com/rsvp" # add parenta
Reformat with black
test_copy_page_with_excluded_parental_and_child_relations
d10f15e55806c6944827d801cd9c2d53f5da4186
wagtail
test_page_model.py
14
45
https://github.com/wagtail/wagtail.git
2
190
0
107
337
Python
{ "docstring": "Test that a page will be copied with parental and child relations removed if excluded.", "language": "en", "n_whitespaces": 14, "n_words": 15, "vocab_size": 15 }
def test_copy_page_with_excluded_parental_and_child_relations(self): try: # modify excluded fields for this test EventPage.exclude_fields_in_copy = [ "advert_placements", "categories", "signup_link", ] # set up data christmas_event = EventPage.objects.get(url_path="/home/events/christmas/") summer_category = EventCategory.objects.create(name="Summer") holiday_category = EventCategory.objects.create(name="Holidays") # add URL (to test excluding a basic field) christmas_event.signup_link = "https://christmas-is-awesome.com/rsvp" # add parental many to many relations christmas_event.categories = (summer_category, holiday_category) christmas_event.save() # Copy it new_christmas_event = christmas_event.copy( update_attrs={ "title": "New christmas event", "slug": "new-christmas-event", } ) # check that the signup_link was NOT copied self.assertEqual( christmas_event.signup_link, "https://christmas-is-awesome.com/rsvp" ) self.assertEqual(new_christmas_event.signup_link, "") # check that original event is untouched self.assertEqual( christmas_event.categories.count(), 2, "Child objects (parental many to many) defined on the superclass were removed from the original page", ) # check that parental many to many are NOT copied self.assertEqual( new_christmas_event.categories.count(), 0, "Child objects (parental many to many) were copied but should be excluded", ) # check that child objects on original event were left untouched self.assertEqual( christmas_event.advert_placements.count(), 1, "Child objects defined on the original superclass were edited when copied", ) # check that child objects were NOT copied self.assertEqual( new_christmas_event.advert_placements.count(), 0, "Child objects defined on the superclass were copied and should not be", ) finally: # reset excluded fields for future tests EventPage.exclude_fields_in_copy = []
52,056
207,694
32
tests/admin_views/tests.py
11
7
def test_app_model_in_list_body_class(self):
Refs #33476 -- Reformatted code with Black.
test_app_model_in_list_body_class
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
tests.py
11
3
https://github.com/django/django.git
1
27
0
11
50
Python
{ "docstring": "\n Ensure app and model tag are correctly read by change_list template\n ", "language": "en", "n_whitespaces": 26, "n_words": 11, "vocab_size": 11 }
def test_app_model_in_list_body_class(self): response = self.client.get(reverse("admin:admin_views_section_changelist")) self.assertContains(response, '<body class=" app-admin_views model-section ')
3,214
20,068
20
pipenv/patched/notpip/_vendor/distro.py
11
3
def version_parts(best=False): # type: (bool) -> Tuple[s
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
version_parts
f3166e673fe8d40277b804d35d77dcdb760fc3b3
pipenv
distro.py
7
2
https://github.com/pypa/pipenv.git
1
15
0
11
28
Python
{ "docstring": "\n Return the version of the current OS distribution as a tuple\n ``(major, minor, build_number)`` with items as follows:\n\n * ``major``: The result of :func:`distro.major_version`.\n\n * ``minor``: The result of :func:`distro.minor_version`.\n\n * ``build_number``: The result of :func:`distro.build_number`.\n\n For a description of the *best* parameter, see the :func:`distro.version`\n method.\n ", "language": "en", "n_whitespaces": 75, "n_words": 47, "vocab_size": 32 }
def version_parts(best=False): # type: (bool) -> Tuple[str, str, str] return _distro.version_parts(best)
25,267
114,734
182
mindsdb/integrations/mssql_handler/mssql_handler.py
42
11
def check_status(self): status = { 'success': False } try: con = s
Add sql server handler
check_status
cf75c4186e1caa36b18c9ddffce98da94b9904e6
mindsdb
mssql_handler.py
13
12
https://github.com/mindsdb/mindsdb.git
2
56
0
36
116
Python
{ "docstring": "\n Check the connection of the SQL Server database\n :return: success status and error message if error occurs\n ", "language": "en", "n_whitespaces": 39, "n_words": 17, "vocab_size": 15 }
def check_status(self): status = { 'success': False } try: con = self.__connect() with closing(con) as con: #TODO: best way to check con.connected ? status['success'] = True except Exception as e: log.error(f'Error connecting to SQL Server {self.database}, {e}!') status['error'] = e return status
@keras_export("keras.models.model_from_yaml")
81,500
275,885
96
keras/saving/model_config.py
37
10
def model_from_config(config, custom_objects=None): if isinstance(config, list): raise TypeError( "`model_from_config` expects a dictionary, not a list. " f"Received: co
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
model_from_config
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
model_config.py
12
9
https://github.com/keras-team/keras.git
2
41
1
35
85
Python
{ "docstring": "Instantiates a Keras model from its config.\n\n Usage:\n ```\n # for a Functional API model\n tf.keras.Model().from_config(model.get_config())\n\n # for a Sequential model\n tf.keras.Sequential().from_config(model.get_config())\n ```\n\n Args:\n config: Configuration dictionary.\n custom_objects: Optional dictionary mapping names\n (strings) to custom classes or functions to be\n considered during deserialization.\n\n Returns:\n A Keras model instance (uncompiled).\n\n Raises:\n TypeError: if `config` is not a dictionary.\n ", "language": "en", "n_whitespaces": 140, "n_words": 57, "vocab_size": 45 }
def model_from_config(config, custom_objects=None): if isinstance(config, list): raise TypeError( "`model_from_config` expects a dictionary, not a list. " f"Received: config={config}. Did you meant to use " "`Sequential.from_config(config)`?" ) from keras.layers import deserialize # pylint: disable=g-import-not-at-top return deserialize(config, custom_objects=custom_objects) @keras_export("keras.models.model_from_yaml")
16,242
74,269
41
wagtail/core/tests/test_page_model.py
9
12
def test_golden_path(self): with self.assertNumQueries(0): result = self.page.cached_
Reformat with black
test_golden_path
d10f15e55806c6944827d801cd9c2d53f5da4186
wagtail
test_page_model.py
12
4
https://github.com/wagtail/wagtail.git
1
42
0
9
71
Python
{ "docstring": "\n The return value should match the value you'd get\n if fetching the ContentType from the database,\n and shouldn't trigger any database queries when\n the ContentType is already in memory.\n ", "language": "en", "n_whitespaces": 65, "n_words": 29, "vocab_size": 24 }
def test_golden_path(self): with self.assertNumQueries(0): result = self.page.cached_content_type self.assertEqual(result, ContentType.objects.get(id=self.page.content_type_id))
18,484
88,971
44
src/sentry/lang/javascript/processor.py
22
12
def fold_function_name(function_name): parts = function_name.split(".") if len(parts) == 1: retu
ref(processor): Fold occurences of property names in function_name (#41697) Fold multiple consecutive occurrences of the same property name into a single group, excluding the last component. ``` foo | foo foo.foo | foo.foo foo.foo.foo | {foo#2}.foo bar.foo.foo | bar.foo.foo bar.foo.foo.foo | bar.{foo#2}.foo bar.foo.foo.onError | bar.{foo#2}.onError bar.bar.bar.foo.foo.onError | {bar#3}.{foo#2}.onError bar.foo.foo.bar.bar.onError | bar.{foo#2}.{bar#2}.onError ``` This is mostly done for React, where some frames have function name like `<object>.children.children.children.onSubmitError` when function is a prop passed down the component stack.
fold_function_name
8078d89b46841c7f7a57cc49a4b9cafb42b12ce0
sentry
processor.py
10
8
https://github.com/getsentry/sentry.git
3
53
0
20
82
Python
{ "docstring": "\n Fold multiple consecutive occurences of the same property name into a single group, excluding the last component.\n\n foo | foo\n foo.foo | foo.foo\n foo.foo.foo | {foo#2}.foo\n bar.foo.foo | bar.foo.foo\n bar.foo.foo.foo | bar.{foo#2}.foo\n bar.foo.foo.onError | bar.{foo#2}.onError\n bar.bar.bar.foo.foo.onError | {bar#3}.{foo#2}.onError\n bar.foo.foo.bar.bar.onError | bar.{foo#2}.{bar#2}.onError\n ", "language": "en", "n_whitespaces": 72, "n_words": 41, "vocab_size": 30 }
def fold_function_name(function_name): parts = function_name.split(".") if len(parts) == 1: return function_name tail = parts.pop() grouped = [list(g) for _, g in groupby(parts)]
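The code field above stops right after the `groupby` call, so the folding step itself is missing. A possible completion that reproduces the table in the docstring (an assumption about the original implementation, not Sentry's actual code) collapses each run into `{name#count}` and re-attaches the excluded tail:

```python
from itertools import groupby

def fold_function_name_sketch(function_name):
    # Hypothetical reconstruction of the truncated helper above.
    parts = function_name.split(".")
    if len(parts) == 1:
        return function_name
    tail = parts.pop()
    grouped = [list(g) for _, g in groupby(parts)]
    folded = ".".join(
        g[0] if len(g) == 1 else "{%s#%d}" % (g[0], len(g)) for g in grouped
    )
    return f"{folded}.{tail}"

assert fold_function_name_sketch("bar.foo.foo.onError") == "bar.{foo#2}.onError"
assert fold_function_name_sketch("bar.bar.bar.foo.foo.onError") == "{bar#3}.{foo#2}.onError"
```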
49,675
200,507
238
sympy/integrals/transforms.py
73
28
def _laplace_rule_exp(f, t, s, doit=True, **hints): hints.pop('simplify', True) a = Wild('a', exclude=[t]) y = Wild('y') z = Wild('z') k, func = f.as_independent(t, as_Add=False) ma1 = func.match(exp(y)*z) if ma1: ma2 = ma1[y].collect(t).match(a*t) if ma2: debug('_laplace_apply_rules match:') debug(' f: %s ( %s, %s )'%(f, ma1, ma2)) debug(' rule: multiply with exp (1.5)') L = _laplace_apply_rules(ma1[z], t, s-ma2[a], doit=do
Fixed Issue #24294
_laplace_rule_exp
807f499971f9c298bc6bacbb08bcb19299fbb42c
sympy
transforms.py
14
20
https://github.com/sympy/sympy.git
4
178
0
59
283
Python
{ "docstring": "\n This internal helper function tries to transform a product containing the\n `exp` function and returns `None` if it cannot do it.\n ", "language": "en", "n_whitespaces": 31, "n_words": 21, "vocab_size": 20 }
def _laplace_rule_exp(f, t, s, doit=True, **hints): hints.pop('simplify', True) a = Wild('a', exclude=[t]) y = Wild('y') z = Wild('z') k, func = f.as_independent(t, as_Add=False) ma1 = func.match(exp(y)*z) if ma1: ma2 = ma1[y].collect(t).match(a*t) if ma2: debug('_laplace_apply_rules match:') debug(' f: %s ( %s, %s )'%(f, ma1, ma2)) debug(' rule: multiply with exp (1.5)') L = _laplace_apply_rules(ma1[z], t, s-ma2[a], doit=doit, **hints) try: r, p, c = L return (k*r, p+ma2[a], c) except TypeError: return k*L return None
12,099
60,370
68
code/deep/BJMMD/caffe/scripts/cpp_lint.py
37
9
def FindEndOfExpressionInLine(line, startpos, depth, startchar, endchar): for i in xrange(startpos, len(line)): if line[i] == startchar: depth += 1 elif line[i] == endchar: depth -= 1 if depth == 0: return (i + 1, 0) return (-1, depth)
Balanced joint maximum mean discrepancy for deep transfer learning
FindEndOfExpressionInLine
cc4d0564756ca067516f71718a3d135996525909
transferlearning
cpp_lint.py
14
9
https://github.com/jindongwang/transferlearning.git
5
69
0
29
103
Python
{ "docstring": "Find the position just after the matching endchar.\n\n Args:\n line: a CleansedLines line.\n startpos: start searching at this position.\n depth: nesting level at startpos.\n startchar: expression opening character.\n endchar: expression closing character.\n\n Returns:\n On finding matching endchar: (index just after matching endchar, 0)\n Otherwise: (-1, new depth at end of this line)\n ", "language": "en", "n_whitespaces": 76, "n_words": 52, "vocab_size": 41 }
def FindEndOfExpressionInLine(line, startpos, depth, startchar, endchar): for i in xrange(startpos, len(line)): if line[i] == startchar: depth += 1 elif line[i] == endchar: depth -= 1 if depth == 0: return (i + 1, 0) return (-1, depth)
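A quick sanity check of the bracket-matching helper above (note the surrounding lint script targets Python 2, where `xrange` exists; on Python 3 one would substitute `range`):

```python
line = "foo(bar(baz), qux) + 1"

# Start at the opening parenthesis (index 3) with no prior nesting.
end, depth = FindEndOfExpressionInLine(line, 3, 0, "(", ")")
assert line[3:end] == "(bar(baz), qux)"
assert depth == 0            # fully closed on this line

# If the closer never appears, the remaining depth is reported instead.
end, depth = FindEndOfExpressionInLine("call(open_paren", 4, 0, "(", ")")
assert (end, depth) == (-1, 1)
```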
76,472
260,762
40
sklearn/neighbors/tests/test_nca.py
22
14
def test_toy_example_collapse_points(): rng = np.random.RandomState(42) input_dim = 5 two_points = rng.randn(2, input_dim)
MAINT Parameters validation for NeighborhoodComponentsAnalysis (#24195) Co-authored-by: jeremie du boisberranger <[email protected]>
test_toy_example_collapse_points
d7c978b764c6aafb65cc28757baf3f64da2cae34
scikit-learn
test_nca.py
13
15
https://github.com/scikit-learn/scikit-learn.git
1
132
0
18
99
Python
{ "docstring": "Test on a toy example of three points that should collapse\n\n We build a simple example: two points from the same class and a point from\n a different class in the middle of them. On this simple example, the new\n (transformed) points should all collapse into one single point. Indeed, the\n objective is 2/(1 + exp(d/2)), with d the euclidean distance between the\n two samples from the same class. This is maximized for d=0 (because d>=0),\n with an objective equal to 1 (loss=-1.).\n\n ", "language": "en", "n_whitespaces": 104, "n_words": 83, "vocab_size": 60 }
def test_toy_example_collapse_points(): rng = np.random.RandomState(42) input_dim = 5 two_points = rng.randn(2, input_dim) X = np.vstack([two_points, two_points.mean(axis=0)[np.newaxis, :]]) y = [0, 0, 1]
19,027
93,936
264
tests/sentry/sentry_metrics/test_batch.py
31
9
def _get_string_indexer_log_records(caplog): return [ ( rec.message, { k: v for k, v in rec.__dict__.items() if k
ref(metrics_indexer): Improve typing, introduce more dataclasses, fix org_id namespacing bug in metadata [INGEST-1380] (#37170)
_get_string_indexer_log_records
f31b57cbc5ec359c8ef9c6459d3d9d8ffcd6e8d9
sentry
test_batch.py
12
19
https://github.com/getsentry/sentry.git
4
54
0
24
88
Python
{ "docstring": "\n Get all log records and relevant extra arguments for easy snapshotting.\n ", "language": "en", "n_whitespaces": 18, "n_words": 11, "vocab_size": 11 }
def _get_string_indexer_log_records(caplog): return [ ( rec.message, { k: v for k, v in rec.__dict__.items() if k in ( "string_type", "is_global_quota", "num_global_quotas", "num_global_quotas", "org_batch_size", ) }, ) for rec in caplog.records ]
93,990
294,963
71
tests/components/subaru/test_config_flow.py
23
16
async def test_registered_pin_required(hass, user_form): with patch(MOCK_API_CONNECT, return_value=True), patch( MOCK_API_DEVICE_REGISTERED, new_callable=PropertyMock ) as mock_device_registered, patch(MOCK_API_IS_PIN_REQUIRED, return_value=True): mock_device_registered.return_value = True await hass.config_entries.flow.async_configure(
Add 2FA support for Subaru integration setup (#68753) * Add 2FA support for Subaru integration setup * Update config flow to abort with 2FA request fail
test_registered_pin_required
ab0abdc988ac101217ba043909c4be8b33101ab3
core
test_config_flow.py
12
8
https://github.com/home-assistant/core.git
1
61
0
22
100
Python
{ "docstring": "Test if the device is already registered and PIN required.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
async def test_registered_pin_required(hass, user_form): with patch(MOCK_API_CONNECT, return_value=True), patch( MOCK_API_DEVICE_REGISTERED, new_callable=PropertyMock ) as mock_device_registered, patch(MOCK_API_IS_PIN_REQUIRED, return_value=True): mock_device_registered.return_value = True await hass.config_entries.flow.async_configure( user_form["flow_id"], user_input=TEST_CREDS )
73,197
249,920
674
tests/replication/test_pusher_shard.py
133
22
def test_send_push_multiple_workers(self): http_client_mock1 = Mock(spec_set=["post_json_get_json"]) http_client_mock1.post_jso
Modernize unit tests configuration settings for workers. (#14568) Use the newer foo_instances configuration instead of the deprecated flags to enable specific features (e.g. start_pushers).
test_send_push_multiple_workers
854a6884d81c95297bf93badcddc00a4cab93418
synapse
test_pusher_shard.py
13
55
https://github.com/matrix-org/synapse.git
1
278
0
71
473
Python
{ "docstring": "Test that registration works when using sharded pusher workers.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
def test_send_push_multiple_workers(self): http_client_mock1 = Mock(spec_set=["post_json_get_json"]) http_client_mock1.post_json_get_json.side_effect = ( lambda *_, **__: defer.succeed({}) ) self.make_worker_hs( "synapse.app.generic_worker", { "worker_name": "pusher1", "pusher_instances": ["pusher1", "pusher2"], }, proxied_blacklisted_http_client=http_client_mock1, ) http_client_mock2 = Mock(spec_set=["post_json_get_json"]) http_client_mock2.post_json_get_json.side_effect = ( lambda *_, **__: defer.succeed({}) ) self.make_worker_hs( "synapse.app.generic_worker", { "worker_name": "pusher2", "pusher_instances": ["pusher1", "pusher2"], }, proxied_blacklisted_http_client=http_client_mock2, ) # We choose a user name that we know should go to pusher1. event_id = self._create_pusher_and_send_msg("user2") # Advance time a bit, so the pusher will register something has happened self.pump() http_client_mock1.post_json_get_json.assert_called_once() http_client_mock2.post_json_get_json.assert_not_called() self.assertEqual( http_client_mock1.post_json_get_json.call_args[0][0], "https://push.example.com/_matrix/push/v1/notify", ) self.assertEqual( event_id, http_client_mock1.post_json_get_json.call_args[0][1]["notification"][ "event_id" ], ) http_client_mock1.post_json_get_json.reset_mock() http_client_mock2.post_json_get_json.reset_mock() # Now we choose a user name that we know should go to pusher2. event_id = self._create_pusher_and_send_msg("user4") # Advance time a bit, so the pusher will register something has happened self.pump() http_client_mock1.post_json_get_json.assert_not_called() http_client_mock2.post_json_get_json.assert_called_once() self.assertEqual( http_client_mock2.post_json_get_json.call_args[0][0], "https://push.example.com/_matrix/push/v1/notify", ) self.assertEqual( event_id, http_client_mock2.post_json_get_json.call_args[0][1]["notification"][ "event_id" ], )