Dataset schema (one entry per column: name, dtype, observed range of values or string lengths). Sample rows follow, one field per line in this column order.

complexity       int64    1 – 139
fun_name         string   length 1 – 80
code             string   length 101 – 62.2k
commit_id        string   length 40 – 40
ast_errors       string   length 0 – 3.11k
ast_levels       int64    6 – 36
file_name        string   length 5 – 79
n_ast_nodes      int64    17 – 19.2k
commit_message   string   length 3 – 15.3k
d_id             int64    12 – 121k
n_ast_errors     int64    0 – 9
n_whitespaces    int64    4 – 10.8k
token_counts     int64    5 – 3.06k
vocab_size       int64    4 – 1.11k
id               int64    20 – 338k
n_words          int64    4 – 4.82k
repo             string   length 3 – 22
n_identifiers    int64    2 – 176
path             string   length 7 – 134
language         string   1 class (Python)
nloc             int64    1 – 413
documentation    dict
url              string   length 31 – 59
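A minimal sketch of how rows with this schema could be inspected with the Hugging Face `datasets` library; the dataset identifier "user/code-functions" is a hypothetical placeholder, since the dump does not name the dataset.

from datasets import load_dataset

# "user/code-functions" is a placeholder; substitute the real dataset name.
ds = load_dataset("user/code-functions", split="train")
row = ds[0]
print(row["fun_name"], row["repo"], row["url"])
print(row["documentation"]["docstring"][:80])  # each documentation field is a dict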
2
is_guessed_to_be_created_on_project_creation
def is_guessed_to_be_created_on_project_creation(self) -> bool:
    # TODO(mgaeta): Bug: Rule is optional.
    delta = abs(self.rule.date_added - self.project.date_added)
    guess: bool = delta.total_seconds() < 30 and self.rule.label == DEFAULT_RULE_LABEL
    return guess
654c6627307359956c6d44f83791d6b177841363
11
event_frequency.py
75
ref(types): Add types to conditions and filters (#32393)
19,359
0
62
45
26
96,980
27
sentry
12
src/sentry/rules/conditions/event_frequency.py
Python
13
{ "docstring": "\n Best effort approximation on whether a rule with this condition was\n created on project creation based on how closely the rule and project\n are created; and if the label matches the default name used on project\n creation.\n\n :return:\n bool: True if rule is approximated to be created on project creation, False otherwise.\n ", "language": "en", "n_whitespaces": 106, "n_words": 52, "vocab_size": 38 }
https://github.com/getsentry/sentry.git
1
test_ohe_drop_first_explicit_categories
def test_ohe_drop_first_explicit_categories(handle_unknown):
    X = [["a", 0], ["b", 2], ["b", 1]]
    ohe = OneHotEncoder(
        drop="first",
        sparse=False,
        handle_unknown=handle_unknown,
        categories=[["b", "a"], [1, 2]],
    )
    ohe.fit(X)

    X_test = [["c", 1]]
    X_expected = np.array([[0, 0]])

    warn_msg = (
        r"Found unknown categories in columns \[0\] during transform. "
        r"These unknown categories will be encoded as all zeros"
    )
    with pytest.warns(UserWarning, match=warn_msg):
        X_trans = ohe.transform(X_test)

    assert_allclose(X_trans, X_expected)
7f0006c8aad1a09621ad19c3db19c3ff0555a183
12
test_encoders.py
198
ENH Adds infrequent categories to OneHotEncoder (#16018) * ENH Completely adds infrequent categories * STY Linting * STY Linting * DOC Improves wording * DOC Lint * BUG Fixes * CLN Address comments * CLN Address comments * DOC Uses math to description float min_frequency * DOC Adds comment regarding drop * BUG Fixes method name * DOC Clearer docstring * TST Adds more tests * FIX Fixes mege * CLN More pythonic * CLN Address comments * STY Flake8 * CLN Address comments * DOC Fix * MRG * WIP * ENH Address comments * STY Fix * ENH Use functiion call instead of property * ENH Adds counts feature * CLN Rename variables * DOC More details * CLN Remove unneeded line * CLN Less lines is less complicated * CLN Less diffs * CLN Improves readiabilty * BUG Fix * CLN Address comments * TST Fix * CLN Address comments * CLN Address comments * CLN Move docstring to userguide * DOC Better wrapping * TST Adds test to handle_unknown='error' * ENH Spelling error in docstring * BUG Fixes counter with nan values * BUG Removes unneeded test * BUG Fixes issue * ENH Sync with main * DOC Correct settings * DOC Adds docstring * DOC Immprove user guide * DOC Move to 1.0 * DOC Update docs * TST Remove test * DOC Update docstring * STY Linting * DOC Address comments * ENH Neater code * DOC Update explaination for auto * Update sklearn/preprocessing/_encoders.py Co-authored-by: Roman Yurchak <[email protected]> * TST Uses docstring instead of comments * TST Remove call to fit * TST Spelling error * ENH Adds support for drop + infrequent categories * ENH Adds infrequent_if_exist option * DOC Address comments for user guide * DOC Address comments for whats_new * DOC Update docstring based on comments * CLN Update test with suggestions * ENH Adds computed property infrequent_categories_ * DOC Adds where the infrequent column is located * TST Adds more test for infrequent_categories_ * DOC Adds docstring for _compute_drop_idx * CLN Moves _convert_to_infrequent_idx into its own method * TST Increases test coverage * TST Adds failing test * CLN Careful consideration of dropped and inverse_transform * STY Linting * DOC Adds docstrinb about dropping infrequent * DOC Uses only * DOC Numpydoc * TST Includes test for get_feature_names_out * DOC Move whats new * DOC Address docstring comments * DOC Docstring changes * TST Better comments * TST Adds check for handle_unknown='ignore' for infrequent * CLN Make _infrequent_indices private * CLN Change min_frequency default to None * DOC Adds comments * ENH adds support for max_categories=1 * ENH Describe lexicon ordering for ties * DOC Better docstring * STY Fix * CLN Error when explicity dropping an infrequent category * STY Grammar Co-authored-by: Joel Nothman <[email protected]> Co-authored-by: Roman Yurchak <[email protected]> Co-authored-by: Guillaume Lemaitre <[email protected]>
75,666
0
142
123
50
259,234
60
scikit-learn
21
sklearn/preprocessing/tests/test_encoders.py
Python
18
{ "docstring": "Check drop='first' and handle_unknown='ignore'/'infrequent_if_exist'\n during fit with categories passed in.", "language": "en", "n_whitespaces": 12, "n_words": 10, "vocab_size": 10 }
https://github.com/scikit-learn/scikit-learn.git
8
ip6_interfaces
def ip6_interfaces():
    # Provides:
    #   ip_interfaces
    if salt.utils.platform.is_proxy():
        return {}

    ret = {}
    ifaces = _get_interfaces()
    for face in ifaces:
        iface_ips = []
        for inet in ifaces[face].get("inet6", []):
            if "address" in inet:
                iface_ips.append(inet["address"])
        for secondary in ifaces[face].get("secondary", []):
            if "address" in secondary and secondary.get("type") == "inet6":
                iface_ips.append(secondary["address"])
        ret[face] = iface_ips
    return {"ip6_interfaces": ret}
75c0cb7181d14f780b24ee5dd126f2836730053b
15
core.py
207
Filter secondary IP address by type (#61434) * Add filter for secondary ip addresses Should improve #61370 * Remove unnecessary space * Add test case for secondary IP address Test data for IPv6 secondary IP looks wrong but this is what _interfaces_ip() could return looking at the current code * Change order of tests because of caching issues Change order of test_network_grains_secondary_ip and test_network_grains_cache because of caching issues when running after test_network_grains_cache * Unify style in _interfaces_ip Unify coding style in _interfaces_ip for secondary ip addresses with the style for regular addresses. Also align the attributes for IPv6 secondary ip addresses with regular ipv6 addresses * Align IPv6 secondary IP attributes with changes to _interfaces_ip * Add changelog for fix of issue 61370 * Use salt.loader.grain_funcs for secondary ip test To work around caching issues when changing order of test_network_grains_cache and test_network_grains_secondary_ip use use salt.loader.grain_funcs in both functions. Also we hope this solves the issue, that this test worked in my local dev environment but not on the saltstack jenkins instances. * Use side_effect to simulate test data I don't understand what is different when these tests are run on the Jenkins infrastructure. Hope copying this from test_network_grains_cache make the tests work on them. * Changed checking for secondaryip address type * Add filter for secondary ip addresses Should improve #61370 * Remove unnecessary space * Add test case for secondary IP address Test data for IPv6 secondary IP looks wrong but this is what _interfaces_ip() could return looking at the current code * Change order of tests because of caching issues Change order of test_network_grains_secondary_ip and test_network_grains_cache because of caching issues when running after test_network_grains_cache * Unify style in _interfaces_ip Unify coding style in _interfaces_ip for secondary ip addresses with the style for regular addresses. Also align the attributes for IPv6 secondary ip addresses with regular ipv6 addresses * Align IPv6 secondary IP attributes with changes to _interfaces_ip * Add changelog for fix of issue 61370 * Use salt.loader.grain_funcs for secondary ip test To work around caching issues when changing order of test_network_grains_cache and test_network_grains_secondary_ip use use salt.loader.grain_funcs in both functions. Also we hope this solves the issue, that this test worked in my local dev environment but not on the saltstack jenkins instances. * Use side_effect to simulate test data I don't understand what is different when these tests are run on the Jenkins infrastructure. Hope copying this from test_network_grains_cache make the tests work on them. * Changed checking for secondaryip address type * Satisfy black code formatting Co-authored-by: Shane Lee <[email protected]> Co-authored-by: mayrstefan <[email protected]>
54,487
0
166
118
35
216,260
53
salt
14
salt/grains/core.py
Python
15
{ "docstring": "\n Provide a dict of the connected interfaces and their ip6 addresses\n The addresses will be passed as a list for each interface\n ", "language": "en", "n_whitespaces": 32, "n_words": 22, "vocab_size": 20 }
https://github.com/saltstack/salt.git
9
match
def match(self, node, results=None):
    if self.type is not None and node.type != self.type:
        return False
    if self.content is not None:
        r = None
        if results is not None:
            r = {}
        if not self._submatch(node, r):
            return False
        if r:
            results.update(r)
    if results is not None and self.name:
        results[self.name] = node
    return True
8198943edd73a363c266633e1aa5b2a9e9c9f526
11
pytree.py
145
add python 3.10.4 for windows
55,510
0
198
93
29
218,860
52
XX-Net
10
python3.10.4/Lib/lib2to3/pytree.py
Python
14
{ "docstring": "\n Does this pattern exactly match a node?\n\n Returns True if it matches, False if not.\n\n If results is not None, it must be a dict which will be\n updated with the nodes matching named subpatterns.\n\n Default implementation for non-wildcard patterns.\n ", "language": "en", "n_whitespaces": 83, "n_words": 40, "vocab_size": 36 }
https://github.com/XX-net/XX-Net.git
1
reset
def reset(self) -> None:
    self._recording_start = dt_util.utcnow()
    self._current_run_info = None
f073f170402bd02e6d6c7597ce5d842a016e97be
8
run_history.py
39
Refactor tracking of the recorder run history (#70456) Co-authored-by: Erik Montnemery <[email protected]>
98,089
0
31
22
9
299,152
10
core
6
homeassistant/components/recorder/run_history.py
Python
7
{ "docstring": "Reset the run when the database is changed or fails.\n\n Must run in the recorder thread.\n ", "language": "en", "n_whitespaces": 30, "n_words": 16, "vocab_size": 13 }
https://github.com/home-assistant/core.git
2
position_cursor
def position_cursor(self) -> Control:
    if self._shape is not None:
        _, height = self._shape
        return Control(
            ControlType.CARRIAGE_RETURN,
            (ControlType.ERASE_IN_LINE, 2),
            *(
                (
                    (ControlType.CURSOR_UP, 1),
                    (ControlType.ERASE_IN_LINE, 2),
                )
                * (height - 1)
            )
        )
    return Control()
f3166e673fe8d40277b804d35d77dcdb760fc3b3
15
live_render.py
105
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
3,537
0
250
70
27
20,773
33
pipenv
10
pipenv/patched/notpip/_vendor/rich/live_render.py
Python
20
{ "docstring": "Get control codes to move cursor to beginning of live render.\n\n Returns:\n Control: A control instance that may be printed.\n ", "language": "en", "n_whitespaces": 45, "n_words": 20, "vocab_size": 18 }
https://github.com/pypa/pipenv.git
2
is_fedora
def is_fedora():
    (osname, osrelease, oscodename) = (
        x.strip('"').strip("'") for x in linux_distribution()
    )
    return osname == "Fedora"


@real_memoize
f2a783643de61cac1ff3288b40241e5ce6e1ddc8
@real_memoize
12
platform.py
68
Update to latest ``pyupgrade`` hook. Stop skipping it on CI. Signed-off-by: Pedro Algarvio <[email protected]>
54,302
1
36
36
18
215,982
18
salt
8
salt/utils/platform.py
Python
5
{ "docstring": "\n Simple function to return if host is Fedora or not\n ", "language": "en", "n_whitespaces": 17, "n_words": 10, "vocab_size": 10 }
https://github.com/saltstack/salt.git
1
default_prng_impl
def default_prng_impl():
    impl_name = config.jax_default_prng_impl
    assert impl_name in PRNG_IMPLS, impl_name
    return PRNG_IMPLS[impl_name]


### key operations
026b91b85db17bb60d49309da7698d33122f751f
7
random.py
36
add `random.default_prng_impl` to retrieve the default PRNG implementation
26,542
0
18
21
13
119,074
15
jax
5
jax/_src/random.py
Python
4
{ "docstring": "Get the default PRNG implementation.\n\n The default implementation is determined by ``config.jax_default_prng_impl``,\n which specifies it by name. This function returns the corresponding\n ``jax.prng.PRNGImpl`` instance.\n ", "language": "en", "n_whitespaces": 28, "n_words": 24, "vocab_size": 21 }
https://github.com/google/jax.git
1
test_all
def test_all(self):
    assert validate(all(int, lambda n: 0 < n < 5), 3) == 3
    assert validate(all(transform(int), lambda n: 0 < n < 5), 3.33) == 3

    with self.assertRaises(ValueError) as cm:
        validate(all(int, float), 123)
    assert_validationerror(cm.exception, """
        ValidationError(type):
          Type of 123 should be 'float', but is 'int'
    """)
3d44da082b3ba202b9d0557bfd8ce747a1d7960c
12
test_api_validate.py
123
plugin.api.validate: implement ValidationError - Implement `ValidationError` - Inherit from `ValueError` to preserve backwards compatiblity - Allow collecting multiple errors (AnySchema) - Keep an error stack of parent `ValidationError`s or other exceptions - Format error stack when converting error to string - Raise `ValidationError` instead of `ValueError` - Add error contexts where it makes sense - Add schema names to error instances - Add and update tests
45,704
0
73
81
23
187,143
35
streamlink
13
tests/test_api_validate.py
Python
9
{ "docstring": "\n ValidationError(type):\n Type of 123 should be 'float', but is 'int'\n ", "language": "en", "n_whitespaces": 42, "n_words": 10, "vocab_size": 10 }
https://github.com/streamlink/streamlink.git
1
temporal_padding
def temporal_padding(x, padding=(1, 1)):
    assert len(padding) == 2
    pattern = [[0, 0], [padding[0], padding[1]], [0, 0]]
    return tf.compat.v1.pad(x, pattern)


@keras_export("keras.backend.spatial_2d_padding")
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
84afc5193d38057e2e2badf9c889ea87d80d8fbf
@keras_export("keras.backend.spatial_2d_padding") @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs
9
backend.py
117
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
80,237
1
31
61
22
269,618
22
keras
15
keras/backend.py
Python
14
{ "docstring": "Pads the middle dimension of a 3D tensor.\n\n Args:\n x: Tensor or variable.\n padding: Tuple of 2 integers, how many zeros to\n add at the start and end of dim 1.\n\n Returns:\n A padded 3D tensor.\n ", "language": "en", "n_whitespaces": 77, "n_words": 36, "vocab_size": 31 }
https://github.com/keras-team/keras.git
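The padding behavior above can be exercised through the public `tf.keras.backend.temporal_padding` wrapper; a minimal sketch with illustrative shapes:

import numpy as np
import tensorflow as tf

x = tf.constant(np.ones((2, 3, 4), dtype="float32"))  # (batch, time, features)
padded = tf.keras.backend.temporal_padding(x, padding=(1, 2))
print(padded.shape)  # (2, 6, 4): one zero step prepended, two appended along dim 1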
1
set_login_api_ready_event
def set_login_api_ready_event():
    login_api.extra["ready-event"].set()


login_api = FastAPI(on_startup=[set_login_api_ready_event])
login_api.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
)
1a6dee5e9eb71e6e6d1d3492002e9cd674ab9f9b
9
cloud.py
87
Add login with a browser to `prefect cloud login` (#7334)
11,953
0
23
14
12
59,848
12
prefect
11
src/prefect/cli/cloud.py
Python
2
{ "docstring": "\nThis small API server is used for data transmission for browser-based log in.\n", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 12 }
https://github.com/PrefectHQ/prefect.git
2
hide_splashscreen
def hide_splashscreen():
    try:
        import pyi_splash  # type: ignore # pylint: disable=import-outside-toplevel

        pyi_splash.update_text("Terminal Loaded!")
        pyi_splash.close()
    except Exception as e:
        logger.info(e)
ab4de1dd70fba866930150e440a03e461a6ca6a8
10
terminal_helper.py
61
Create a packaged app bundle with Pyinstaller (#1525) * Add dashboard widget assets * Add ipywidgets and ipyflex to project * Add currencies dashboard notebook * Update docs and docstrings * Add pyinstaller to project deps * Add pyinstaller artifacts to gitignore * Fix linter errors in terminal.py * Update cspell hook and action with a pyinstaller specific word * Add pyinstaller specfile and artifacts * Add splashscreen image * Add app icon * adding splash screen support to terminal.spec and terminal.py * Restore the conda env build files * Sync deps * Add border to the splashscreen image * Clean up terminal launcher * Add support for default feature flags in packages apps * Fix types and linting * Add splashscreen management to app bootup * Check prediction feature flag when entering crypto/pred * Update pyinstaller spec file * fix .spec file to work for splash and icon - removed the ".." * Allows to export when using installer (#1568) * fix export for packaged apps * fix filename * Git : replace commit_hash when it is set in config_terminal * Add update of the git commit hash in gtff default during build * Add packaged app name and feature flag to logs * Add platform specific icon assignment * Add macOS build assets * Add tensorflow to hidden imports * Move LOGGING_COMMIT_HASH to gtff * Adding files/folders needed to .spec and pyinstaller folder. This will make certain commands work again. * Linting * Workflow : ignore ./build/pyinstaller from codespell * Workflow : exclude ./build/pyinstaller from flake8 * Poetry + Workflow : add types-six * Pyinstaller : remove property_cached, user_agent and vaderSentiment * Revert "Pyinstaller : remove property_cached, user_agent and vaderSentiment" This reverts commit dbb3e2b81086f97819ebd21457148c7160a4d703. * Clean up local paths in specfile * Validate deps have correct Jinja version (they do) * Fix logging commit hash to be set correctly for the logger to see it Co-authored-by: Andrew <[email protected]> Co-authored-by: didierlopes.eth <[email protected]> Co-authored-by: Chavithra PARANA <[email protected]>
84,482
0
58
31
18
283,232
19
OpenBBTerminal
8
gamestonk_terminal/terminal_helper.py
Python
7
{ "docstring": "Hide the splashscreen on Windows bundles.\n\n `pyi_splash` is a PyInstaller \"fake-package\" that's used to communicate\n with the splashscreen on Windows.\n Sending the `close` signal to the splash screen is required.\n The splash screen remains open until this function is called or the Python\n program is terminated.\n ", "language": "en", "n_whitespaces": 64, "n_words": 46, "vocab_size": 34 }
https://github.com/OpenBB-finance/OpenBBTerminal.git
3
predict
def predict(self, data, tokenizer):
    batchify_fn = lambda samples, fn=Tuple(
        Pad(axis=0, pad_val=tokenizer.pad_token_id),  # input
        Pad(axis=0, pad_val=tokenizer.pad_token_id),  # segment
    ): fn(samples)

    all_embeddings = []
    examples = []
    for idx, text in enumerate(tqdm(data)):
        input_ids, segment_ids = convert_example(
            text,
            tokenizer,
            max_seq_length=self.max_seq_length,
            pad_to_max_seq_len=True)
        examples.append((input_ids, segment_ids))
        if (len(examples) > 100):
            input_ids, segment_ids = batchify_fn(examples)
            self.input_handles[0].copy_from_cpu(input_ids)
            self.input_handles[1].copy_from_cpu(segment_ids)
            self.predictor.run()
            logits = self.output_handle.copy_to_cpu()
            all_embeddings.append(logits)
            examples = []

    all_embeddings = np.concatenate(all_embeddings, axis=0)
    np.save('corpus_embedding', all_embeddings)
4a1b98b9390aa0ec9c16530b28ba8d311787867b
15
feature_extract.py
295
Update Readme and Update Mivus feature extraction module (#1699) * add recall inference similarity * update examples * updatea readme * update dir name * update neural search readme * update milvus readme * update domain adaptive pretraining readme * fix the mistakes * update readme * add recall Paddle Serving Support * update readme * update readme and format the code * reformat the files * move the files * reformat the code * remove redundant code * update changes * Add C++ Paddle Serving * Update Readme and Update Mivus feature extraction module * check formats * update serving config * deleted redundant code Co-authored-by: Zeyu Chen <[email protected]> Co-authored-by: tianxin <[email protected]>
118,117
0
342
188
48
322,298
64
PaddleNLP
35
applications/neural_search/recall/milvus/feature_extract.py
Python
24
{ "docstring": "\n Predicts the data labels.\n\n Args:\n data (obj:`List(str)`): The batch data whose each element is a raw text.\n tokenizer(obj:`PretrainedTokenizer`): This tokenizer inherits from :class:`~paddlenlp.transformers.PretrainedTokenizer` \n which contains most of the methods. Users should refer to the superclass for more information regarding methods.\n\n Returns:\n results(obj:`dict`): All the predictions labels.\n ", "language": "en", "n_whitespaces": 124, "n_words": 46, "vocab_size": 39 }
https://github.com/PaddlePaddle/PaddleNLP.git
1
get_tables
def get_tables(self) -> Response:
    """
    Get a list with all of the tabels in VERTICA
    """
    q = f"""
        SELECT
        TABLE_NAME,
        TABLE_SCHEMA
        from v_catalog.tables
        WHERE table_schema='{self.schema_name}'
        order by
        table_name;
    """
    return self.native_query(q)
b4b66f241b6b2905e1dba81c42c2edd095c257bc
9
vertica_handler.py
41
ALMOST Completed But Dialect not working
25,565
0
39
19
9
115,816
9
mindsdb
6
mindsdb/integrations/handlers/vertica_handler/vertica_handler.py
Python
12
{ "docstring": "\n Get a list with all of the tabels in VERTICA\n SELECT \n TABLE_NAME,\n TABLE_SCHEMA\n from v_catalog.tables \n WHERE table_schema='{self.schema_name}' \n order by\n table_name;", "language": "en", "n_whitespaces": 79, "n_words": 20, "vocab_size": 20 }
https://github.com/mindsdb/mindsdb.git
3
upsample_2d
def upsample_2d(x, k=None, factor=2, gain=1, data_format='NCHW', impl='cuda'):
    r"""Upsample a batch of 2D images with the given filter."""
    assert isinstance(factor, int) and factor >= 1
    if k is None:
        k = [1] * factor
    k = _setup_kernel(k) * (gain * (factor ** 2))
    p = k.shape[0] - factor
    return _simple_upfirdn_2d(x, k, up=factor, pad0=(p+1)//2+factor-1, pad1=p//2, data_format=data_format, impl=impl)

#----------------------------------------------------------------------------
7375ee364e0df2a417f92593e09557f1b2a3575a
13
upfirdn_2d.py
171
initialize ostec
1,605
0
70
95
39
9,405
47
insightface
16
reconstruction/ostec/external/stylegan2/dnnlib/tflib/ops/upfirdn_2d.py
Python
29
{ "docstring": "Upsample a batch of 2D images with the given filter.\n\n Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]`\n and upsamples each image with the given filter. The filter is normalized so that\n if the input pixels are constant, they will be scaled by the specified `gain`.\n Pixels outside the image are assumed to be zero, and the filter is padded with\n zeros so that its shape is a multiple of the upsampling factor.\n\n Args:\n x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`.\n k: FIR filter of the shape `[firH, firW]` or `[firN]` (separable).\n The default is `[1] * factor`, which corresponds to nearest-neighbor\n upsampling.\n factor: Integer upsampling factor (default: 2).\n gain: Scaling factor for signal magnitude (default: 1.0).\n data_format: `'NCHW'` or `'NHWC'` (default: `'NCHW'`).\n impl: Name of the implementation to use. Can be `\"ref\"` or `\"cuda\"` (default).\n\n Returns:\n Tensor of the shape `[N, C, H * factor, W * factor]` or\n `[N, H * factor, W * factor, C]`, and same datatype as `x`.\n ", "language": "en", "n_whitespaces": 348, "n_words": 181, "vocab_size": 105 }
https://github.com/deepinsight/insightface.git
1
copy
def copy(self, message_set, new_mailbox):
    return self._simple_command('COPY', message_set, new_mailbox)
8198943edd73a363c266633e1aa5b2a9e9c9f526
8
imaplib.py
34
add python 3.10.4 for windows
55,013
0
22
21
7
217,918
8
XX-Net
5
python3.10.4/Lib/imaplib.py
Python
2
{ "docstring": "Copy 'message_set' messages onto end of 'new_mailbox'.\n\n (typ, [data]) = <instance>.copy(message_set, new_mailbox)\n ", "language": "en", "n_whitespaces": 26, "n_words": 12, "vocab_size": 12 }
https://github.com/XX-net/XX-Net.git
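A short usage sketch for `IMAP4.copy`; the host, credentials, and mailbox names are placeholders.

import imaplib

M = imaplib.IMAP4_SSL("imap.example.com")  # placeholder host
M.login("user", "password")                # placeholder credentials
M.select("INBOX")
typ, data = M.copy("1:3", "Archive")  # copy messages 1-3 onto the end of 'Archive'
M.close()
M.logout()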
4
extract_relative_time
def extract_relative_time(relative_time_text):
    mobj = re.search(r'(?P<start>today|yesterday|now)|(?P<time>\d+)\s*(?P<unit>microsecond|second|minute|hour|day|week|month|year)s?\s*ago', relative_time_text)
    if mobj:
        start = mobj.group('start')
        if start:
            return datetime_from_str(start)
        try:
            return datetime_from_str('now-%s%s' % (mobj.group('time'), mobj.group('unit')))
        except ValueError:
            return None
f0d785d3ed59e879a69f69f3c9334754f11747e0
16
youtube.py
113
[youtube:tab] Extract more playlist metadata (#2069) * Add fields modified_date, modified_timestamp * Add field playlist_count * [youtube:tab] Extract view_count, playlist_count, modified_date Authored by: coletdjnz, pukkandan
39,169
0
135
64
21
162,160
25
yt-dlp
9
yt_dlp/extractor/youtube.py
Python
10
{ "docstring": "\n Extracts a relative time from string and converts to dt object\n e.g. 'streamed 6 days ago', '5 seconds ago (edited)', 'updated today'\n ", "language": "en", "n_whitespaces": 44, "n_words": 22, "vocab_size": 22 }
https://github.com/yt-dlp/yt-dlp.git
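The regular expression above can be exercised on its own; `datetime_from_str` is yt-dlp internal, so this sketch only inspects the named groups.

import re

pattern = (r'(?P<start>today|yesterday|now)|(?P<time>\d+)\s*'
           r'(?P<unit>microsecond|second|minute|hour|day|week|month|year)s?\s*ago')
m = re.search(pattern, 'streamed 6 days ago')
print(m.group('start'), m.group('time'), m.group('unit'))  # None 6 day
m = re.search(pattern, 'updated today')
print(m.group('start'))  # today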
8
make_parser
def make_parser(self, parser, optname, metavar=None, short=None):
    if optname not in self._options:
        return
    o = self._options[optname]
fdde9ba3b3caaa2654048cec0af07bfcc3a6a3f8
8
optmanager.py
53
use Python 3.9+ typing
73,636
0
47
308
15
251,210
15
mitmproxy
8
mitmproxy/optmanager.py
Python
58
{ "docstring": "\n Auto-Create a command-line parser entry for a named option. If the\n option does not exist, it is ignored.\n ", "language": "en", "n_whitespaces": 48, "n_words": 18, "vocab_size": 17 }
https://github.com/mitmproxy/mitmproxy.git
9
update
def update(self, value=None, background_color=None, text_color=None, font=None, visible=None):
    if not self._widget_was_created():  # if widget hasn't been created yet, then don't allow
        return
    if value is not None:
        self.DisplayText = str(value)
        self.TKStringVar.set(str(value))
    if background_color not in (None, COLOR_SYSTEM_DEFAULT):
        self.TKText.configure(background=background_color)
    if text_color not in (None, COLOR_SYSTEM_DEFAULT):
        self.TKText.configure(fg=text_color)
    if font is not None:
        self.TKText.configure(font=font)
    if visible is False:
        self._pack_forget_save_settings()
        # self.TKText.pack_forget()
    elif visible is True:
        self._pack_restore_settings()
        # self.TKText.pack(padx=self.pad_used[0], pady=self.pad_used[1])
    if visible is not None:
        self._visible = visible
e575a0b8dc72561ce6565edaf804dc8c6b5053e5
11
PySimpleGUI.py
233
Fixed problem with making elements invisible causing the pack settings to be lost. Converted Text, Input, Multiline, StatusBar, Frame, Combo to see if this is the right approach
53,445
0
258
147
46
212,837
73
PySimpleGUI
21
PySimpleGUI.py
Python
18
{ "docstring": "\n Changes some of the settings for the Text Element. Must call `Window.Read` or `Window.Finalize` prior\n\n Changes will not be visible in your window until you call window.read or window.refresh.\n\n If you change visibility, your element may MOVE. If you want it to remain stationary, use the \"layout helper\"\n function \"pin\" to ensure your element is \"pinned\" to that location in your layout so that it returns there\n when made visible.\n\n :param value: new text to show\n :type value: (str)\n :param background_color: color of background\n :type background_color: (str)\n :param text_color: color of the text\n :type text_color: (str)\n :param font: specifies the font family, size, etc. Tuple or Single string format 'name size styles'. Styles: italic * roman bold normal underline overstrike\n :type font: (str or (str, int[, str]) or None)\n :param visible: set visibility state of the element\n :type visible: (bool)\n ", "language": "en", "n_whitespaces": 335, "n_words": 140, "vocab_size": 95 }
https://github.com/PySimpleGUI/PySimpleGUI.git
2
get_template_name
def get_template_name(self):
    if self.template_name is not None:
        return self.template_name

    model_opts = self.queryset.model._meta
    return f'{model_opts.app_label}/{model_opts.model_name}.html'
54834c47f8870e7faabcd847c3270da0bd3d2884
9
object_views.py
64
Refactor generic views; add plugins dev documentation
77,675
0
53
30
12
264,304
14
netbox
9
netbox/netbox/views/generic/object_views.py
Python
5
{ "docstring": "\n Return self.template_name if defined. Otherwise, dynamically resolve the template name using the queryset\n model's `app_label` and `model_name`.\n ", "language": "en", "n_whitespaces": 39, "n_words": 17, "vocab_size": 16 }
https://github.com/netbox-community/netbox.git
2
persist
def persist(self, backfill=False, **kwargs) -> FrozenEvent:
    event, context = self.build_event(**kwargs)
    if backfill:
        self.get_success(
            self._storage_controllers.persistence.persist_events(
                [(event, context)], backfilled=True
            )
        )
    else:
        self.get_success(
            self._storage_controllers.persistence.persist_event(event, context)
        )
    return event
d8cc86eff484b6f570f55a5badb337080c6e4dcd
14
test_events.py
118
Remove redundant types from comments. (#14412) Remove type hints from comments which have been added as Python type hints. This helps avoid drift between comments and reality, as well as removing redundant information. Also adds some missing type hints which were simple to fill in.
73,153
0
169
75
23
249,821
26
synapse
14
tests/replication/slave/storage/test_events.py
Python
17
{ "docstring": "\n Returns:\n The event that was persisted.\n ", "language": "en", "n_whitespaces": 32, "n_words": 6, "vocab_size": 6 }
https://github.com/matrix-org/synapse.git
3
from_env
def from_env() -> Settings:
    # Since os.environ is a Dict[str, str] we can safely hash it by contents, but we
    # must be careful to avoid hashing a generator instead of a tuple
    cache_key = hash(tuple((key, value) for key, value in os.environ.items()))
    if cache_key not in _FROM_ENV_CACHE:
        _FROM_ENV_CACHE[cache_key] = Settings()
    return _FROM_ENV_CACHE[cache_key]
1d4218a287ef343f32f1e32482592b471be5df1d
14
settings.py
84
Move `prefect.settings` to `prefect.settings.from_env()`
10,794
0
81
51
44
53,410
52
prefect
11
src/prefect/settings.py
Python
11
{ "docstring": "\n Returns a settings object populated with default values and overrides from\n environment variables.\n\n Calls with the same environment return a cached object instead of reconstructing.\n ", "language": "en", "n_whitespaces": 38, "n_words": 25, "vocab_size": 21 }
https://github.com/PrefectHQ/prefect.git
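The comment about hashing a generator instead of a tuple can be demonstrated directly: generators hash by object identity, so using one as a cache key would produce a different key on every call.

items = [("A", "1"), ("B", "2")]

# Tuples hash by contents, so the key is stable across calls.
print(hash(tuple(kv for kv in items)) == hash(tuple(kv for kv in items)))  # True

# Generators hash by identity, so two generators over the same items differ.
print(hash(kv for kv in items) == hash(kv for kv in items))  # False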
5
nr_of_successful_buys
def nr_of_successful_buys(self) -> int:
    return len([o for o in self.orders if o.ft_order_side == 'buy'
                and o.status in NON_OPEN_EXCHANGE_STATES
                and o.filled > 0])
813a2cd23b0d9fa9f384c8c1f0b558ca5c4363e2
13
models.py
64
Add useful helper methods for adjust_trade_position implementation
34,265
0
74
39
20
148,484
22
freqtrade
10
freqtrade/persistence/models.py
Python
8
{ "docstring": "\n Helper function to count the number of buy orders that have been filled.\n :return: int count of buy orders that have been filled for this trade.\n ", "language": "en", "n_whitespaces": 48, "n_words": 26, "vocab_size": 19 }
https://github.com/freqtrade/freqtrade.git
1
_get_cmap_norms
def _get_cmap_norms():
    # Create a colormap and specify the levels it represents.
    cmap = mpl.colormaps["RdBu"].resampled(5)
    clevs = [-5., -2.5, -.5, .5, 1.5, 3.5]
    # Define norms for the colormaps.
    norms = dict()
    norms['neither'] = BoundaryNorm(clevs, len(clevs) - 1)
    norms['min'] = BoundaryNorm([-10] + clevs[1:], len(clevs) - 1)
    norms['max'] = BoundaryNorm(clevs[:-1] + [10], len(clevs) - 1)
    norms['both'] = BoundaryNorm([-10] + clevs[1:-1] + [10], len(clevs) - 1)
    return cmap, norms
a17f4f3bd63e3ca3754f96d7db4ce5197720589b
13
test_colorbar.py
230
MNT: convert tests and internal usage way from using mpl.cm.get_cmap
23,565
0
100
151
43
109,392
67
matplotlib
10
lib/matplotlib/tests/test_colorbar.py
Python
9
{ "docstring": "\n Define a colormap and appropriate norms for each of the four\n possible settings of the extend keyword.\n\n Helper function for _colorbar_extension_shape and\n colorbar_extension_length.\n ", "language": "en", "n_whitespaces": 39, "n_words": 23, "vocab_size": 19 }
https://github.com/matplotlib/matplotlib.git
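A minimal sketch of how one of these norms maps data values to color-bin indices; the probe values are illustrative.

from matplotlib.colors import BoundaryNorm

clevs = [-5., -2.5, -.5, .5, 1.5, 3.5]
norm = BoundaryNorm(clevs, len(clevs) - 1)
print(norm(0.0))   # 2: third bin, [-0.5, 0.5)
print(norm(-3.0))  # 1: second bin, [-2.5, -0.5)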
1
test_get_unread_push_actions_for_user_in_range
def test_get_unread_push_actions_for_user_in_range(self) -> None:
    user_id, token, _, other_token, room_id = self._create_users_and_room()

    # Create two events, one of which is a highlight.
    self.helper.send_event(
        room_id,
        type="m.room.message",
        content={"msgtype": "m.text", "body": "msg"},
        tok=other_token,
    )
    event_id = self.helper.send_event(
        room_id,
        type="m.room.message",
        content={"msgtype": "m.text", "body": user_id},
        tok=other_token,
    )["event_id"]

    # Fetch unread actions for HTTP pushers.
    http_actions = self.get_success(
        self.store.get_unread_push_actions_for_user_in_range_for_http(
            user_id, 0, 1000, 20
        )
    )
    self.assertEqual(2, len(http_actions))

    # Fetch unread actions for email pushers.
    email_actions = self.get_success(
        self.store.get_unread_push_actions_for_user_in_range_for_email(
            user_id, 0, 1000, 20
        )
    )
    self.assertEqual(2, len(email_actions))

    # Send a receipt, which should clear any actions.
    self.get_success(
        self.store.insert_receipt(
            room_id,
            "m.read",
            user_id=user_id,
            event_ids=[event_id],
            thread_id=None,
            data={},
        )
    )
    http_actions = self.get_success(
        self.store.get_unread_push_actions_for_user_in_range_for_http(
            user_id, 0, 1000, 20
        )
    )
    self.assertEqual([], http_actions)
    email_actions = self.get_success(
        self.store.get_unread_push_actions_for_user_in_range_for_email(
            user_id, 0, 1000, 20
        )
    )
    self.assertEqual([], email_actions)
2fae1a3f7862bf38cd0b52dfd3ea3ae76794d2b7
13
test_event_push_actions.py
381
Improve tests for get_unread_push_actions_for_user_in_range_*. (#13893) * Adds a docstring. * Reduces a small amount of duplicated code. * Improves tests.
72,985
0
638
245
66
249,545
122
synapse
26
tests/storage/test_event_push_actions.py
Python
49
{ "docstring": "Test getting unread push actions for HTTP and email pushers.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/matrix-org/synapse.git
4
screen
def screen(self) -> "Screen":
    # Get the node by looking up a chain of parents
    # Note that self.screen may not be the same as self.app.screen
    from .screen import Screen

    node = self
    while node and not isinstance(node, Screen):
        node = node._parent
    if not isinstance(node, Screen):
        raise NoScreen(f"{self} has no screen")
    return node
6a22c96a9e1831a7ac5889738bec8d5386fd111f
10
dom.py
87
screen fix
44,581
0
131
48
42
184,433
53
textual
7
src/textual/dom.py
Python
9
{ "docstring": "Get the screen that this node is contained within. Note that this may not be the currently active screen within the app.", "language": "en", "n_whitespaces": 21, "n_words": 22, "vocab_size": 17 }
https://github.com/Textualize/textual.git
1
test_graphical_lasso_cv_alphas_iterable
def test_graphical_lasso_cv_alphas_iterable(alphas_container_type):
    true_cov = np.array(
        [
            [0.8, 0.0, 0.2, 0.0],
            [0.0, 0.4, 0.0, 0.0],
            [0.2, 0.0, 0.3, 0.1],
            [0.0, 0.0, 0.1, 0.7],
        ]
    )
    rng = np.random.RandomState(0)
    X = rng.multivariate_normal(mean=[0, 0, 0, 0], cov=true_cov, size=200)
    alphas = _convert_container([0.02, 0.03], alphas_container_type)
    GraphicalLassoCV(alphas=alphas, tol=1e-1, n_jobs=1).fit(X)


# TODO: Remove `score` and `test_score` suffix in 1.2
@pytest.mark.parametrize("suffix", ["score", "test_score"])
@pytest.mark.filterwarnings("ignore:Key*:FutureWarning:sklearn")
abbee570f31a91243c22b1892e42056bb915c056
@pytest.mark.parametrize("suffix", ["score", "test_score"]) @pytest.mark.filterwarnings("ignore:Key*:FutureWarning:sklearn")
10
test_graphical_lasso.py
213
FIX accept NumPy arrays for alphas in GraphicalLassoCV (#22493)
75,504
1
132
160
47
258,976
56
scikit-learn
23
sklearn/covariance/tests/test_graphical_lasso.py
Python
13
{ "docstring": "Check that we can pass an array-like to `alphas`.\n\n Non-regression test for:\n https://github.com/scikit-learn/scikit-learn/issues/22489\n ", "language": "en", "n_whitespaces": 22, "n_words": 13, "vocab_size": 13 }
https://github.com/scikit-learn/scikit-learn.git
1
test_change_view_with_show_delete_extra_context
def test_change_view_with_show_delete_extra_context(self):
    instance = UndeletableObject.objects.create(name="foo")
    response = self.client.get(
        reverse("admin:admin_views_undeletableobject_change", args=(instance.pk,))
    )
    self.assertNotContains(response, "deletelink")
9c19aff7c7561e3a82978a272ecdaad40dda5c00
13
tests.py
83
Refs #33476 -- Reformatted code with Black.
52,138
0
59
48
12
207,867
13
django
14
tests/admin_views/tests.py
Python
6
{ "docstring": "\n The 'show_delete' context variable in the admin's change view controls\n the display of the delete button.\n ", "language": "en", "n_whitespaces": 38, "n_words": 16, "vocab_size": 14 }
https://github.com/django/django.git
1
test_meta_charset
def test_meta_charset(self) -> None:
    """A character encoding is found via the meta tag."""
    encodings = _get_html_media_encodings(
        b"""
        <html>
        <head><meta charset="ascii">
        </head>
        </html>
        """,
        "text/html",
    )
    self.assertEqual(list(encodings), ["ascii", "utf-8", "cp1252"])

    # A less well-formed version.
    encodings = _get_html_media_encodings(
        b"""
        <html>
        <head>< meta charset = ascii>
        </head>
        </html>
        """,
        "text/html",
    )
    self.assertEqual(list(encodings), ["ascii", "utf-8", "cp1252"])
7e91107be1a4287873266e588a3c5b415279f4c8
9
test_html_preview.py
111
Add type hints to `tests/rest` (#12146) * Add type hints to `tests/rest` * newsfile * change import from `SigningKey`
71,652
0
129
62
19
247,396
29
synapse
6
tests/rest/media/v1/test_html_preview.py
Python
22
{ "docstring": "A character encoding is found via the meta tag.\n <html>\n <head><meta charset=\"ascii\">\n </head>\n </html>\n \n <html>\n <head>< meta charset = ascii>\n </head>\n </html>\n ", "language": "en", "n_whitespaces": 93, "n_words": 22, "vocab_size": 18 }
https://github.com/matrix-org/synapse.git
2
get_current_settings
def get_current_settings() -> Settings:
    from prefect.context import ProfileContext

    profile = ProfileContext.get()
    if profile is not None:
        return profile.settings

    return get_settings_from_env()
95b47e807fa5ccc626a06efc2cced0d8ff8eadfa
8
settings.py
58
Rewrite temporary settings to use copy_with_update
11,185
0
42
34
18
55,038
20
prefect
9
src/prefect/settings.py
Python
10
{ "docstring": "\n Returns a settings object populated with values from the current profile or, if no\n profile is active, the environment.\n ", "language": "en", "n_whitespaces": 29, "n_words": 19, "vocab_size": 17 }
https://github.com/PrefectHQ/prefect.git
3
unset_existing_data
def unset_existing_data(company):
    linked = frappe.db.sql(
        '''select fieldname from tabDocField
        where fieldtype="Link" and options="Account" and parent="Company"''',
        as_dict=True,
    )

    # remove accounts data from company
    update_values = {d.fieldname: "" for d in linked}
    frappe.db.set_value("Company", company, update_values, update_values)

    # remove accounts data from various doctypes
    for doctype in [
        "Account",
        "Party Account",
        "Mode of Payment Account",
        "Tax Withholding Account",
        "Sales Taxes and Charges Template",
        "Purchase Taxes and Charges Template",
    ]:
        frappe.db.sql(
            'delete from `tab{0}` where `company`="%s"'.format(doctype) % (company)  # nosec
        )
494bd9ef78313436f0424b918f200dab8fc7c20b
13
chart_of_accounts_importer.py
140
style: format code with black
13,733
0
46
82
48
64,834
65
erpnext
13
erpnext/accounts/doctype/chart_of_accounts_importer/chart_of_accounts_importer.py
Python
19
{ "docstring": "select fieldname from tabDocField\n\t\twhere fieldtype=\"Link\" and options=\"Account\" and parent=\"Company\"delete from `tab{0}` where `company`=\"%s\"", "language": "en", "n_whitespaces": 12, "n_words": 14, "vocab_size": 11 }
https://github.com/frappe/erpnext.git
7
__getitem__
def __getitem__(self, key):
    getitem = self._data.__getitem__

    if is_integer(key) or is_float(key):
        # GH#44051 exclude bool, which would return a 2d ndarray
        key = com.cast_scalar_indexer(key, warn_float=True)
        return getitem(key)

    if isinstance(key, slice):
        # This case is separated from the conditional above to avoid
        # pessimization com.is_bool_indexer and ndim checks.
        result = getitem(key)
        # Going through simple_new for performance.
        return type(self)._simple_new(result, name=self._name)

    if com.is_bool_indexer(key):
        # if we have list[bools, length=1e5] then doing this check+convert
        # takes 166 µs + 2.1 ms and cuts the ndarray.__getitem__
        # time below from 3.8 ms to 496 µs
        # if we already have ndarray[bool], the overhead is 1.4 µs or .25%
        key = np.asarray(key, dtype=bool)

    result = getitem(key)
    # Because we ruled out integer above, we always get an arraylike here
    if result.ndim > 1:
        deprecate_ndim_indexing(result)
        if hasattr(result, "_ndarray"):
            # error: Item "ndarray[Any, Any]" of "Union[ExtensionArray,
            # ndarray[Any, Any]]" has no attribute "_ndarray"  [union-attr]
            # i.e. NDArrayBackedExtensionArray
            # Unpack to ndarray for MPL compat
            return result._ndarray  # type: ignore[union-attr]
        return result

    # NB: Using _constructor._simple_new would break if MultiIndex
    #  didn't override __getitem__
    return self._constructor._simple_new(result, name=self._name)
d603d43df2057ecdf74010d9dadc735e37f8f7b5
11
base.py
236
TYP: Ignore numpy related issues (#45244)
39,411
0
511
139
123
163,265
178
pandas
27
pandas/core/indexes/base.py
Python
17
{ "docstring": "\n Override numpy.ndarray's __getitem__ method to work as desired.\n\n This function adds lists and Series as valid boolean indexers\n (ndarrays only supports ndarray with dtype=bool).\n\n If resulting ndim != 1, plain ndarray is returned instead of\n corresponding `Index` subclass.\n\n ", "language": "en", "n_whitespaces": 81, "n_words": 38, "vocab_size": 36 }
https://github.com/pandas-dev/pandas.git
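From the caller's side, the three indexing paths above (scalar, slice, boolean mask) behave as follows; a small sketch:

import pandas as pd

idx = pd.Index([10, 20, 30, 40])
print(idx[2])                           # scalar path -> 30
print(idx[1:3])                         # slice path -> Index([20, 30], dtype='int64')
print(idx[[True, False, True, False]])  # boolean indexer -> Index([10, 30], dtype='int64')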
6
del_param
def del_param(self, param, header='content-type', requote=True):
    if header not in self:
        return
    new_ctype = ''
    for p, v in self.get_params(header=header, unquote=requote):
        if p.lower() != param.lower():
            if not new_ctype:
                new_ctype = _formatparam(p, v, requote)
            else:
                new_ctype = SEMISPACE.join([new_ctype,
                                            _formatparam(p, v, requote)])
    if new_ctype != self.get(header):
        del self[header]
        self[header] = new_ctype
8198943edd73a363c266633e1aa5b2a9e9c9f526
18
message.py
177
add python 3.10.4 for windows
57,063
0
242
113
32
223,782
48
XX-Net
15
python3.10.4/Lib/email/message.py
Python
14
{ "docstring": "Remove the given parameter completely from the Content-Type header.\n\n The header will be re-written in place without the parameter or its\n value. All values will be quoted as necessary unless requote is\n False. Optional header specifies an alternative to the Content-Type\n header.\n ", "language": "en", "n_whitespaces": 78, "n_words": 42, "vocab_size": 33 }
https://github.com/XX-net/XX-Net.git
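A short usage sketch for `Message.del_param` from the standard library's email package:

from email.message import Message

msg = Message()
msg['Content-Type'] = 'text/plain; charset="utf-8"; format=flowed'
msg.del_param('format')
print(msg['Content-Type'])  # the format parameter is removed, charset remains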
3
set_positions
def set_positions(self, posA, posB):
    if posA is not None:
        self._posA_posB[0] = posA
    if posB is not None:
        self._posA_posB[1] = posB
    self.stale = True
03a0b5ea238014ba87f74ef766928287726aa00a
10
patches.py
67
Doc: Fix grammar and spelling
24,044
0
73
43
15
110,304
23
matplotlib
6
lib/matplotlib/patches.py
Python
6
{ "docstring": "\n Set the start and end positions of the connecting path.\n\n Parameters\n ----------\n posA, posB : None, tuple\n (x, y) coordinates of arrow tail and arrow head respectively. If\n `None` use current value.\n ", "language": "en", "n_whitespaces": 90, "n_words": 32, "vocab_size": 28 }
https://github.com/matplotlib/matplotlib.git
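`set_positions` is exposed on connection-style patches such as `FancyArrowPatch`; a minimal sketch:

import matplotlib.pyplot as plt
from matplotlib.patches import FancyArrowPatch

fig, ax = plt.subplots()
arrow = FancyArrowPatch((0.1, 0.1), (0.5, 0.5), mutation_scale=20)
ax.add_patch(arrow)
arrow.set_positions((0.2, 0.2), (0.9, 0.8))  # move tail and head; marks the artist stale
fig.canvas.draw_idle()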
6
send
def send(self, msg, timeout=None, callback=None, raw=False, future=None, tries=3):
    message_id = self._message_id()
    header = {"mid": message_id}

    if future is None:
        future = salt.ext.tornado.concurrent.Future()
        future.tries = tries
        future.attempts = 0
        future.timeout = timeout

    if callback is not None:
43277294a3454e5dcd9079e005f747bf880801f6
13
tcp.py
127
Test fix
54,046
0
115
191
27
215,599
36
salt
17
salt/transport/tcp.py
Python
25
{ "docstring": "\n Send given message, and return a future\n ", "language": "en", "n_whitespaces": 22, "n_words": 7, "vocab_size": 7 }
https://github.com/saltstack/salt.git
18
histogramdd
def histogramdd(sample, bins=10, range=None, density=None, weights=None):
    try:
        # Sample is an ND-array.
        N, D = sample.shape
    except (AttributeError, ValueError):
        # Sample is a sequence of 1D arrays.
        sample = np.atleast_2d(sample).T
        N, D = sample.shape

    nbin = np.empty(D, int)
    edges = D*[None]
    dedges = D*[None]
    if weights is not None:
        weights = np.asarray(weights)

    try:
        M = len(bins)
        if M != D:
            raise ValueError(
                'The dimension of bins must be equal to the dimension of the '
                ' sample x.')
    except TypeError:
        # bins is an integer
        bins = D*[bins]

    # normalize the range argument
    if range is None:
        range = (None,) * D
    elif len(range) != D:
        raise ValueError('range argument must have one entry per dimension')

    # Create edge arrays
    for i in _range(D):
        if np.ndim(bins[i]) == 0:
            if bins[i] < 1:
                raise ValueError(
                    '`bins[{}]` must be positive, when an integer'.format(i))
            smin, smax = _get_outer_edges(sample[:,i], range[i])
            try:
                n = operator.index(bins[i])
            except TypeError as e:
                raise TypeError(
                    "`bins[{}]` must be an integer, when a scalar".format(i)
                ) from e
            edges[i] = np.linspace(smin, smax, n + 1)
        elif np.ndim(bins[i]) == 1:
            edges[i] = np.asarray(bins[i])
            if np.any(edges[i][:-1] > edges[i][1:]):
                raise ValueError(
                    '`bins[{}]` must be monotonically increasing, when an array'
                    .format(i))
        else:
            raise ValueError(
                '`bins[{}]` must be a scalar or 1d array'.format(i))

        nbin[i] = len(edges[i]) + 1  # includes an outlier on each end
        dedges[i] = np.diff(edges[i])

    # Compute the bin number each sample falls into.
    Ncount = tuple(
        # avoid np.digitize to work around gh-11022
        np.searchsorted(edges[i], sample[:, i], side='right')
        for i in _range(D)
    )

    # Using digitize, values that fall on an edge are put in the right bin.
    # For the rightmost bin, we want values equal to the right edge to be
    # counted in the last bin, and not as an outlier.
    for i in _range(D):
        # Find which points are on the rightmost edge.
        on_edge = (sample[:, i] == edges[i][-1])
        # Shift these points one bin to the left.
        Ncount[i][on_edge] -= 1

    # Compute the sample indices in the flattened histogram matrix.
    # This raises an error if the array is too large.
    xy = np.ravel_multi_index(Ncount, nbin)

    # Compute the number of repetitions in xy and assign it to the
    # flattened histmat.
    hist = np.bincount(xy, weights, minlength=nbin.prod())

    # Shape into a proper matrix
    hist = hist.reshape(nbin)

    # This preserves the (bad) behavior observed in gh-7845, for now.
    hist = hist.astype(float, casting='safe')

    # Remove outliers (indices 0 and -1 for each dimension).
    core = D*(slice(1, -1),)
    hist = hist[core]

    if density:
        # calculate the probability density function
        s = hist.sum()
        for i in _range(D):
            shape = np.ones(D, int)
            shape[i] = nbin[i] - 2
            hist = hist / dedges[i].reshape(shape)
        hist /= s

    if (hist.shape != nbin - 2).any():
        raise RuntimeError(
            "Internal Shape Error")
    return hist, edges
2215054472616df563faa4613734426c790d4217
17
histograms.py
914
DEP: Remove `normed=` keyword argument from histogroms The normed keyword argument has been deprecated for a long time. This removes it, replacing its position with the new density argument.
38,643
0
1,104
570
244
160,494
454
numpy
58
numpy/lib/histograms.py
Python
71
{ "docstring": "\n Compute the multidimensional histogram of some data.\n\n Parameters\n ----------\n sample : (N, D) array, or (D, N) array_like\n The data to be histogrammed.\n\n Note the unusual interpretation of sample when an array_like:\n\n * When an array, each row is a coordinate in a D-dimensional space -\n such as ``histogramdd(np.array([p1, p2, p3]))``.\n * When an array_like, each element is the list of values for single\n coordinate - such as ``histogramdd((X, Y, Z))``.\n\n The first form should be preferred.\n\n bins : sequence or int, optional\n The bin specification:\n\n * A sequence of arrays describing the monotonically increasing bin\n edges along each dimension.\n * The number of bins for each dimension (nx, ny, ... =bins)\n * The number of bins for all dimensions (nx=ny=...=bins).\n\n range : sequence, optional\n A sequence of length D, each an optional (lower, upper) tuple giving\n the outer bin edges to be used if the edges are not given explicitly in\n `bins`.\n An entry of None in the sequence results in the minimum and maximum\n values being used for the corresponding dimension.\n The default, None, is equivalent to passing a tuple of D None values.\n density : bool, optional\n If False, the default, returns the number of samples in each bin.\n If True, returns the probability *density* function at the bin,\n ``bin_count / sample_count / bin_volume``.\n weights : (N,) array_like, optional\n An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`.\n Weights are normalized to 1 if density is True. If density is False,\n the values of the returned histogram are equal to the sum of the\n weights belonging to the samples falling into each bin.\n\n Returns\n -------\n H : ndarray\n The multidimensional histogram of sample x. See density and weights\n for the different possible semantics.\n edges : list\n A list of D arrays describing the bin edges for each dimension.\n\n See Also\n --------\n histogram: 1-D histogram\n histogram2d: 2-D histogram\n\n Examples\n --------\n >>> r = np.random.randn(100,3)\n >>> H, edges = np.histogramdd(r, bins = (5, 8, 4))\n >>> H.shape, edges[0].size, edges[1].size, edges[2].size\n ((5, 8, 4), 6, 9, 5)\n\n ", "language": "en", "n_whitespaces": 612, "n_words": 340, "vocab_size": 182 }
https://github.com/numpy/numpy.git
5
_check_list_display_links
def _check_list_display_links(self, obj):
    from django.contrib.admin.options import ModelAdmin

    if obj.list_display_links is None:
        return []
    elif not isinstance(obj.list_display_links, (list, tuple)):
        return must_be(
            "a list, a tuple, or None",
            option="list_display_links",
            obj=obj,
            id="admin.E110",
        )
    # Check only if ModelAdmin.get_list_display() isn't overridden.
    elif obj.get_list_display.__func__ is ModelAdmin.get_list_display:
        return list(
            chain.from_iterable(
                self._check_list_display_links_item(
                    obj, field_name, "list_display_links[%d]" % index
                )
                for index, field_name in enumerate(obj.list_display_links)
            )
        )
    return []
9c19aff7c7561e3a82978a272ecdaad40dda5c00
16
checks.py
168
Refs #33476 -- Reformatted code with Black.
50,325
0
334
107
50
203,351
60
django
23
django/contrib/admin/checks.py
Python
21
{ "docstring": "Check that list_display_links is a unique subset of list_display.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/django/django.git
3
handle_trial_end
def handle_trial_end(self, data):
    hyper_params = nni.load(data['hyper_params'])
    if self.is_created_in_previous_exp(hyper_params['parameter_id']):
        # The end of the recovered trial is ignored
        return
    logger.debug('Tuner handle trial end, result is %s', data)
    self._handle_trial_end(hyper_params['parameter_id'])
    if data['trial_job_id'] in self.job_id_para_id_map:
        del self.job_id_para_id_map[data['trial_job_id']]
bcc640c4e5e687a03fe21503692dad96e0b97fa7
10
bohb_advisor.py
119
[nas] fix issue introduced by the trial recovery feature (#5109)
24,917
0
108
68
30
113,472
33
nni
11
nni/algorithms/hpo/bohb_advisor/bohb_advisor.py
Python
8
{ "docstring": "receive the information of trial end and generate next configuaration.\n\n Parameters\n ----------\n data: dict()\n it has three keys: trial_job_id, event, hyper_params\n trial_job_id: the id generated by training service\n event: the job's state\n hyper_params: the hyperparameters (a string) generated and returned by tuner\n ", "language": "en", "n_whitespaces": 114, "n_words": 42, "vocab_size": 36 }
https://github.com/microsoft/nni.git
26
handle_merge
def handle_merge(self, loader, conflicts):
    if self.interactive:
        questioner = InteractiveMigrationQuestioner(prompt_output=self.stdout)
    else:
        questioner = MigrationQuestioner(defaults={'ask_merge': True})

    for app_label, migration_names in conflicts.items():
        # Grab out the migrations in question, and work out their
        # common ancestor.
        merge_migrations = []
        for migration_name in migration_names:
            migration = loader.get_migration(app_label, migration_name)
            migration.ancestry = [
                mig for mig in loader.graph.forwards_plan((app_label, migration_name))
                if mig[0] == migration.app_label
            ]
            merge_migrations.append(migration)
0ab58c120939093fea90822f376e1866fc714d1f
15
makemigrations.py
169
Refs #29026 -- Allowed customizing InteractiveMigrationQuestioner's prompt destination. Previously, the questioner did not obey the value of stdout provided to the command.
50,163
0
251
520
45
202,901
59
django
23
django/core/management/commands/makemigrations.py
Python
66
{ "docstring": "\n Handles merging together conflicted migrations interactively,\n if it's safe; otherwise, advises on how to fix it.\n ", "language": "en", "n_whitespaces": 38, "n_words": 16, "vocab_size": 16 }
https://github.com/django/django.git
11
line_search
def line_search(self, X, y, sample_weight):
    # line search parameters
    beta, sigma = 0.5, 0.00048828125  # 1/2, 1/2**11
    eps = 16 * np.finfo(self.loss_value.dtype).eps
    t = 1  # step size

    # gradient_times_newton = self.gradient @ self.coef_newton
    # was computed in inner_solve.
    armijo_term = sigma * self.gradient_times_newton
    _, _, raw_prediction_newton = self.linear_loss.weight_intercept_raw(
        self.coef_newton, X
    )

    self.coef_old = self.coef
    self.loss_value_old = self.loss_value
    self.gradient_old = self.gradient

    # np.sum(np.abs(self.gradient_old))
    sum_abs_grad_old = -1

    is_verbose = self.verbose >= 2
    if is_verbose:
        print("  Backtracking Line Search")
        print(f"    eps=10 * finfo.eps={eps}")

    for i in range(21):  # until and including t = beta**20 ~ 1e-6
        self.coef = self.coef_old + t * self.coef_newton
        raw = self.raw_prediction + t * raw_prediction_newton
        self.loss_value, self.gradient = self.linear_loss.loss_gradient(
            coef=self.coef,
            X=X,
            y=y,
            sample_weight=sample_weight,
            l2_reg_strength=self.l2_reg_strength,
            n_threads=self.n_threads,
            raw_prediction=raw,
        )
        # Note: If coef_newton is too large, loss_gradient may produce inf values,
        # potentially accompanied by a RuntimeWarning.
        # This case will be captured by the Armijo condition.

        # 1. Check Armijo / sufficient decrease condition.
        # The smaller (more negative) the better.
        loss_improvement = self.loss_value - self.loss_value_old
        check = loss_improvement <= t * armijo_term
        if is_verbose:
            print(
                f"    line search iteration={i+1}, step size={t}\n"
                f"      check loss improvement <= armijo term: {loss_improvement} "
                f"<= {t * armijo_term} {check}"
            )
        if check:
            break
        # 2. Deal with relative loss differences around machine precision.
        tiny_loss = np.abs(self.loss_value_old * eps)
        check = np.abs(loss_improvement) <= tiny_loss
        if is_verbose:
            print(
                "      check loss |improvement| <= eps * |loss_old|:"
                f" {np.abs(loss_improvement)} <= {tiny_loss} {check}"
            )
        if check:
            if sum_abs_grad_old < 0:
                sum_abs_grad_old = scipy.linalg.norm(self.gradient_old, ord=1)
            # 2.1 Check sum of absolute gradients as alternative condition.
            sum_abs_grad = scipy.linalg.norm(self.gradient, ord=1)
            check = sum_abs_grad < sum_abs_grad_old
            if is_verbose:
                print(
                    "      check sum(|gradient|) < sum(|gradient_old|): "
                    f"{sum_abs_grad} < {sum_abs_grad_old} {check}"
                )
            if check:
                break

        t *= beta
    else:
        warnings.warn(
            f"Line search of Newton solver {self.__class__.__name__} at iteration "
            f"#{self.iteration} did not converge after 21 line search refinement "
            "iterations. It will now resort to lbfgs instead.",
            ConvergenceWarning,
        )
        if self.verbose:
            print("  Line search did not converge and resorts to lbfgs instead.")
        self.use_fallback_lbfgs_solve = True
        return

    self.raw_prediction = raw
ff9344f3d8d11d38fa3a2497199113e5bac9537c
16
_newton_solver.py
645
FEA add (single) Cholesky Newton solver to GLMs (#24637) * FEA add NewtonSolver, CholeskyNewtonSolver and QRCholeskyNewtonSolver * ENH better singular hessian special solve * CLN fix some typos found by reviewer * TST assert ConvergenceWarning is raised * MNT add BaseCholeskyNewtonSolver * WIP colinear design in GLMs * FIX _solve_singular * FIX false unpacking in * TST add tests for unpenalized GLMs * TST fix solutions of glm_dataset * ENH add SVDFallbackSolver * CLN remove SVDFallbackSolver * ENH use gradient step for singular hessians * ENH print iteration number in warnings * TST improve test_linalg_warning_with_newton_solver * CLN LinAlgWarning fron scipy.linalg * ENH more robust hessian * ENH increase maxls for lbfgs to make it more robust * ENH add hessian_warning for too many negative hessian values * CLN some warning messages * ENH add lbfgs_step * ENH use lbfgs_step for hessian_warning * TST make them pass * TST tweek rtol for lbfgs * TST add rigoros test for GLMs * TST improve test_warm_start * ENH improve lbfgs options for better convergence * CLN fix test_warm_start * TST fix assert singular values in datasets * CLN address most review comments * ENH enable more vebosity levels for lbfgs * DOC add whatsnew * CLN remove xfail and clean a bit * CLN docstring about minimum norm * More informative repr for the glm_dataset fixture cases * Forgot to run black * CLN remove unnecessary filterwarnings * CLN address review comments * Trigger [all random seeds] on the following tests: test_glm_regression test_glm_regression_hstacked_X test_glm_regression_vstacked_X test_glm_regression_unpenalized test_glm_regression_unpenalized_hstacked_X test_glm_regression_unpenalized_vstacked_X test_warm_start * CLN add comment for lbfgs ftol=64 * machine precision * CLN XXX code comment * Trigger [all random seeds] on the following tests: test_glm_regression test_glm_regression_hstacked_X test_glm_regression_vstacked_X test_glm_regression_unpenalized test_glm_regression_unpenalized_hstacked_X test_glm_regression_unpenalized_vstacked_X test_warm_start * CLN link issue and remove code snippet in comment * Trigger [all random seeds] on the following tests: test_glm_regression test_glm_regression_hstacked_X test_glm_regression_vstacked_X test_glm_regression_unpenalized test_glm_regression_unpenalized_hstacked_X test_glm_regression_unpenalized_vstacked_X test_warm_start * CLN add catch_warnings * Trigger [all random seeds] on the following tests: test_glm_regression test_glm_regression_hstacked_X test_glm_regression_vstacked_X test_glm_regression_unpenalized test_glm_regression_unpenalized_hstacked_X test_glm_regression_unpenalized_vstacked_X test_warm_start * Trigger [all random seeds] on the following tests: test_glm_regression test_glm_regression_hstacked_X test_glm_regression_vstacked_X test_glm_regression_unpenalized test_glm_regression_unpenalized_hstacked_X test_glm_regression_unpenalized_vstacked_X test_warm_start * [all random seeds] test_glm_regression test_glm_regression_hstacked_X test_glm_regression_vstacked_X test_glm_regression_unpenalized test_glm_regression_unpenalized_hstacked_X test_glm_regression_unpenalized_vstacked_X test_warm_start * Trigger with -Werror [all random seeds] test_glm_regression test_glm_regression_hstacked_X test_glm_regression_vstacked_X test_glm_regression_unpenalized test_glm_regression_unpenalized_hstacked_X test_glm_regression_unpenalized_vstacked_X test_warm_start * ENH increase maxls to 50 * [all random seeds] test_glm_regression test_glm_regression_hstacked_X test_glm_regression_vstacked_X test_glm_regression_unpenalized test_glm_regression_unpenalized_hstacked_X test_glm_regression_unpenalized_vstacked_X test_warm_start * Revert "Trigger with -Werror [all random seeds]" This reverts commit 99f4cf99ca41b4ad2bdad537ad60f936970e3a88. * TST add catch_warnings to filterwarnings * TST adapt tests for newton solvers * CLN cleaner gradient step with gradient_times_newton * DOC add whatsnew * ENH always use lbfgs as fallback * TST adapt rtol * TST fix test_linalg_warning_with_newton_solver * CLN address some review comments * Improve tests related to convergence warning on collinear data * overfit -> fit * Typo in comment * Apply suggestions from code review * ENH fallback_lbfgs_solve - Do not use lbfgs steps, fall back complete to lbfgs * ENH adapt rtol * Improve test_linalg_warning_with_newton_solver * Better comments * Fixed Hessian casing and improved warning messages * [all random seeds] test_linalg_warning_with_newton_solver * Ignore ConvergenceWarnings for now if convergence is good * CLN remove counting of warnings * ENH fall back to lbfgs if line search did not converge * DOC better comment on performance bottleneck * Update GLM related examples to use the new solver * CLN address reviewer comments * EXA improve some wordings * CLN do not pop "solver in parameter constraints * CLN fix typos * DOC fix docstring * CLN remove solver newton-qr-cholesky * DOC update PR number in whatsnew * CLN address review comments * CLN remove unnecessary catch_warnings * CLN address some review comments * DOC more precise whatsnew * CLN use init_zero_coef * CLN use and test init_zero_coef * CLN address some review comments * CLN mark NewtonSolver as private by leading underscore * CLN exact comments for inner_solve * TST add test_newton_solver_verbosity * TST extend test_newton_solver_verbosity * TST logic in test_glm_regression_unpenalized * TST use count_nonzero * CLN remove super rare line search checks * MNT move Newton solver to new file _newton_solver.py Co-authored-by: Olivier Grisel <[email protected]> Co-authored-by: Julien Jerphanion <[email protected]>
76,792
0
1,364
350
201
261,382
339
scikit-learn
52
sklearn/linear_model/_glm/_newton_solver.py
Python
70
{ "docstring": "Backtracking line search.\n\n Sets:\n - self.coef_old\n - self.coef\n - self.loss_value_old\n - self.loss_value\n - self.gradient_old\n - self.gradient\n - self.raw_prediction\n ", "language": "en", "n_whitespaces": 109, "n_words": 18, "vocab_size": 12 }
https://github.com/scikit-learn/scikit-learn.git
3
extrema_bounding
def extrema_bounding(G, compute="diameter"):
    import warnings

    msg = "extrema_bounding is deprecated and will be removed in networkx 3.0\n"
    # NOTE: _extrema_bounding does input checking, so it is skipped here
    if compute in {"diameter", "radius", "periphery", "center"}:
        msg += f"Use nx.{compute}(G, usebounds=True) instead."
    if compute == "eccentricities":
        msg += f"Use nx.eccentricity(G) instead."
    warnings.warn(msg, DeprecationWarning, stacklevel=2)
    return _extrema_bounding(G, compute=compute)
2ef5c096fb870638fd565c62c84999364c21beaf
10
distance_measures.py
116
Deprecate extrema bounding (#5422) * Add public wrapper and convert impl to private. * Add deprecation warning to public fn. * Add test for deprecation warning. * Add deprecation note. * Add release note.
41,903
0
94
62
47
176,440
56
networkx
9
networkx/algorithms/distance_measures.py
Python
9
{ "docstring": "Compute requested extreme distance metric of undirected graph G\n\n .. deprecated:: 2.8\n\n extrema_bounding is deprecated and will be removed in NetworkX 3.0.\n Use the corresponding distance measure with the `usebounds=True` option\n instead.\n\n Computation is based on smart lower and upper bounds, and in practice\n linear in the number of nodes, rather than quadratic (except for some\n border cases such as complete graphs or circle shaped graphs).\n\n Parameters\n ----------\n G : NetworkX graph\n An undirected graph\n\n compute : string denoting the requesting metric\n \"diameter\" for the maximal eccentricity value,\n \"radius\" for the minimal eccentricity value,\n \"periphery\" for the set of nodes with eccentricity equal to the diameter,\n \"center\" for the set of nodes with eccentricity equal to the radius,\n \"eccentricities\" for the maximum distance from each node to all other nodes in G\n\n Returns\n -------\n value : value of the requested metric\n int for \"diameter\" and \"radius\" or\n list of nodes for \"center\" and \"periphery\" or\n dictionary of eccentricity values keyed by node for \"eccentricities\"\n\n Raises\n ------\n NetworkXError\n If the graph consists of multiple components\n ValueError\n If `compute` is not one of \"diameter\", \"radius\", \"periphery\", \"center\", or \"eccentricities\".\n Notes\n -----\n This algorithm was proposed in the following papers:\n\n F.W. Takes and W.A. Kosters, Determining the Diameter of Small World\n Networks, in Proceedings of the 20th ACM International Conference on\n Information and Knowledge Management (CIKM 2011), pp. 1191-1196, 2011.\n doi: https://doi.org/10.1145/2063576.2063748\n\n F.W. Takes and W.A. Kosters, Computing the Eccentricity Distribution of\n Large Graphs, Algorithms 6(1): 100-118, 2013.\n doi: https://doi.org/10.3390/a6010100\n\n M. Borassi, P. Crescenzi, M. Habib, W.A. Kosters, A. Marino and F.W. Takes,\n Fast Graph Diameter and Radius BFS-Based Computation in (Weakly Connected)\n Real-World Graphs, Theoretical Computer Science 586: 59-80, 2015.\n doi: https://doi.org/10.1016/j.tcs.2015.02.033\n ", "language": "en", "n_whitespaces": 456, "n_words": 280, "vocab_size": 175 }
https://github.com/networkx/networkx.git
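A minimal usage sketch of the replacement API that the deprecation message in the record above points to (the example graph is illustrative, not part of the record):

import networkx as nx

G = nx.petersen_graph()  # stand-in for any connected undirected graph
# Equivalent of the deprecated extrema_bounding(G, compute="diameter"):
print(nx.diameter(G, usebounds=True))  # -> 2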
8
_GetTextInside
def _GetTextInside(text, start_pattern):
  r
  # TODO(sugawarayu): Audit cpplint.py to see what places could be profitably
  # rewritten to use _GetTextInside (and use inferior regexp matching today).

  # Give opening punctuations to get the matching close-punctuations.
  matching_punctuation = {'(': ')', '{': '}', '[': ']'}
  closing_punctuation = set(matching_punctuation.itervalues())

  # Find the position to start extracting text.
  match = re.search(start_pattern, text, re.M)
  if not match:  # start_pattern not found in text.
    return None
  start_position = match.end(0)

  assert start_position > 0, (
      'start_pattern must ends with an opening punctuation.')
  assert text[start_position - 1] in matching_punctuation, (
      'start_pattern must ends with an opening punctuation.')
  # Stack of closing punctuations we expect to have in text after position.
  punctuation_stack = [matching_punctuation[text[start_position - 1]]]
  position = start_position
  while punctuation_stack and position < len(text):
    if text[position] == punctuation_stack[-1]:
      punctuation_stack.pop()
    elif text[position] in closing_punctuation:
      # A closing punctuation without matching opening punctuations.
      return None
    elif text[position] in matching_punctuation:
      punctuation_stack.append(matching_punctuation[text[position]])
    position += 1
  if punctuation_stack:
    # Opening punctuations left without matching close-punctuations.
    return None
  # punctuations match.
  return text[start_position:position - 1]


# Patterns for matching call-by-reference parameters.
#
# Supports nested templates up to 2 levels deep using this messy pattern:
#   < (?: < (?: < [^<>]*
#               >
#           |   [^<>] )*
#         >
#     |   [^<>] )*
#   >
_RE_PATTERN_IDENT = r'[_a-zA-Z]\w*'  # =~ [[:alpha:]][[:alnum:]]*
_RE_PATTERN_TYPE = (
    r'(?:const\s+)?(?:typename\s+|class\s+|struct\s+|union\s+|enum\s+)?'
    r'(?:\w|'
    r'\s*<(?:<(?:<[^<>]*>|[^<>])*>|[^<>])*>|'
    r'::)+')
# A call-by-reference parameter ends with '& identifier'.
_RE_PATTERN_REF_PARAM = re.compile(
    r'(' + _RE_PATTERN_TYPE + r'(?:\s*(?:\bconst\b|[*]))*\s*'
    r'&\s*' + _RE_PATTERN_IDENT + r')\s*(?:=[^,()]+)?[,)]')
# A call-by-const-reference parameter either ends with 'const& identifier'
# or looks like 'const type& identifier' when 'type' is atomic.
_RE_PATTERN_CONST_REF_PARAM = (
    r'(?:.*\s*\bconst\s*&\s*' + _RE_PATTERN_IDENT +
    r'|const\s+' + _RE_PATTERN_TYPE + r'\s*&\s*' + _RE_PATTERN_IDENT + r')')
cc4d0564756ca067516f71718a3d135996525909
14
cpp_lint.py
397
Balanced joint maximum mean discrepancy for deep transfer learning
12,101
0
405
173
164
60,372
282
transferlearning
23
code/deep/BJMMD/caffe/scripts/cpp_lint.py
Python
43
{ "docstring": "Retrieves all the text between matching open and close parentheses.\n\n Given a string of lines and a regular expression string, retrieve all the text\n following the expression and between opening punctuation symbols like\n (, [, or {, and the matching close-punctuation symbol. This properly nested\n occurrences of the punctuations, so for the text like\n printf(a(), b(c()));\n a call to _GetTextInside(text, r'printf\\(') will return 'a(), b(c())'.\n start_pattern must match string having an open punctuation symbol at the end.\n\n Args:\n text: The lines to extract text. Its comments and strings must be elided.\n It can be single line and can span multiple lines.\n start_pattern: The regexp string indicating where to start extracting\n the text.\n Returns:\n The extracted text.\n None if either the opening string or ending punctuation could not be found.\n ", "language": "en", "n_whitespaces": 181, "n_words": 129, "vocab_size": 87 }
https://github.com/jindongwang/transferlearning.git
2
rsa_key_size
def rsa_key_size(self) -> Optional[int]:
    key = self._private_key()
    if isinstance(key, RSAPrivateKey):
        return key.key_size
    return None
212c2ba990758cb9acd2b200e55302534988089a
8
storage.py
53
error out when --reuse-key conflicts with other flags (#9262) * error out when --reuse-key conflicts with other flags * add unit test * add integration tests * lint
45,648
0
53
32
13
186,897
14
certbot
9
certbot/certbot/_internal/storage.py
Python
9
{ "docstring": "\n :returns: If the private key is an RSA key, its size.\n :rtype: int\n ", "language": "en", "n_whitespaces": 35, "n_words": 13, "vocab_size": 13 }
https://github.com/certbot/certbot.git
1
test_get_user_ip_and_agents_combined_data
def test_get_user_ip_and_agents_combined_data(self) -> None:
    self.reactor.advance(12345678)
    user_id = "@user:id"
    user = UserID.from_string(user_id)

    # Insert user IPs
    self.get_success(
        self.store.insert_client_ip(
            user_id, "access_token", "ip_1", "user_agent_1", "MY_DEVICE_1"
        )
    )
    self.get_success(
        self.store.insert_client_ip(
            user_id, "access_token", "ip_2", "user_agent_2", "MY_DEVICE_2"
        )
    )

    # Trigger the storage loop and wait for the rate limiting period to be over
    self.reactor.advance(10 + LAST_SEEN_GRANULARITY / 1000)

    # Update the user agent for the second device, without running the storage loop
    self.get_success(
        self.store.insert_client_ip(
            user_id, "access_token", "ip_2", "user_agent_3", "MY_DEVICE_2"
        )
    )

    # Check that the new IP and user agent has not been stored yet
    db_result = self.get_success(
        self.store.db_pool.simple_select_list(
            table="user_ips",
            keyvalues={},
            retcols=("access_token", "ip", "user_agent", "last_seen"),
        ),
    )
    self.assertEqual(
        db_result,
        [
            {
                "access_token": "access_token",
                "ip": "ip_1",
                "user_agent": "user_agent_1",
                "last_seen": 12345678000,
            },
            {
                "access_token": "access_token",
                "ip": "ip_2",
                "user_agent": "user_agent_2",
                "last_seen": 12345678000,
            },
        ],
    )

    # Check that data from the database and memory are combined together correctly
    self.assertCountEqual(
        self.get_success(self.store.get_user_ip_and_agents(user)),
        [
            {
                "access_token": "access_token",
                "ip": "ip_1",
                "user_agent": "user_agent_1",
                "last_seen": 12345678000,
            },
            {
                "access_token": "access_token",
                "ip": "ip_2",
                "user_agent": "user_agent_3",
                "last_seen": 12345688000 + LAST_SEEN_GRANULARITY,
            },
        ],
    )
3ac412b4e2f8c5ba11dc962b8a9d871c1efdce9b
13
test_client_ips.py
451
Require types in tests.storage. (#14646) Adds missing type hints to `tests.storage` package and does not allow untyped definitions.
73,278
0
989
250
90
250,114
167
synapse
21
tests/storage/test_client_ips.py
Python
64
{ "docstring": "Test that `get_user_ip_and_agents` combines persisted and unpersisted data\n together correctly\n ", "language": "en", "n_whitespaces": 24, "n_words": 10, "vocab_size": 10 }
https://github.com/matrix-org/synapse.git
1
_show_diff_helper
def _show_diff_helper(self, frame_data, expected_frame_data):
    import matplotlib.gridspec as gridspec  # type: ignore
    import matplotlib.pyplot as plt

    gs = gridspec.GridSpec(2, 2)
    fig = plt.figure()
    fig.suptitle(f"Test for {str(self.scene).replace('Test', '')}", fontsize=16)

    ax = fig.add_subplot(gs[0, 0])
    ax.imshow(frame_data)
    ax.set_title("Generated :")

    ax = fig.add_subplot(gs[0, 1])
    ax.imshow(expected_frame_data)
    ax.set_title("Expected :")

    ax = fig.add_subplot(gs[1, :])
    diff_im = expected_frame_data.copy()
    diff_im = np.where(
        frame_data != np.array([0, 0, 0, 255]),
        np.array([0, 255, 0, 255], dtype="uint8"),
        np.array([0, 0, 0, 255], dtype="uint8"),
    )  # Set any non-black pixels to green
    np.putmask(
        diff_im,
        expected_frame_data != frame_data,
        np.array([255, 0, 0, 255], dtype="uint8"),
    )  # Set any different pixels to red
    ax.imshow(diff_im, interpolation="nearest")
    ax.set_title("Differences summary : (green = same, red = different)")

    plt.show()
    plt.savefig(f"{self.scene}.png")
c4217731e08470d5a56cf02cf76cae01c03fb78f
14
GraphicalUnitTester.py
407
Added MyPy Support (#1972) * MyPy Support * MyPy Hook * Removing MyPy Hook * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Delete __init__.pyi * Delete color.pyi * Update .mypy.ini Co-authored-by: Christopher Besch <[email protected]> * changes * quick fix * MyPy Hook * MyPy Hook Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christopher Besch <[email protected]>
46,030
0
329
240
69
189,389
106
manim
31
tests/utils/GraphicalUnitTester.py
Python
28
{ "docstring": "Will visually display with matplotlib differences between frame generated and the one expected.", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 13 }
https://github.com/ManimCommunity/manim.git
2
test_multi_sync_same_node
def test_multi_sync_same_node(ray_start_2_cpus, temp_data_dirs, num_workers):
    tmp_source, tmp_target = temp_data_dirs

    assert_file(True, tmp_source, "level0.txt")
    assert_file(True, tmp_source, "subdir/level1.txt")

    node_ip = ray.util.get_node_ip_address()
    futures = [
        _sync_dir_on_same_node(
            ip=node_ip,
            source_path=tmp_source,
            target_path=tmp_target,
            return_futures=True,
        )
        for _ in range(num_workers)
    ]
    ray.get(futures)

    assert_file(True, tmp_target, "level0.txt")
    assert_file(True, tmp_target, "subdir/level1.txt")


@pytest.mark.parametrize("num_workers", [1, 8])
6313ddc47cf9df4df8c8907997df559850a1b874
@pytest.mark.parametrize("num_workers", [1, 8])
10
test_util_file_transfer.py
168
[tune] Refactor Syncer / deprecate Sync client (#25655) This PR includes / depends on #25709 The two concepts of Syncer and SyncClient are confusing, as is the current API for passing custom sync functions. This PR refactors Tune's syncing behavior. The Sync client concept is hard deprecated. Instead, we offer a well defined Syncer API that can be extended to provide own syncing functionality. However, the default will be to use Ray AIRs file transfer utilities. New API: - Users can pass `syncer=CustomSyncer` which implements the `Syncer` API - Otherwise our off-the-shelf syncing is used - As before, syncing to cloud disables syncing to driver Changes: - Sync client is removed - Syncer interface introduced - _DefaultSyncer is a wrapper around the URI upload/download API from Ray AIR - SyncerCallback only uses remote tasks to synchronize data - Rsync syncing is fully depracated and removed - Docker and kubernetes-specific syncing is fully deprecated and removed - Testing is improved to use `file://` URIs instead of mock sync clients
32,557
1
135
92
31
141,978
41
ray
23
python/ray/tune/tests/test_util_file_transfer.py
Python
17
{ "docstring": "Check that multiple competing syncs to the same node+dir don't interfere", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
https://github.com/ray-project/ray.git
5
yarn_check
def yarn_check(file_list):
    if file_list is None or os.environ.get("SKIP_YARN_CHECK"):
        return False

    if "package.json" in file_list and "yarn.lock" not in file_list:
        sys.stdout.write(
            "\033[33m" +  + "\033[0m" + "\n"
        )
        return True
    return False
09e1a361f3590dcf345ba7b2a1e35b74cba97ccb
13
engine.py
98
fix(dx): Check for prettier verison in dependencies also (#33100)
19,455
0
116
50
23
97,663
31
sentry
8
src/sentry/lint/engine.py
Python
16
{ "docstring": "\n Checks if package.json was modified WITHOUT a corresponding change in the Yarn\n lockfile. This can happen if a user manually edited package.json without running Yarn.\n\n This is a user prompt right now because there ARE cases where you can touch package.json\n without a Yarn lockfile change, e.g. Jest config changes, license changes, etc.\n Warning: package.json modified without accompanying yarn.lock modifications.\n\nIf you updated a dependency/devDependencies in package.json, you must run `yarn install` to update the lockfile.\n\nTo skip this check, run `SKIP_YARN_CHECK=1 git commit [options]`", "language": "en", "n_whitespaces": 98, "n_words": 85, "vocab_size": 63 }
https://github.com/getsentry/sentry.git
2
_get_base_market_data_info
def _get_base_market_data_info(self) -> Union[Dict[str, Any], Any]:
    market_dct = {}
    market_data = self.coin.get("market_data", {})
    for stat in [
        "total_supply",
        "max_supply",
        "circulating_supply",
        "price_change_percentage_24h",
        "price_change_percentage_7d",
        "price_change_percentage_30d",
    ]:
        market_dct[stat] = market_data.get(stat)
    prices = create_dictionary_with_prefixes(
        ["current_price"], market_data, DENOMINATION
    )
    market_dct.update(prices)
    return market_dct
59d8b36bb0467a1a99513b10e8b8471afaa56fd6
10
pycoingecko_model.py
139
[IMPROVE] Fix Docstring formatting/Fix missing, incomplete type hints (#3412) * Fixes * Update stocks_helper.py * update git-actions set-output to new format * Update stocks_helper.py * Update terminal_helper.py * removed LineAnnotateDrawer from qa_view * lint * few changes * updates * sdk auto gen modules done * Update stocks_helper.py * updates to changed imports, and remove first sdk_modules * Update generate_sdk.py * Update generate_sdk.py * pylint * revert stocks_helper * Update generate_sdk.py * Update sdk.py * Update generate_sdk.py * full auto generation, added sdk.py/controllers creation * missed enable forecasting * added running black in subprocess after sdk files generation completes * removed deleted sdk_arg_logger * comment out tests * property doc fix * clean up * Update generate_sdk.py * make trailmap classes useable for doc generation * Update generate_sdk.py * added lineon to trailmap class for linking to func in markdown * changed lineon to dict * added full_path to trailmap for linking in docs * updated portfolio * feat: initial files * feat: added meta head * feat: added funcdef * added func_def to trailmap attributes for markdown in docs, added missing type hints to covid functions * feat: added view and merged with jaun * Update generate_sdk.py * Update generate_sdk.py * Update generate_sdk.py * Update generate_sdk.py * init * fix returns * fix: random stuff * fix: random * fixed encoding issue on windows * fix: generate tabs * update * Update generate_sdk_markdown.py * Create .pydocstyle.ini * added type hint classes for views * fixes * alt, ba * alt-economy * Update finviz_compare_model.py * fixs * Update substack_model.py * Update generate_sdk.py * last of my section * porfolio * po * Update optimizer_model.py * fixing more things * few more * keys done * update * fixes * Update generate_sdk_markdown.py * Update generate_sdk_markdown.py * mypy forecast fix * Update generate_sdk_markdown.py * Update generate_sdk_markdown.py * Update generate_sdk_markdown.py * fixes * forecast fixes * one more fix * Update coinbase_model.py * Update generate_sdk_markdown.py Co-authored-by: Colin Delahunty <[email protected]> Co-authored-by: James Maslek <[email protected]> Co-authored-by: jose-donato <[email protected]> Co-authored-by: andrewkenreich <[email protected]>
85,888
0
188
84
33
286,572
37
OpenBBTerminal
15
openbb_terminal/cryptocurrency/due_diligence/pycoingecko_model.py
Python
24
{ "docstring": "Helper method that fetches all the base market/price information about given coin. [Source: CoinGecko]\n\n Returns\n ----------\n Dict[str, Any]\n All market related information for given coin\n ", "language": "en", "n_whitespaces": 64, "n_words": 25, "vocab_size": 23 }
https://github.com/OpenBB-finance/OpenBBTerminal.git
2
test_dataset_shard_with_task_parallelization
def test_dataset_shard_with_task_parallelization(self):
    config = {
        "input": "dataset",
        "input_config": {
            "format": "json",
            "paths": self.dset_path,
            "parallelism": 10,
        },
    }
    NUM_WORKERS = 4

    _, shards = get_dataset_and_shards(config, num_workers=NUM_WORKERS)

    assert len(shards) == NUM_WORKERS + 1
    assert shards[0] is None
    assert all(
        isinstance(remote_shard, ray.data.Dataset) for remote_shard in shards[1:]
    )
569fe0109629048d08e1d9e023f7769f10bd2244
11
test_dataset_reader.py
143
[RLlib] improved unittests for dataset_reader and fixed bugs (#26458)
27,737
0
196
86
38
124,997
44
ray
16
rllib/offline/tests/test_dataset_reader.py
Python
16
{ "docstring": "Tests whether the dataset_shard function works correctly with parallelism\n for reading the dataset.", "language": "en", "n_whitespaces": 19, "n_words": 13, "vocab_size": 12 }
https://github.com/ray-project/ray.git
1
homogeneity_score
def homogeneity_score(labels_true, labels_pred):
    return homogeneity_completeness_v_measure(labels_true, labels_pred)[0]
4253eace9893eb6aef36ca631e7978b6a8808fbc
8
_supervised.py
29
DOC Ensures that homogeneity_score passes numpydoc validation (#23006)
75,819
0
12
18
6
259,555
6
scikit-learn
4
sklearn/metrics/cluster/_supervised.py
Python
2
{ "docstring": "Homogeneity metric of a cluster labeling given a ground truth.\n\n A clustering result satisfies homogeneity if all of its clusters\n contain only data points which are members of a single class.\n\n This metric is independent of the absolute values of the labels:\n a permutation of the class or cluster label values won't change the\n score value in any way.\n\n This metric is not symmetric: switching ``label_true`` with ``label_pred``\n will return the :func:`completeness_score` which will be different in\n general.\n\n Read more in the :ref:`User Guide <homogeneity_completeness>`.\n\n Parameters\n ----------\n labels_true : int array, shape = [n_samples]\n Ground truth class labels to be used as a reference.\n\n labels_pred : array-like of shape (n_samples,)\n Cluster labels to evaluate.\n\n Returns\n -------\n homogeneity : float\n Score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling.\n\n See Also\n --------\n completeness_score : Completeness metric of cluster labeling.\n v_measure_score : V-Measure (NMI with arithmetic mean option).\n\n References\n ----------\n\n .. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A\n conditional entropy-based external cluster evaluation measure\n <https://aclweb.org/anthology/D/D07/D07-1043.pdf>`_\n\n Examples\n --------\n\n Perfect labelings are homogeneous::\n\n >>> from sklearn.metrics.cluster import homogeneity_score\n >>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])\n 1.0\n\n Non-perfect labelings that further split classes into more clusters can be\n perfectly homogeneous::\n\n >>> print(\"%.6f\" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))\n 1.000000\n >>> print(\"%.6f\" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3]))\n 1.000000\n\n Clusters that include samples from different classes do not make for an\n homogeneous labeling::\n\n >>> print(\"%.6f\" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))\n 0.0...\n >>> print(\"%.6f\" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))\n 0.0...\n ", "language": "en", "n_whitespaces": 443, "n_words": 263, "vocab_size": 162 }
https://github.com/scikit-learn/scikit-learn.git
13
config
def config(self, s):
    from traitlets.config.loader import Config

    # some IPython objects are Configurable, but do not yet have
    # any configurable traits.  Exclude them from the effects of
    # this magic, as their presence is just noise:
    configurables = sorted(set([ c for c in self.shell.configurables
                                 if c.__class__.class_traits(config=True)
                                 ]), key=lambda x: x.__class__.__name__)
    classnames = [ c.__class__.__name__ for c in configurables ]

    line = s.strip()

    if not line:
        # print available configurable names
        print("Available objects for config:")
        for name in classnames:
            print("    ", name)
        return
    elif line in classnames:
        # `%config TerminalInteractiveShell` will print trait info for
        # TerminalInteractiveShell
        c = configurables[classnames.index(line)]
        cls = c.__class__
        help = cls.class_get_help(c)
        # strip leading '--' from cl-args:
        help = re.sub(re.compile(r'^--', re.MULTILINE), '', help)
        print(help)
        return
    elif reg.match(line):
        cls, attr = line.split('.')
        return getattr(configurables[classnames.index(cls)],attr)
    elif '=' not in line:
        msg = "Invalid config statement: %r, "\
              "should be `Class.trait = value`."
        ll = line.lower()
        for classname in classnames:
            if ll == classname.lower():
                msg = msg + '\nDid you mean %s (note the case)?' % classname
                break

        raise UsageError( msg % line)

    # otherwise, assume we are setting configurables.
    # leave quotes on args when splitting, because we want
    # unquoted args to eval in user_ns
    cfg = Config()
    exec("cfg."+line, self.shell.user_ns, locals())

    for configurable in configurables:
        try:
            configurable.update_config(cfg)
        except Exception as e:
            error(e)
93c8b4d5380d861bf77c590660b93e495bef893b
16
config.py
463
Update ipdoctest test
52,525
0
775
276
148
208,792
216
ipython
48
IPython/core/magics/config.py
Python
38
{ "docstring": "configure IPython\n\n %config Class[.trait=value]\n\n This magic exposes most of the IPython config system. Any\n Configurable class should be able to be configured with the simple\n line::\n\n %config Class.trait=value\n\n Where `value` will be resolved in the user's namespace, if it is an\n expression or variable name.\n\n Examples\n --------\n\n To see what classes are available for config, pass no arguments::\n\n In [1]: %config\n Available objects for config:\n AliasManager\n DisplayFormatter\n HistoryManager\n IPCompleter\n LoggingMagics\n MagicsManager\n OSMagics\n PrefilterManager\n ScriptMagics\n TerminalInteractiveShell\n\n To view what is configurable on a given class, just pass the class\n name::\n\n In [2]: %config IPCompleter\n IPCompleter(Completer) options\n ----------------------------\n IPCompleter.backslash_combining_completions=<Bool>\n Enable unicode completions, e.g. \\\\alpha<tab> . Includes completion of latex\n commands, unicode names, and expanding unicode characters back to latex\n commands.\n Current: True\n IPCompleter.debug=<Bool>\n Enable debug for the Completer. Mostly print extra information for\n experimental jedi integration.\n Current: False\n IPCompleter.disable_matchers=<list-item-1>...\n List of matchers to disable.\n Current: []\n IPCompleter.greedy=<Bool>\n Activate greedy completion\n PENDING DEPRECATION. this is now mostly taken care of with Jedi.\n This will enable completion on elements of lists, results of function calls, etc.,\n but can be unsafe because the code is actually evaluated on TAB.\n Current: False\n IPCompleter.jedi_compute_type_timeout=<Int>\n Experimental: restrict time (in milliseconds) during which Jedi can compute types.\n Set to 0 to stop computing types. Non-zero value lower than 100ms may hurt\n performance by preventing jedi to build its cache.\n Current: 400\n IPCompleter.limit_to__all__=<Bool>\n DEPRECATED as of version 5.0.\n Instruct the completer to use __all__ for the completion\n Specifically, when completing on ``object.<tab>``.\n When True: only those names in obj.__all__ will be included.\n When False [default]: the __all__ attribute is ignored\n Current: False\n IPCompleter.merge_completions=<Bool>\n Whether to merge completion results into a single list\n If False, only the completion results from the first non-empty\n completer will be returned.\n As of version 8.5.0, setting the value to ``False`` is an alias for:\n ``IPCompleter.suppress_competing_matchers = True.``.\n Current: True\n IPCompleter.omit__names=<Enum>\n Instruct the completer to omit private method names\n Specifically, when completing on ``object.<tab>``.\n When 2 [default]: all names that start with '_' will be excluded.\n When 1: all 'magic' names (``__foo__``) will be excluded.\n When 0: nothing will be excluded.\n Choices: any of [0, 1, 2]\n Current: 2\n IPCompleter.profile_completions=<Bool>\n If True, emit profiling data for completion subsystem using cProfile.\n Current: False\n IPCompleter.profiler_output_dir=<Unicode>\n Template for path at which to output profile data for completions.\n Current: '.completion_profiles'\n IPCompleter.suppress_competing_matchers=<Union>\n Whether to suppress completions from other `Matchers`_.\n When set to ``None`` (default) the matchers will attempt to auto-detect\n whether suppression of other matchers is desirable. 
For example, at the\n beginning of a line followed by `%` we expect a magic completion to be the\n only applicable option, and after ``my_dict['`` we usually expect a\n completion with an existing dictionary key.\n If you want to disable this heuristic and see completions from all matchers,\n set ``IPCompleter.suppress_competing_matchers = False``. To disable the\n heuristic for specific matchers provide a dictionary mapping:\n ``IPCompleter.suppress_competing_matchers = {'IPCompleter.dict_key_matcher':\n False}``.\n Set ``IPCompleter.suppress_competing_matchers = True`` to limit completions\n to the set of matchers with the highest priority; this is equivalent to\n ``IPCompleter.merge_completions`` and can be beneficial for\n performance, but will sometimes omit relevant candidates from matchers\n further down the priority list.\n Current: False\n IPCompleter.use_jedi=<Bool>\n Experimental: Use Jedi to generate autocompletions. Default to True if jedi\n is installed.\n Current: True\n\n but the real use is in setting values::\n\n In [3]: %config IPCompleter.greedy = True\n\n and these values are read from the user_ns if they are variables::\n\n In [4]: feeling_greedy=False\n\n In [5]: %config IPCompleter.greedy = feeling_greedy\n\n ", "language": "en", "n_whitespaces": 2076, "n_words": 566, "vocab_size": 324 }
https://github.com/ipython/ipython.git
1
test_ne_filters
def test_ne_filters(self, ds, documents):
    ds.write_documents(documents)

    result = ds.get_all_documents(filters={"year": {"$ne": "2020"}})
    assert len(result) == 3
4dfddf0d1039e134b1ce5aac748de853e2516735
14
test_weaviate.py
72
refactor: Refactor Weaviate tests (#3541) * refactor tests * fix job * revert * revert * revert * use latest weaviate * fix abstract methods signatures * pass class_name to all the CRUD methods * finish moving all the tests * bump weaviate version * raise, don't pass
75,197
0
42
41
14
258,198
14
haystack
9
test/document_stores/test_weaviate.py
Python
4
{ "docstring": "\n Weaviate doesn't include documents if the field is missing,\n so we customize this test\n ", "language": "en", "n_whitespaces": 36, "n_words": 14, "vocab_size": 14 }
https://github.com/deepset-ai/haystack.git
1
previewoutput
def previewoutput(self) -> Tuple[Image.Image, ImageTk.PhotoImage]:
    assert self._previewoutput is not None
    return self._previewoutput
dab823a3eb7a5257cb1e0818ee10ed234d3de97f
7
image.py
44
Typing - lib.gui.display_command
21,319
0
33
28
11
101,941
12
faceswap
7
lib/gui/utils/image.py
Python
9
{ "docstring": " Tuple: First item in the tuple is the extract or convert preview image\n (:class:`PIL.Image`), the second item is the image in a format that tkinter can display\n (:class:`PIL.ImageTK.PhotoImage`).\n\n The value of the property is ``None`` if no extract or convert task is running or there are\n no files available in the output folder. ", "language": "en", "n_whitespaces": 82, "n_words": 53, "vocab_size": 36 }
https://github.com/deepfakes/faceswap.git
9
fit
def fit(self, df):
    # threshold - items below this number get set to zero in cooccurrence counts
    df.createOrReplaceTempView(self.f("{prefix}df_train_input"))

    if self.timedecay_formula:
        # WARNING: previously we would take the last value in training dataframe and set it
        # as a matrix U element
        # for each user-item pair. Now with time decay, we compute a sum over ratings given
        # by a user in the case
        # when T=np.inf, so user gets a cumulative sum of ratings for a particular item and
        # not the last rating.
        # Time Decay
        # does a group by on user item pairs and apply the formula for time decay there
        # Time T parameter is in days and input time is in seconds,
        # so we do dt/60/(T*24*60)=dt/(T*24*3600)
        # the following is the query which we want to run
        query = self.f(
        )

        # replace with timedecayed version
        df = self.spark.sql(query)
    else:
        # since SQL is case-insensitive, this check needs to be performed similar
        if self.header["col_timestamp"].lower() in [
            s.name.lower() for s in df.schema
        ]:
            # we need to de-duplicate items by using the latest item
            query = self.f(
            )

            df = self.spark.sql(query)

    df.createOrReplaceTempView(self.f("{prefix}df_train"))

    log.info("sarplus.fit 1/2: compute item cooccurrences...")

    # compute cooccurrence above minimum threshold
    query = self.f(
    )

    item_cooccurrence = self.spark.sql(query)
    item_cooccurrence.write.mode("overwrite").saveAsTable(
        self.f("{prefix}item_cooccurrence")
    )

    # compute the diagonal used later for Jaccard and Lift
    if self.similarity_type == SIM_LIFT or self.similarity_type == SIM_JACCARD:
        item_marginal = self.spark.sql(
            self.f(
                "SELECT i1 i, value AS margin FROM {prefix}item_cooccurrence WHERE i1 = i2"
            )
        )
        item_marginal.createOrReplaceTempView(self.f("{prefix}item_marginal"))

    if self.similarity_type == SIM_COOCCUR:
        self.item_similarity = item_cooccurrence
    elif self.similarity_type == SIM_JACCARD:
        query = self.f(
        )

        self.item_similarity = self.spark.sql(query)
    elif self.similarity_type == SIM_LIFT:
        query = self.f(
        )

        self.item_similarity = self.spark.sql(query)
    else:
        raise ValueError(
            "Unknown similarity type: {0}".format(self.similarity_type)
        )

    # store upper triangular
    log.info(
        "sarplus.fit 2/2: compute similarity metric %s..." % self.similarity_type
    )
    self.item_similarity.write.mode("overwrite").saveAsTable(
        self.f("{prefix}item_similarity_upper")
    )

    # expand upper triangular to full matrix
    query = self.f(
    )

    self.item_similarity = self.spark.sql(query)
    self.item_similarity.write.mode("overwrite").saveAsTable(
        self.f("{prefix}item_similarity")
    )

    # free space
    self.spark.sql(self.f("DROP TABLE {prefix}item_cooccurrence"))
    self.spark.sql(self.f("DROP TABLE {prefix}item_similarity_upper"))

    self.item_similarity = self.spark.table(self.f("{prefix}item_similarity"))
2b98f1045321475f6537986af134fb53f8320268
14
SARPlus.py
669
Correct typos
7,143
0
1,172
375
175
39,221
329
recommenders
29
contrib/sarplus/python/pysarplus/SARPlus.py
Python
109
{ "docstring": "Main fit method for SAR.\n\n Expects the dataframes to have row_id, col_id columns which are indexes,\n i.e. contain the sequential integer index of the original alphanumeric user and item IDs.\n Dataframe also contains rating and timestamp as floats; timestamp is in seconds since Epoch by default.\n\n Arguments:\n df (pySpark.DataFrame): input dataframe which contains the index of users and items.\n \n SELECT\n {col_user}, {col_item}, \n SUM({col_rating} * EXP(-log(2) * (latest_timestamp - CAST({col_timestamp} AS long)) / ({time_decay_coefficient} * 3600 * 24))) as {col_rating}\n FROM {prefix}df_train_input,\n (SELECT CAST(MAX({col_timestamp}) AS long) latest_timestamp FROM {prefix}df_train_input)\n GROUP BY {col_user}, {col_item} \n CLUSTER BY {col_user} \n \n SELECT {col_user}, {col_item}, {col_rating}\n FROM\n (\n SELECT\n {col_user}, {col_item}, {col_rating}, \n ROW_NUMBER() OVER (PARTITION BY {col_user}, {col_item} ORDER BY {col_timestamp} DESC) latest\n FROM {prefix}df_train_input\n )\n WHERE latest = 1\n \n SELECT A.{col_item} i1, B.{col_item} i2, COUNT(*) value\n FROM {prefix}df_train A INNER JOIN {prefix}df_train B\n ON A.{col_user} = B.{col_user} AND A.{col_item} <= b.{col_item} \n GROUP BY A.{col_item}, B.{col_item}\n HAVING COUNT(*) >= {threshold}\n CLUSTER BY i1, i2\n \n SELECT i1, i2, value / (M1.margin + M2.margin - value) AS value\n FROM {prefix}item_cooccurrence A \n INNER JOIN {prefix}item_marginal M1 ON A.i1 = M1.i \n INNER JOIN {prefix}item_marginal M2 ON A.i2 = M2.i\n CLUSTER BY i1, i2\n \n SELECT i1, i2, value / (M1.margin * M2.margin) AS value\n FROM {prefix}item_cooccurrence A \n INNER JOIN {prefix}item_marginal M1 ON A.i1 = M1.i \n INNER JOIN {prefix}item_marginal M2 ON A.i2 = M2.i\n CLUSTER BY i1, i2\n \n SELECT i1, i2, value\n FROM\n (\n (SELECT i1, i2, value FROM {prefix}item_similarity_upper)\n UNION ALL\n (SELECT i2 i1, i1 i2, value FROM {prefix}item_similarity_upper WHERE i1 <> i2)\n )\n CLUSTER BY i1\n ", "language": "en", "n_whitespaces": 854, "n_words": 255, "vocab_size": 133 }
https://github.com/microsoft/recommenders.git
4
check_docker_permission
def check_docker_permission(verbose) -> bool:
    permission_denied = False
    docker_permission_command = ["docker", "info"]
    try:
        _ = run_command(
            docker_permission_command,
            verbose=verbose,
            no_output_dump_on_exception=True,
            capture_output=True,
            text=True,
            check=True,
        )
    except subprocess.CalledProcessError as ex:
        permission_denied = True
        if ex.stdout and 'Got permission denied while trying to connect' in ex.stdout:
            console.print('ERROR: You have `permission denied` error when trying to communicate with docker.')
            console.print(
                'Most likely you need to add your user to `docker` group: \
https://docs.docker.com/engine/install/linux-postinstall/ .'
            )
    return permission_denied
4ffd4f09532fceb67675fce4c1f5cd383eff992e
13
docker_command_utils.py
139
Prepare Breeze2 for prime time :) (#22713) This is a review and clean-up for all the parameters and commands for Breeze2 in order to prepare it for being used by the contribugors. There are various small fixes here and there, removal of duplicated code, refactoring and moving code around as well as cleanup and review all the parameters used for all implemented commands. The parameters, default values and their behaviours were updated to match "new" life of Breeze rather than old one. Some improvements are made to the autocomplete and click help messages printed. Full list of choices is always displayed, parameters are groups according to their target audience, and they were sorted according to importance and frequency of use. Various messages have been colourised according to their meaning - warnings as yellow, errors as red and informational messages as bright_blue. The `dry-run` option has been added to just show what would have been run without actually running some potentially "write" commands (read commands are still executed) so that you can easily verify and manually copy and execute the commands with option to modify them before. The `dry_run` and `verbose` options are now used for all commands. The "main" command now runs "shell" by default similarly as the original Breeze. All "shortcut" parameters have been standardized - i.e common options (verbose/dry run/help) have one and all common flags that are likely to be used often have an assigned shortcute. The "stop" and "cleanup" command have been added as they are necessary for average user to complete the regular usage cycle. Documentation for all the important methods have been updated.
8,991
0
247
82
62
46,787
72
airflow
17
dev/breeze/src/airflow_breeze/utils/docker_command_utils.py
Python
29
{ "docstring": "\n Checks if we have permission to write to docker socket. By default, on Linux you need to add your user\n to docker group and some new users do not realize that. We help those users if we have\n permission to run docker commands.\n\n :param verbose: print commands when running\n :return: True if permission is denied.\n ", "language": "en", "n_whitespaces": 74, "n_words": 55, "vocab_size": 42 }
https://github.com/apache/airflow.git
1
test_ppo_legacy_config
def test_ppo_legacy_config(self):
    ppo_config = ppo.DEFAULT_CONFIG

    # Expect warning.
    print(f"Accessing learning-rate from legacy config dict: {ppo_config['lr']}")

    # Build Algorithm.
    ppo_trainer = ppo.PPO(config=ppo_config, env="CartPole-v1")
    print(ppo_trainer.train())
e50165492587556dac318418a0121e72bf93baa7
11
test_ppo.py
79
Revert "[RLlib] @deprecate(error=True|False) escalation. (#28733)" (#28795) Signed-off-by: Amog Kamsetty [email protected] Reverts #28733 Breaks ray-air/examples:rl_offline_example and ray-air/examples:rl_online_example
28,577
0
72
38
21
127,982
23
ray
11
rllib/algorithms/ppo/tests/test_ppo.py
Python
5
{ "docstring": "Tests, whether the old PPO config dict is still functional.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/ray-project/ray.git
3
command_coverage_analyze_targets_missing
def command_coverage_analyze_targets_missing(args):  # type: (CoverageAnalyzeTargetsMissingConfig) -> None
    host_state = prepare_profiles(args)  # coverage analyze targets missing

    if args.delegate:
        raise Delegate(host_state=host_state)

    from_targets, from_path_arcs, from_path_lines = read_report(args.from_file)
    to_targets, to_path_arcs, to_path_lines = read_report(args.to_file)
    target_indexes = {}  # type: TargetIndexes

    if args.only_gaps:
        arcs = find_gaps(from_path_arcs, from_targets, to_path_arcs, target_indexes, args.only_exists)
        lines = find_gaps(from_path_lines, from_targets, to_path_lines, target_indexes, args.only_exists)
    else:
        arcs = find_missing(from_path_arcs, from_targets, to_path_arcs, to_targets, target_indexes, args.only_exists)
        lines = find_missing(from_path_lines, from_targets, to_path_lines, to_targets, target_indexes, args.only_exists)

    report = make_report(target_indexes, arcs, lines)
    write_report(args, report, args.output_file)
a06fa496d3f837cca3c437ab6e9858525633d147
12
missing.py
216
ansible-test - Code cleanup and refactoring. (#77169) * Remove unnecessary PyCharm ignores. * Ignore intentional undefined attribute usage. * Add missing type hints. Fix existing type hints. * Fix docstrings and comments. * Use function to register completion handler. * Pass strings to display functions. * Fix CompositeAction handling of dest argument. * Use consistent types in expressions/assignments. * Use custom function to keep linters happy. * Add missing raise for custom exception. * Clean up key/value type handling in cloud plugins. * Use dataclass instead of dict for results. * Add custom type_guard function to check lists. * Ignore return type that can't be checked (yet). * Avoid changing types on local variables.
78,543
0
144
147
47
266,732
76
ansible
26
test/lib/ansible_test/_internal/commands/coverage/analyze/targets/missing.py
Python
15
{ "docstring": "Identify aggregated coverage in one file missing from another.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/ansible/ansible.git
7
get
def get(self, url, html=None, video_id=None, note=None, note2='Executing JS on webpage', headers={}, jscode='saveAndExit();'):
    if 'saveAndExit();' not in jscode:
        raise ExtractorError('`saveAndExit();` not found in `jscode`')
    if not html:
        html = self.extractor._download_webpage(url, video_id, note=note, headers=headers)
    with open(self._TMP_FILES['html'].name, 'wb') as f:
        f.write(html.encode('utf-8'))

    self._save_cookies(url)

    replaces = self.options
    replaces['url'] = url
    user_agent = headers.get('User-Agent') or std_headers['User-Agent']
    replaces['ua'] = user_agent.replace('"', '\\"')
    replaces['jscode'] = jscode

    for x in self._TMP_FILE_NAMES:
        replaces[x] = self._TMP_FILES[x].name.replace('\\', '\\\\').replace('"', '\\"')

    with open(self._TMP_FILES['script'].name, 'wb') as f:
        f.write(self._TEMPLATE.format(**replaces).encode('utf-8'))

    if video_id is None:
        self.extractor.to_screen('%s' % (note2,))
    else:
        self.extractor.to_screen('%s: %s' % (video_id, note2))

    p = subprocess.Popen([
        self.exe, '--ssl-protocol=any',
        self._TMP_FILES['script'].name
    ], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = process_communicate_or_kill(p)
    if p.returncode != 0:
        raise ExtractorError(
            'Executing JS failed\n:' + encodeArgument(err))
    with open(self._TMP_FILES['html'].name, 'rb') as f:
        html = f.read().decode('utf-8')

    self._load_cookies()

    return (html, encodeArgument(out))
0700fde6403aa9eec1ff02bff7323696a205900c
15
openload.py
604
[utils, etc] Kill child processes when yt-dl is killed * derived from PR #26592, closes #26592 Authored by: Unrud
22,352
0
404
352
92
106,355
121
youtube-dl
44
youtube_dl/extractor/openload.py
Python
33
{ "docstring": "\n Downloads webpage (if needed) and executes JS\n\n Params:\n url: website url\n html: optional, html code of website\n video_id: video id\n note: optional, displayed when downloading webpage\n note2: optional, displayed when executing JS\n headers: custom http headers\n jscode: code to be executed when page is loaded\n\n Returns tuple with:\n * downloaded website (after JS execution)\n * anything you print with `console.log` (but not inside `page.execute`!)\n\n In most cases you don't need to add any `jscode`.\n It is executed in `page.onLoadFinished`.\n `saveAndExit();` is mandatory, use it instead of `phantom.exit()`\n It is possible to wait for some element on the webpage, for example:\n var check = function() {\n var elementFound = page.evaluate(function() {\n return document.querySelector('#b.done') !== null;\n });\n if(elementFound)\n saveAndExit();\n else\n window.setTimeout(check, 500);\n }\n\n page.evaluate(function(){\n document.querySelector('#a').click();\n });\n check();\n ", "language": "en", "n_whitespaces": 446, "n_words": 125, "vocab_size": 99 }
https://github.com/ytdl-org/youtube-dl.git
1
all
def all(x, axis=None, keepdims=False):
    x = tf.cast(x, tf.bool)
    return tf.reduce_all(x, axis, keepdims)


@keras_export("keras.backend.argmax")
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
84afc5193d38057e2e2badf9c889ea87d80d8fbf
@keras_export("keras.backend.argmax") @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs
9
backend.py
85
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
80,171
1
21
37
15
269,545
15
keras
14
keras/backend.py
Python
3
{ "docstring": "Bitwise reduction (logical AND).\n\n Args:\n x: Tensor or variable.\n axis: axis along which to perform the reduction.\n keepdims: whether the drop or broadcast the reduction axes.\n\n Returns:\n A uint8 tensor (0s and 1s).\n ", "language": "en", "n_whitespaces": 70, "n_words": 33, "vocab_size": 29 }
https://github.com/keras-team/keras.git
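A minimal sketch of how the reduction in the record above behaves, assuming a standard TensorFlow/Keras install (the input values are illustrative):

import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K

x = tf.constant(np.array([[1, 1, 0], [1, 1, 1]]))
# Elements are cast to bool, then AND-reduced along axis 1.
print(K.all(x, axis=1).numpy())  # -> [False  True]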
1
test_get_db_records
def test_get_db_records(self):
    string = StringIndexer.objects.create(organization_id=123, string="oop")
    collection = KeyCollection({123: {"oop"}})
    key = "123:oop"

    assert indexer_cache.get(key) is None
    assert indexer_cache.get(string.id) is None

    self.indexer._get_db_records(self.use_case_id, collection)

    assert indexer_cache.get(string.id) is None
    assert indexer_cache.get(key) is None
7f60db924ea37f34e0cfe6856777239e2a2ffe13
12
test_postgres_indexer.py
147
feat(metrics): make indexer more configurable (#35604) This makes the sentry_metrics indexer more configurable in the following ways, to enable indexing on the ingest-performance-metrics topic: - configurable input Kafka topic - configurable output Kafka topic - configurable model from which to pull index results - tags for internal metrics to distinguish between the two modes operationally
18,794
0
94
89
18
91,722
31
sentry
16
tests/sentry/sentry_metrics/test_postgres_indexer.py
Python
9
{ "docstring": "\n Make sure that calling `_get_db_records` doesn't populate the cache\n ", "language": "en", "n_whitespaces": 24, "n_words": 9, "vocab_size": 9 }
https://github.com/getsentry/sentry.git
2
weekday
def weekday(year, month, day):
    if not datetime.MINYEAR <= year <= datetime.MAXYEAR:
        year = 2000 + year % 400
    return datetime.date(year, month, day).weekday()
8198943edd73a363c266633e1aa5b2a9e9c9f526
10
calendar.py
68
add python 3.10.4 for windows
56,278
0
38
44
18
221,226
22
XX-Net
8
python3.10.4/Lib/calendar.py
Python
4
{ "docstring": "Return weekday (0-6 ~ Mon-Sun) for year, month (1-12), day (1-31).", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
https://github.com/XX-net/XX-Net.git
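A short worked example of the year-folding logic above; the second call exercises the out-of-range branch, which is safe because the Gregorian calendar repeats every 400 years (146097 days, an exact multiple of 7):

import calendar

print(calendar.weekday(2024, 2, 29))   # -> 3 (Thursday; 0 = Monday)
# 12024 exceeds datetime.MAXYEAR, so it folds to 2000 + 12024 % 400 = 2024:
print(calendar.weekday(12024, 2, 29))  # -> 3 as well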
5
select_config
def select_config(Xraw, yraw, current, newpoint, bounds, num_f):
    length = select_length(Xraw, yraw, bounds, num_f)

    Xraw = Xraw[-length:, :]
    yraw = yraw[-length:]

    base_vals = np.array(list(bounds.values())).T
    oldpoints = Xraw[:, :num_f]
    old_lims = np.concatenate(
        (np.max(oldpoints, axis=0), np.min(oldpoints, axis=0))
    ).reshape(2, oldpoints.shape[1])
    limits = np.concatenate((old_lims, base_vals), axis=1)

    X = normalize(Xraw, limits)
    y = standardize(yraw).reshape(yraw.size, 1)

    fixed = normalize(newpoint, oldpoints)

    kernel = TV_SquaredExp(
        input_dim=X.shape[1], variance=1.0, lengthscale=1.0, epsilon=0.1
    )

    try:
        m = GPy.models.GPRegression(X, y, kernel)
    except np.linalg.LinAlgError:
        # add diagonal ** we would ideally make this something more robust...
        X += np.eye(X.shape[0]) * 1e-3
        m = GPy.models.GPRegression(X, y, kernel)

    try:
        m.optimize()
    except np.linalg.LinAlgError:
        # add diagonal ** we would ideally make this something more robust...
        X += np.eye(X.shape[0]) * 1e-3
        m = GPy.models.GPRegression(X, y, kernel)
        m.optimize()

    m.kern.lengthscale.fix(m.kern.lengthscale.clip(1e-5, 1))

    if current is None:
        m1 = deepcopy(m)
    else:
        # add the current trials to the dataset
        padding = np.array([fixed for _ in range(current.shape[0])])
        current = normalize(current, base_vals)
        current = np.hstack((padding, current))

        Xnew = np.vstack((X, current))
        ypad = np.zeros(current.shape[0])
        ypad = ypad.reshape(-1, 1)
        ynew = np.vstack((y, ypad))

        # kernel = GPy.kern.RBF(input_dim=X.shape[1], variance=1.,
        #                       lengthscale=1.)
        kernel = TV_SquaredExp(
            input_dim=X.shape[1], variance=1.0, lengthscale=1.0, epsilon=0.1
        )
        m1 = GPy.models.GPRegression(Xnew, ynew, kernel)
        m1.optimize()

    xt = optimize_acq(UCB, m, m1, fixed, num_f)

    # convert back...
    xt = xt * (np.max(base_vals, axis=0) - np.min(base_vals, axis=0)) + np.min(
        base_vals, axis=0
    )

    xt = xt.astype(np.float32)
    return xt
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
17
pb2.py
783
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
29,714
0
497
536
127
132,255
216
ray
63
python/ray/tune/schedulers/pb2.py
Python
49
{ "docstring": "Selects the next hyperparameter config to try.\n\n This function takes the formatted data, fits the GP model and optimizes the\n UCB acquisition function to select the next point.\n\n Args:\n Xraw (np.array): The un-normalized array of hyperparams, Time and\n Reward\n yraw (np.array): The un-normalized vector of reward changes.\n current (list): The hyperparams of trials currently running. This is\n important so we do not select the same config twice. If there is\n data here then we fit a second GP including it\n (with fake y labels). The GP variance doesn't depend on the y\n labels so it is ok.\n newpoint (np.array): The Reward and Time for the new point.\n We cannot change these as they are based on the *new weights*.\n bounds (dict): Bounds for the hyperparameters. Used to normalize.\n num_f (int): The number of fixed params. Almost always 2 (reward+time)\n\n Return:\n xt (np.array): A vector of new hyperparameters.\n ", "language": "en", "n_whitespaces": 277, "n_words": 147, "vocab_size": 100 }
https://github.com/ray-project/ray.git
1
broadcast
def broadcast(self, tensors, broadcast_options=BroadcastOptions()):
    root_rank = broadcast_options.root_rank
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
7
gloo_collective_group.py
32
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
29,889
0
21
31
7
132,977
7
ray
6
python/ray/util/collective/collective_group/gloo_collective_group.py
Python
13
{ "docstring": "Broadcast tensors to all other processes following options.\n\n Args:\n tensors (List): tensors to be broadcast or received.\n broadcast_options: broadcast options.\n\n Returns:\n None\n ", "language": "en", "n_whitespaces": 76, "n_words": 22, "vocab_size": 17 }
https://github.com/ray-project/ray.git
8
_handle_fk_field_node
def _handle_fk_field_node(self, node, field):
    # Check if there is a child node named 'None', returning None if so.
    if node.getElementsByTagName("None"):
        return None
    else:
        model = field.remote_field.model
        if hasattr(model._default_manager, "get_by_natural_key"):
            keys = node.getElementsByTagName("natural")
            if keys:
                # If there are 'natural' subelements, it must be a natural key
                field_value = [getInnerText(k).strip() for k in keys]
                try:
                    obj = model._default_manager.db_manager(
                        self.db
                    ).get_by_natural_key(*field_value)
                except ObjectDoesNotExist:
                    if self.handle_forward_references:
                        return base.DEFER_FIELD
                    else:
                        raise
                obj_pk = getattr(obj, field.remote_field.field_name)
                # If this is a natural foreign key to an object that
                # has a FK/O2O as the foreign key, use the FK value
                if field.remote_field.model._meta.pk.remote_field:
                    obj_pk = obj_pk.pk
            else:
                # Otherwise, treat like a normal PK
                field_value = getInnerText(node).strip()
                obj_pk = model._meta.get_field(
                    field.remote_field.field_name
                ).to_python(field_value)
            return obj_pk
        else:
            field_value = getInnerText(node).strip()
            return model._meta.get_field(field.remote_field.field_name).to_python(
                field_value
            )
9c19aff7c7561e3a82978a272ecdaad40dda5c00
20
xml_serializer.py
324
Refs #33476 -- Reformatted code with Black.
50,882
0
769
194
83
204,769
126
django
29
django/core/serializers/xml_serializer.py
Python
32
{ "docstring": "\n Handle a <field> node for a ForeignKey\n ", "language": "en", "n_whitespaces": 22, "n_words": 7, "vocab_size": 6 }
https://github.com/django/django.git
3
_get_relevant_site_root_paths
def _get_relevant_site_root_paths(self, cache_object=None):
    return tuple(
        srp
        for srp in self._get_site_root_paths(cache_object)
        if self.url_path.startswith(srp.root_path)
    )
29a7f701611d22ab9c7f12f1134aeac1d31b9438
11
__init__.py
56
Add Page._get_relevant_site_root_paths() for use in signal handlers
15,553
0
67
35
12
70,769
13
wagtail
9
wagtail/core/models/__init__.py
Python
6
{ "docstring": "\n .. versionadded::2.16\n\n Returns a tuple of root paths for all sites this page belongs to.\n ", "language": "en", "n_whitespaces": 37, "n_words": 15, "vocab_size": 15 }
https://github.com/wagtail/wagtail.git
14
_get_data_iterator_from_dataset
def _get_data_iterator_from_dataset(dataset, dataset_type_spec):
  if dataset_type_spec == list:
    if len(dataset) == 0:
      raise ValueError('Received an empty list dataset. '
                       'Please provide a non-empty list of arrays.')

    if _get_type_spec(dataset[0]) is np.ndarray:
      expected_shape = dataset[0].shape
      for i, element in enumerate(dataset):
        if np.array(element).shape[0] != expected_shape[0]:
          raise ValueError('Received a list of NumPy arrays with different '
                           f'lengths. Mismatch found at index {i}, '
                           f'Expected shape={expected_shape} '
                           f'Received shape={np.array(element).shape}.'
                           f'Please provide a list of NumPy arrays with '
                           f'the same length.')
    else:
      raise ValueError('Expected a list of `numpy.ndarray` objects,'
                       f'Received: {type(dataset[0])}')

    return iter(zip(*dataset))
  elif dataset_type_spec == tuple:
    if len(dataset) == 0:
      raise ValueError('Received an empty list dataset.'
                       'Please provide a non-empty tuple of arrays.')

    if _get_type_spec(dataset[0]) is np.ndarray:
      expected_shape = dataset[0].shape
      for i, element in enumerate(dataset):
        if np.array(element).shape[0] != expected_shape[0]:
          raise ValueError('Received a tuple of NumPy arrays with different '
                           f'lengths. Mismatch found at index {i}, '
                           f'Expected shape={expected_shape} '
                           f'Received shape={np.array(element).shape}.'
                           f'Please provide a tuple of NumPy arrays with '
                           'the same length.')
    else:
      raise ValueError('Expected a tuple of `numpy.ndarray` objects, '
                       f'Received: {type(dataset[0])}')

    return iter(zip(*dataset))
  elif dataset_type_spec == tf.data.Dataset:
    if is_batched(dataset):
      dataset = dataset.unbatch()
    return iter(dataset)
  elif dataset_type_spec == np.ndarray:
    return iter(dataset)
06f5ef7989db314ee210455b04fe6f71e8dc57a7
22
dataset_utils.py
486
Export split_dataset utility. PiperOrigin-RevId: 447783753
79,995
0
651
248
73
269,271
188
keras
24
keras/utils/dataset_utils.py
Python
43
{ "docstring": "Get the iterator from a dataset.\n\n Args:\n dataset : A `tf.data.Dataset` object or a list/tuple of arrays.\n dataset_type_spec : the type of the dataset\n\n Raises:\n ValueError:\n - If the dataset is empty.\n - If the dataset is not a `tf.data.Dataset` object\n or a list/tuple of arrays.\n - If the dataset is a list/tuple of arrays and the\n length of the list/tuple is not equal to the number\n\n Returns:\n iterator: An `iterator` object.\n ", "language": "en", "n_whitespaces": 176, "n_words": 72, "vocab_size": 36 }
https://github.com/keras-team/keras.git
1
get
def get(self):
    """get() -> return TestClass's associated value.

    >>> x = _TestClass(-42)
    >>> print(x.get())
    -42
    """
    return self.val


__test__ = {"_TestClass": _TestClass,
            "string": r"""
                      Example of a string object, searched as-is.
                      >>> x = 1; y = 2
                      >>> x + y, x * y
                      (3, 2)
                      """,
            "bool-int equivalence": r"""
                                    In 2.2, boolean expressions displayed
                                    0 or 1.  By default, we still accept
                                    them.  This can be disabled by passing
                                    DONT_ACCEPT_TRUE_FOR_1 to the new
                                    optionflags argument.

                                    >>> 4 == 4
                                    1
                                    >>> 4 == 4
                                    True
                                    >>> 4 > 4
                                    0
                                    >>> 4 > 4
                                    False
                                    """,
            "blank lines": r"""
            Blank lines can be marked with <BLANKLINE>:
                >>> print('foo\n\nbar\n')
                foo
                <BLANKLINE>
                bar
                <BLANKLINE>
            """,
            "ellipsis": r"""
            If the ellipsis flag is used, then '...' can be used to
            elide substrings in the desired output:
                >>> print(list(range(1000))) #doctest: +ELLIPSIS
                [0, 1, 2, ..., 999]
            """,
            "whitespace normalization": r"""
            If the whitespace normalization flag is used, then
            differences in whitespace are ignored.
                >>> print(list(range(30))) #doctest: +NORMALIZE_WHITESPACE
                [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
                 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
                 27, 28, 29]
            """,
            }
8198943edd73a363c266633e1aa5b2a9e9c9f526
7
doctest.py
78
add python 3.10.4 for windows
56,895
0
100
10
18
223,429
22
XX-Net
5
python3.10.4/Lib/doctest.py
Python
2
{ "docstring": "get() -> return TestClass's associated value.\n\n >>> x = _TestClass(-42)\n >>> print(x.get())\n -42\n \n Example of a string object, searched as-is.\n >>> x = 1; y = 2\n >>> x + y, x * y\n (3, 2)\n \n In 2.2, boolean expressions displayed\n 0 or 1. By default, we still accept\n them. This can be disabled by passing\n DONT_ACCEPT_TRUE_FOR_1 to the new\n optionflags argument.\n >>> 4 == 4\n 1\n >>> 4 == 4\n True\n >>> 4 > 4\n 0\n >>> 4 > 4\n False\n \n Blank lines can be marked with <BLANKLINE>:\n >>> print('foo\\n\\nbar\\n')\n foo\n <BLANKLINE>\n bar\n <BLANKLINE>\n \n If the ellipsis flag is used, then '...' can be used to\n elide substrings in the desired output:\n >>> print(list(range(1000))) #doctest: +ELLIPSIS\n [0, 1, 2, ..., 999]\n \n If the whitespace normalization flag is used, then\n differences in whitespace are ignored.\n >>> print(list(range(30))) #doctest: +NORMALIZE_WHITESPACE\n [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,\n 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,\n 27, 28, 29]\n ", "language": "en", "n_whitespaces": 1118, "n_words": 169, "vocab_size": 123 }
https://github.com/XX-net/XX-Net.git
6
_where
def _where(condition, x=None, y=None):
    if x is None or y is None:
        raise ValueError("Either both or neither of the x and y arguments should "
                         "be provided to jax.numpy.where, got {} and {}."
                         .format(x, y))
    if not issubdtype(_dtype(condition), bool_):
        condition = lax.ne(condition, zeros_like(condition))
    x, y = _promote_dtypes(x, y)
    condition, x, y = broadcast_arrays(condition, x, y)
    try:
        is_always_empty = core.is_empty_shape(np.shape(x))
    except:
        is_always_empty = False  # can fail with dynamic shapes
    return lax.select(condition, x, y) if not is_always_empty else x


_WHERE_DOC = """\
At present, JAX does not support JIT-compilation of the single-argument form
of :py:func:`jax.numpy.where` because its output shape is data-dependent. The
three-argument form does not have a data-dependent shape and can be JIT-compiled
successfully. Alternatively, you can specify the optional ``size`` keyword:
if specified, the first ``size`` True elements will be returned; if there
are fewer True elements than ``size`` indicates, the index arrays will be
padded with ``fill_value`` (default is 0.)
"""

@_wraps(np.where, update_doc=False, lax_description=_WHERE_DOC)
d9dcd1394aedf760272f14c3560cd5415495c28a
@_wraps(np.where, update_doc=False, lax_description=_WHERE_DOC)
13
lax_numpy.py
217
djax: let make_jaxpr build dyn shape jaxprs
26,551
1
135
120
60
119,182
83
jax
25
jax/_src/numpy/lax_numpy.py
Python
12
{ "docstring": "\\\nAt present, JAX does not support JIT-compilation of the single-argument form\nof :py:func:`jax.numpy.where` because its output shape is data-dependent. The\nthree-argument form does not have a data-dependent shape and can be JIT-compiled\nsuccessfully. Alternatively, you can specify the optional ``size`` keyword:\nif specified, the first ``size`` True elements will be returned; if there\nare fewer True elements than ``size`` indicates, the index arrays will be\npadded with ``fill_value`` (default is 0.)\n", "language": "en", "n_whitespaces": 64, "n_words": 72, "vocab_size": 54 }
https://github.com/google/jax.git
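The record above documents why only the three-argument form of where is JIT-compatible; a minimal usage sketch of that form (the array values are illustrative, not from the source):

import jax.numpy as jnp

cond = jnp.array([True, False, True])
x = jnp.array([1, 2, 3])
y = jnp.array([10, 20, 30])

# Three-argument where has a static output shape, so it is JIT-safe.
print(jnp.where(cond, x, y))  # [ 1 20  3]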
1
_get_variables_path
def _get_variables_path(export_dir):
    return tf.io.gfile.join(
        tf.compat.as_text(_get_variables_dir(export_dir)),
        tf.compat.as_text(tf.saved_model.VARIABLES_FILENAME),
    )
84afc5193d38057e2e2badf9c889ea87d80d8fbf
11
saved_model_experimental.py
66
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
81,598
0
30
41
7
276,217
7
keras
11
keras/saving/saved_model_experimental.py
Python
5
{ "docstring": "Return the variables path, used as the prefix for checkpoint files.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 10 }
https://github.com/keras-team/keras.git
5
test_quantize_time_jitter
def test_quantize_time_jitter(self):
    i = j = None
    starting_key = quantize_time(self.now, 0, duration=10)
    for i in range(11):
        current_key = quantize_time(self.now + timedelta(seconds=i), 0, duration=10)
        if current_key != starting_key:
            break

    other_key = quantize_time(self.now, 5, duration=10)
    for j in range(11):
        current_key = quantize_time(self.now + timedelta(seconds=j), 5, duration=10)
        if current_key != other_key:
            break

    assert i != j
51403cc4c85c9c595a3b2d0ab5c2c1c4e33a3a1e
14
test_snuba.py
173
fix(sessions): Prevent query mutation behavior in `_prepare_query_params` (#31422) * fix(sessions): Prevent query mutation behavior in `_prepare_query_params` Fixes the behavior of `_prepare_query_params` that mutates the conditions passed from the query. * Add test that validates the change
19,259
0
176
113
26
95,947
53
sentry
13
tests/sentry/utils/test_snuba.py
Python
13
{ "docstring": "Different key hashes should change keys at different times\n\n While starting_key and other_key might begin as the same values they should change at different times\n ", "language": "en", "n_whitespaces": 39, "n_words": 25, "vocab_size": 20 }
https://github.com/getsentry/sentry.git
3
clip
def clip(a, min_a, max_a):
    if a < min_a:
        return min_a
    elif a > max_a:
        return max_a
    return a
8841e39a20b501e38091df126a62bb7440931089
8
simple_functions.py
42
Document and type `simple_functions.py` (#2674) * 🏷️ Add types to simple_functions.py * 💄 Neaten binary_search() Add spacing between signature and code. Remove and expressions and address IDE warnings * 📝 Add docstrings for functions in simple_functions.py * 🎨 Reorder functions alphabetically * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * 🐛 Reformat code examples These were causing checks to fail due to missing spaces after `>>>` I had wanted to change these to be more consistent with iterables.py anyway. * 🎨 Change single tics to double Change \` to \`` - this ensures that the variable names are actually displayed as code (and not italics) * improved docstrings, rewrote examples as doctests * fix (???) unrelated failing doctest * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fixed typo * Update manim/utils/simple_functions.py Co-authored-by: Luca <[email protected]> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Benjamin Hackl <[email protected]>
46,218
0
44
26
14
189,806
18
manim
4
manim/utils/simple_functions.py
Python
6
{ "docstring": "Clips ``a`` to the interval [``min_a``, ``max_a``].\n\n Accepts any comparable objects (i.e. those that support <, >).\n Returns ``a`` if it is between ``min_a`` and ``max_a``.\n Otherwise, whichever of ``min_a`` and ``max_a`` is closest.\n\n Examples\n --------\n ::\n\n >>> clip(15, 11, 20)\n 15\n >>> clip('a', 'h', 'k')\n 'h'\n ", "language": "en", "n_whitespaces": 96, "n_words": 47, "vocab_size": 42 }
https://github.com/ManimCommunity/manim.git
1
fromiter
def fromiter(*args, **kwargs):
    raise NotImplementedError(
        "jnp.fromiter() is not implemented because it may be non-pure and thus unsafe for use "
        "with JIT and other JAX transformations. Consider using jnp.asarray(np.fromiter(...)) "
        "instead, although care should be taken if np.fromiter is used within a jax transformations "
        "because of its potential side-effect of consuming the iterable object; for more information see "
        "https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#pure-functions")


@_wraps(np.fromfunction)
fbfc3d8edfbf8dc5eb2f38c2bb315646a94ad399
@_wraps(np.fromfunction)
9
lax_numpy.py
54
Better error messages for jnp.fromiter and jnp.fromfile
26,720
1
78
19
54
119,949
62
jax
7
jax/_src/numpy/lax_numpy.py
Python
7
{ "docstring": "Unimplemented JAX wrapper for jnp.fromiter.\n\n This function is left deliberately unimplemented because it may be non-pure and thus\n unsafe for use with JIT and other JAX transformations. Consider using\n ``jnp.asarray(np.fromiter(...))`` instead, although care should be taken if ``np.fromiter``\n is used within jax transformations because of its potential side-effect of consuming the\n iterable object; for more information see `Common Gotchas: Pure Functions\n <https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#pure-functions>`_.\n ", "language": "en", "n_whitespaces": 69, "n_words": 62, "vocab_size": 54 }
https://github.com/google/jax.git
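The error message above recommends materializing the iterator with NumPy first; a short sketch of that workaround (the generator here is hypothetical):

import numpy as np
import jax.numpy as jnp

# Consume the iterator eagerly with NumPy, then hand JAX a concrete array.
gen = (i * i for i in range(5))
arr = jnp.asarray(np.fromiter(gen, dtype=np.int32))
print(arr)  # [ 0  1  4  9 16]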
1
bcoo_multiply_sparse
def bcoo_multiply_sparse(lhs, rhs):
    out_data, out_indices, out_shape = _bcoo_multiply_sparse(
        lhs.data, lhs.indices, rhs.data, rhs.indices, lhs_spinfo=lhs._info,
        rhs_spinfo=rhs._info)
    return BCOO((out_data, out_indices), shape=out_shape)
3184dd65a222354bffa2466d9a375162f5649132
10
bcoo.py
82
[sparse] Update docstrings for bcoo primitives. PiperOrigin-RevId: 438685829
26,730
0
31
57
18
119,978
18
jax
14
jax/experimental/sparse/bcoo.py
Python
5
{ "docstring": "An element-wise multiplication of two sparse arrays.\n\n Args:\n lhs: A BCOO-format array.\n rhs: A BCOO-format array.\n\n Returns:\n An BCOO-format array containing the result.\n ", "language": "en", "n_whitespaces": 35, "n_words": 23, "vocab_size": 18 }
https://github.com/google/jax.git
6
sparse_top_k_categorical_matches
def sparse_top_k_categorical_matches(y_true, y_pred, k=5):
    reshape_matches = False
    y_true = tf.convert_to_tensor(y_true)
    y_pred = tf.convert_to_tensor(y_pred)
    y_true_rank = y_true.shape.ndims
    y_pred_rank = y_pred.shape.ndims
    y_true_org_shape = tf.shape(y_true)

    # Flatten y_pred to (batch_size, num_samples) and y_true to (num_samples,)
    if (y_true_rank is not None) and (y_pred_rank is not None):
        if y_pred_rank > 2:
            y_pred = tf.reshape(y_pred, [-1, y_pred.shape[-1]])
        if y_true_rank > 1:
            reshape_matches = True
            y_true = tf.reshape(y_true, [-1])

    matches = tf.cast(
        tf.math.in_top_k(
            predictions=y_pred, targets=tf.cast(y_true, "int32"), k=k
        ),
        dtype=backend.floatx(),
    )

    # returned matches is expected to have same shape as y_true input
    if reshape_matches:
        return tf.reshape(matches, shape=y_true_org_shape)

    return matches
84afc5193d38057e2e2badf9c889ea87d80d8fbf
15
metrics_utils.py
268
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
81,815
0
220
172
61
276,983
92
keras
22
keras/utils/metrics_utils.py
Python
22
{ "docstring": "Creates float Tensor, 1.0 for label-TopK_prediction match, 0.0 for mismatch.\n\n Args:\n y_true: tensor of true targets.\n y_pred: tensor of predicted targets.\n k: (Optional) Number of top elements to look at for computing accuracy.\n Defaults to 5.\n\n Returns:\n Match tensor: 1.0 for label-prediction match, 0.0 for mismatch.\n ", "language": "en", "n_whitespaces": 82, "n_words": 46, "vocab_size": 33 }
https://github.com/keras-team/keras.git
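For intuition about what the matcher above computes, a toy run of tf.math.in_top_k, the primitive the function wraps (the tensor values are made up for illustration):

import tensorflow as tf

y_true = tf.constant([2, 1])              # integer class labels
y_pred = tf.constant([[0.1, 0.3, 0.6],    # label 2 is in the top 2 here
                      [0.3, 0.2, 0.5]])   # label 1 is not in the top 2 here
matches = tf.cast(
    tf.math.in_top_k(predictions=y_pred, targets=y_true, k=2), tf.float32
)
print(matches.numpy())  # [1. 0.]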
2
validate_duplication
def validate_duplication(self):
    term = frappe.db.sql(
        """select name from `tabAcademic Term` where academic_year= %s and term_name= %s
        and docstatus<2 and name != %s""",
        (self.academic_year, self.term_name, self.name),
    )
    if term:
        frappe.throw(
            _(
                "An academic term with this 'Academic Year' {0} and 'Term Name' {1} already exists. Please modify these entries and try again."
            ).format(self.academic_year, self.term_name)
        )
494bd9ef78313436f0424b918f200dab8fc7c20b
14
academic_term.py
84
style: format code with black
14,043
0
27
53
35
65,856
38
erpnext
12
erpnext/education/doctype/academic_term/academic_term.py
Python
12
{ "docstring": "select name from `tabAcademic Term` where academic_year= %s and term_name= %s\n and docstatus<2 and name != %s", "language": "en", "n_whitespaces": 19, "n_words": 17, "vocab_size": 12 }
https://github.com/frappe/erpnext.git
3
get_repositories
def get_repositories(self) -> Sequence[JSONData]:
    # Explicitly typing to satisfy mypy.
    repos: JSONData = self.get_with_pagination(
        "/installation/repositories", response_key="repositories"
    )
    return [repo for repo in repos if not repo.get("archived")]  # XXX: Find alternative approach
d6bcead1be02914e9734ab23f5e476b3d6f3f2cb
11
client.py
73
fix(github): Add pagination when fetching repositories (#39750) We are not using pagination for Github's repositories endpoint. This means we were getting up to a maximum of 100 repositories. I do not know how no one hit any issues in the past. This is work to support WOR-2234 and creating automatic code mappings.
18,160
0
80
41
30
86,729
31
sentry
9
src/sentry/integrations/github/client.py
Python
9
{ "docstring": "\n This fetches all repositories accessible to the Github App\n https://docs.github.com/en/rest/apps/installations#list-repositories-accessible-to-the-app-installation\n ", "language": "en", "n_whitespaces": 32, "n_words": 10, "vocab_size": 10 }
https://github.com/getsentry/sentry.git
8
handle_display_options
def handle_display_options(self, option_order):
    from distutils.core import gen_usage

    # User just wants a list of commands -- we'll print it out and stop
    # processing now (ie. if they ran "setup --help-commands foo bar",
    # we ignore "foo bar").
    if self.help_commands:
        self.print_commands()
        print('')
        print(gen_usage(self.script_name))
        return 1

    # If user supplied any of the "display metadata" options, then
    # display that metadata in the order in which the user supplied the
    # metadata options.
    any_display_options = 0
    is_display_option = {}
    for option in self.display_options:
        is_display_option[option[0]] = 1

    for (opt, val) in option_order:
        if val and is_display_option.get(opt):
            opt = translate_longopt(opt)
            value = getattr(self.metadata, "get_" + opt)()
            if opt in ['keywords', 'platforms']:
                print(','.join(value))
            elif opt in ('classifiers', 'provides', 'requires',
                         'obsoletes'):
                print('\n'.join(value))
            else:
                print(value)
            any_display_options = 1

    return any_display_options
8198943edd73a363c266633e1aa5b2a9e9c9f526
17
dist.py
267
add python 3.10.4 for windows
56,798
0
461
152
88
222,884
122
XX-Net
22
python3.10.4/Lib/distutils/dist.py
Python
24
{ "docstring": "If there were any non-global \"display-only\" options\n (--help-commands or the metadata display options) on the command\n line, display the requested info and return true; else return\n false.\n ", "language": "en", "n_whitespaces": 55, "n_words": 27, "vocab_size": 23 }
https://github.com/XX-net/XX-Net.git
1
test_push_unread_count_group_by_room
def test_push_unread_count_group_by_room(self):
    # Carry out common push count tests and setup
    self._test_push_unread_count()

    # Carry out our option-value specific test
    #
    # This push should still only contain an unread count of 1 (for 1 unread room)
    self._check_push_attempt(6, 1)
40771773909cb03d9296e3f0505e4e32372f10aa
7
test_http.py
38
Prevent duplicate push notifications for room reads (#11835)
71,168
0
87
19
29
246,345
38
synapse
4
tests/push/test_http.py
Python
3
{ "docstring": "\n The HTTP pusher will group unread count by number of unread rooms.\n ", "language": "en", "n_whitespaces": 27, "n_words": 12, "vocab_size": 11 }
https://github.com/matrix-org/synapse.git
1
test_https_good_referer
def test_https_good_referer(self):
    req = self._get_POST_request_with_token()
    req._is_secure_override = True
    req.META["HTTP_HOST"] = "www.example.com"
    req.META["HTTP_REFERER"] = "https://www.example.com/somepage"
    mw = CsrfViewMiddleware(post_form_view)
    mw.process_request(req)
    resp = mw.process_view(req, post_form_view, (), {})
    self.assertIsNone(resp)
9c19aff7c7561e3a82978a272ecdaad40dda5c00
9
tests.py
118
Refs #33476 -- Reformatted code with Black.
50,121
0
88
68
20
202,419
25
django
13
tests/csrf_tests/tests.py
Python
9
{ "docstring": "\n A POST HTTPS request with a good referer is accepted.\n ", "language": "en", "n_whitespaces": 25, "n_words": 10, "vocab_size": 10 }
https://github.com/django/django.git
7
as_Boolean
def as_Boolean(e):
    from sympy.core.symbol import Symbol
    if e == True:
        return true
    if e == False:
        return false
    if isinstance(e, Symbol):
        z = e.is_zero
        if z is None:
            return e
        return false if z else true
    if isinstance(e, Boolean):
        return e
    raise TypeError('expecting bool or Boolean, not `%s`.' % e)


@sympify_method_args
0bed141b2d875829e5caf6923431185ba16c625a
@sympify_method_args
9
boolalg.py
117
Cache replacement tuples, do not lookup true/false, new replacements
49,274
1
124
71
33
199,450
51
sympy
14
sympy/logic/boolalg.py
Python
14
{ "docstring": "Like ``bool``, return the Boolean value of an expression, e,\n which can be any instance of :py:class:`~.Boolean` or ``bool``.\n\n Examples\n ========\n\n >>> from sympy import true, false, nan\n >>> from sympy.logic.boolalg import as_Boolean\n >>> from sympy.abc import x\n >>> as_Boolean(0) is false\n True\n >>> as_Boolean(1) is true\n True\n >>> as_Boolean(x)\n x\n >>> as_Boolean(2)\n Traceback (most recent call last):\n ...\n TypeError: expecting bool or Boolean, not `2`.\n >>> as_Boolean(nan)\n Traceback (most recent call last):\n ...\n TypeError: expecting bool or Boolean, not `nan`.\n\n ", "language": "en", "n_whitespaces": 144, "n_words": 81, "vocab_size": 53 }
https://github.com/sympy/sympy.git
4
call_exchanges
def call_exchanges(self, other_args):
    filters = (
        pycoingecko_model.EXCHANGES_FILTERS
        + coinpaprika_model.EXCHANGES_FILTERS
    )
    parser = argparse.ArgumentParser(
        prog="exchanges",
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description="""Shows Top Crypto Exchanges
            You can display only N number exchanges with --limit parameter.
            You can sort data by Trust_Score, Id, Name, Country, Year_Established, Trade_Volume_24h_BTC with --sortby
            Or you can sort data by 'name', 'currencies', 'markets', 'fiats', 'confidence',
            'volume_24h', 'volume_7d', 'volume_30d', 'sessions_per_month'
            if you are using the alternative source CoinPaprika
            and also with --reverse flag to sort ascending.
            Flag --urls will display urls.
            Displays: Trust_Score, Id, Name, Country, Year_Established, Trade_Volume_24h_BTC""",
    )
    parser.add_argument(
        "-l",
        "--limit",
        dest="limit",
        type=check_positive,
        help="display N number records",
        default=15,
    )
    parser.add_argument(
        "-s",
        "--sortby",
        dest="sortby",
        type=str,
        help="Sort by given column. Default: Rank",
        default="Rank",
        choices=filters,
    )
    parser.add_argument(
        "-r",
        "--reverse",
        action="store_true",
        dest="reverse",
        default=False,
        help=(
            "Data is sorted in descending order by default. "
            "Reverse flag will sort it in an ascending way. "
            "Only works when raw data is displayed."
        ),
    )
    parser.add_argument(
        "-u",
        "--urls",
        dest="urls",
        action="store_true",
        help="Flag to add a url column. Works only with CoinGecko source",
        default=False,
    )
    parser.add_argument(
        "--vs",
        help="Quoted currency. Default: USD. Works only with CoinPaprika source",
        dest="vs",
        default="USD",
        type=str,
        choices=CURRENCIES,
    )
    ns_parser = self.parse_known_args_and_warn(
        parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
    )
    if ns_parser:
        if ns_parser.source == "CoinGecko":
            pycoingecko_view.display_exchanges(
                limit=ns_parser.limit,
                export=ns_parser.export,
                sortby=ns_parser.sortby,
                ascend=ns_parser.reverse,
                links=ns_parser.urls,
            )
        elif ns_parser.source == "CoinPaprika":
            coinpaprika_view.display_all_exchanges(
                symbol=ns_parser.vs,
                limit=ns_parser.limit,
                ascend=ns_parser.reverse,
                sortby=ns_parser.sortby,
                export=ns_parser.export,
            )
0ae89d6cc20be84bf49c31e437fda38a845ebc68
14
overview_controller.py
446
Style fixing: removing --ascend/--descend (#3395)

* stocks candle to use reverse
* qa raw to use reverse
* etf candle to use reverse
* oss rossix to use reverse
* crypto/defi to use reverse
* crypto/disc to use reverse
* added test
* crypto/dd to use reverse
* crypto/onchain to use reverse
* crypto/ov to use revert
* forex candle to use revert
* conibase controller to use revert
* tests to use reverse
* covid to use reverse
* removing ascend
* removing ascend from econ
* more removing ascend
* more removing ascend
* more removing ascend
* fixing stuff on .md files
* fixed economy controller tests
* fixed screener tests
* fa controller to use comma separated when multiple inputs
85,855
0
1,004
278
108
286,532
143
OpenBBTerminal
42
openbb_terminal/cryptocurrency/overview/overview_controller.py
Python
83
{ "docstring": "Process exchanges commandShows Top Crypto Exchanges\n You can display only N number exchanges with --limit parameter.\n You can sort data by Trust_Score, Id, Name, Country, Year_Established, Trade_Volume_24h_BTC with --sortby\n Or you can sort data by 'name', 'currencies', 'markets', 'fiats', 'confidence',\n 'volume_24h', 'volume_7d', 'volume_30d', 'sessions_per_month'\n if you are using the alternative source CoinPaprika\n and also with --reverse flag to sort ascending.\n Flag --urls will display urls.\n Displays: Trust_Score, Id, Name, Country, Year_Established, Trade_Volume_24h_BTC", "language": "en", "n_whitespaces": 191, "n_words": 72, "vocab_size": 54 }
https://github.com/OpenBB-finance/OpenBBTerminal.git
4
autocomplete
def autocomplete(self):
    texts = []
    for field in self.search_fields:
        for current_field, value in self.prepare_field(self.obj, field):
            if isinstance(current_field, AutocompleteField):
                texts.append(value)

    return " ".join(texts)
d10f15e55806c6944827d801cd9c2d53f5da4186
14
mysql.py
91
Reformat with black
16,409
0
95
56
20
75,478
22
wagtail
13
wagtail/search/backends/database/mysql/mysql.py
Python
7
{ "docstring": "\n Returns all values to index as \"autocomplete\". This is the value of all AutocompleteFields\n ", "language": "en", "n_whitespaces": 29, "n_words": 14, "vocab_size": 13 }
https://github.com/wagtail/wagtail.git
1
test_issue_alert_team
def test_issue_alert_team(self, mock_func):
    # add a second organization
    org = self.create_organization(owner=self.user)
    OrganizationIntegration.objects.create(organization=org, integration=self.integration)

    # add a second user to the team so we can be sure it's only
    # sent once (to the team, and not to each individual user)
    user2 = self.create_user(is_superuser=False)
    self.create_member(teams=[self.team], user=user2, organization=self.organization)
    self.idp = IdentityProvider.objects.create(type="slack", external_id="TXXXXXXX2", config={})
    self.identity = Identity.objects.create(
        external_id="UXXXXXXX2",
        idp=self.idp,
        user=user2,
        status=IdentityStatus.VALID,
        scopes=[],
    )
    NotificationSetting.objects.update_settings(
        ExternalProviders.SLACK,
        NotificationSettingTypes.ISSUE_ALERTS,
        NotificationSettingOptionValues.ALWAYS,
        user=user2,
    )

    # update the team's notification settings
    ExternalActor.objects.create(
        actor=self.team.actor,
        organization=self.organization,
        integration=self.integration,
        provider=ExternalProviders.SLACK.value,
        external_name="goma",
        external_id="CXXXXXXX2",
    )
    NotificationSetting.objects.update_settings(
        ExternalProviders.SLACK,
        NotificationSettingTypes.ISSUE_ALERTS,
        NotificationSettingOptionValues.ALWAYS,
        team=self.team,
    )

    event = self.store_event(
        data={"message": "Hello world", "level": "error"}, project_id=self.project.id
    )
    action_data = {
        "id": "sentry.mail.actions.NotifyEmailAction",
        "targetType": "Team",
        "targetIdentifier": str(self.team.id),
    }
    rule = Rule.objects.create(
        project=self.project,
        label="ja rule",
        data={
            "match": "all",
            "actions": [action_data],
        },
    )
    notification = Notification(event=event, rule=rule)

    with self.options({"system.url-prefix": "http://example.com"}), self.tasks():
        self.adapter.notify(notification, ActionTargetType.TEAM, self.team.id)

    # check that only one was sent out - more would mean each user is being notified
    # rather than the team
    assert len(responses.calls) == 1

    # check that the team got a notification
    data = parse_qs(responses.calls[0].request.body)
    assert data["channel"] == ["CXXXXXXX2"]
    assert "attachments" in data
    attachments = json.loads(data["attachments"][0])
    assert len(attachments) == 1
    assert attachments[0]["title"] == "Hello world"
    assert (
        attachments[0]["footer"]
        == f"{self.project.slug} | <http://example.com/settings/{self.organization.slug}/teams/{self.team.slug}/notifications/?referrer=issue_alert-slack-team|Notification Settings>"
    )
1730c481f1a8a71446326fa1ff72e10663016385
12
test_issue_alert.py
724
fix(notifications): Use `metrics_key` (#34572)
19,653
0
822
430
138
99,575
196
sentry
71
tests/sentry/integrations/slack/notifications/test_issue_alert.py
Python
63
{ "docstring": "Test that issue alerts are sent to a team in Slack.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
https://github.com/getsentry/sentry.git
1
check_pt_to_tf_equivalence
def check_pt_to_tf_equivalence(self, config, decoder_config, tf_inputs_dict):
    encoder_decoder_config = EncoderDecoderConfig.from_encoder_decoder_configs(config, decoder_config)
    # Output all for aggressive testing
    encoder_decoder_config.output_hidden_states = True
    # All models tested in this file have attentions
    encoder_decoder_config.output_attentions = True

    pt_model = EncoderDecoderModel(encoder_decoder_config)

    with tempfile.TemporaryDirectory() as encoder_tmp_dirname, tempfile.TemporaryDirectory() as decoder_tmp_dirname:
        pt_model.encoder.save_pretrained(encoder_tmp_dirname)
        pt_model.decoder.save_pretrained(decoder_tmp_dirname)
        tf_model = TFEncoderDecoderModel.from_encoder_decoder_pretrained(
            encoder_tmp_dirname, decoder_tmp_dirname, encoder_from_pt=True, decoder_from_pt=True
        )
        # This is only for copying some specific attributes of this particular model.
        tf_model.config = pt_model.config

    self.check_pt_tf_equivalence(tf_model, pt_model, tf_inputs_dict)
6561fbcc6e6d6e1a29fb848dc34710aa25feae78
11
test_modeling_tf_encoder_decoder.py
171
Update TF(Vision)EncoderDecoderModel PT/TF equivalence tests (#18073) Co-authored-by: Joao Gante <[email protected]> Co-authored-by: ydshieh <[email protected]>
5,885
0
213
106
56
32,231
69
transformers
25
tests/models/encoder_decoder/test_modeling_tf_encoder_decoder.py
Python
13
{ "docstring": "EncoderDecoderModel requires special way to cross load (PT -> TF)", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/huggingface/transformers.git
1
test_get_error_lines_error_empty_lines_around_error
def test_get_error_lines_error_empty_lines_around_error(self):
    m = mock_open()
    m.return_value.readlines.return_value = ['this is line 1\n',
                                             'this is line 2\n',
                                             'this is line 3\n',
                                             '     \n',
                                             '   \n',
                                             ' ']

    with patch('builtins.open', m):
        self.obj.ansible_pos = ('foo.yml', 5, 1)
        e = AnsibleError(self.message, self.obj)
        self.assertEqual(
            e.message,
            ("This is the error message\n\nThe error appears to be in 'foo.yml': line 5, column 1, but may\nbe elsewhere in the file depending on "
             "the exact syntax problem.\n\nThe offending line appears to be:\n\nthis is line 2\nthis is line 3\n^ here\n")
        )
b61380827758f8357b6a2721e4a8f290f05c6eaa
12
test_errors.py
154
Remove obsolete unit test builtins compat.
78,515
0
201
78
56
266,685
80
ansible
13
test/units/errors/test_errors.py
Python
11
{ "docstring": "Test that trailing whitespace after the error is removed", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/ansible/ansible.git
6
handler
def handler(request, operation, current_url):
    if operation != QNetworkAccessManager.Operation.GetOperation:
        return networkreply.ErrorNetworkReply(
            request, "Unsupported request type",
            QNetworkReply.NetworkError.ContentOperationNotPermittedError)

    url = request.url()
    if ((url.scheme(), url.host(), url.path()) ==
            ('qute', 'settings', '/set')):
        if current_url != QUrl('qute://settings/'):
            log.network.warning("Blocking malicious request from {} to {}"
                                .format(current_url.toDisplayString(),
                                        url.toDisplayString()))
            return networkreply.ErrorNetworkReply(
                request, "Invalid qute://settings request",
                QNetworkReply.NetworkError.ContentAccessDenied)

    try:
        mimetype, data = qutescheme.data_for_url(url)
    except qutescheme.Error as e:
        errors = {
            qutescheme.NotFoundError:
                QNetworkReply.NetworkError.ContentNotFoundError,
            qutescheme.UrlInvalidError:
                QNetworkReply.NetworkError.ContentOperationNotPermittedError,
            qutescheme.RequestDeniedError:
                QNetworkReply.NetworkError.ContentAccessDenied,
            qutescheme.SchemeOSError:
                QNetworkReply.NetworkError.ContentNotFoundError,
            qutescheme.Error:
                QNetworkReply.NetworkError.InternalServerError,
        }
        exctype = type(e)
        log.misc.error("{} while handling qute://* URL".format(
            exctype.__name__))
        return networkreply.ErrorNetworkReply(request, str(e), errors[exctype])
    except qutescheme.Redirect as e:
        qtutils.ensure_valid(e.url)
        return networkreply.RedirectNetworkReply(e.url)

    return networkreply.FixedDataNetworkReply(request, data, mimetype)
0877fb0d78635692e481c8bde224fac5ad0dd430
15
webkitqutescheme.py
418
Run scripts/dev/rewrite_enums.py
117,576
0
483
264
76
321,172
93
qutebrowser
47
qutebrowser/browser/webkit/network/webkitqutescheme.py
Python
38
{ "docstring": "Scheme handler for qute:// URLs.\n\n Args:\n request: QNetworkRequest to answer to.\n operation: The HTTP operation being done.\n current_url: The page we're on currently.\n\n Return:\n A QNetworkReply.\n ", "language": "en", "n_whitespaces": 63, "n_words": 26, "vocab_size": 25 }
https://github.com/qutebrowser/qutebrowser.git
2
parsed_pipfile
def parsed_pipfile(self) -> Union[tomlkit.toml_document.TOMLDocument, TPipfile]:
    contents = self.read_pipfile()
    # use full contents to get around str/bytes 2/3 issues
    cache_key = (self.pipfile_location, contents)
    if cache_key not in _pipfile_cache:
        parsed = self._parse_pipfile(contents)
        _pipfile_cache[cache_key] = parsed
    return _pipfile_cache[cache_key]
4b996c0fa85824b323ad9eff3364dbe2213ebb4c
10
project.py
92
Convert type comments to type annotations
3,731
0
99
58
28
21,231
35
pipenv
14
pipenv/project.py
Python
10
{ "docstring": "Parse Pipfile into a TOMLFile and cache it\n\n (call clear_pipfile_cache() afterwards if mutating)", "language": "en", "n_whitespaces": 19, "n_words": 13, "vocab_size": 13 }
https://github.com/pypa/pipenv.git
1
_get_free_vram
def _get_free_vram(self) -> List[float]:
    vram = self._all_vram
    self._log("debug", f"GPU VRAM free: {vram}")
    return vram
bdbbad4d310fb606b6f412aa81e9f57ccd994e97
9
amd.py
51
Refactor lib.gpu_stats (#1218) * inital gpu_stats refactor * Add dummy CPU Backend * Update Sphinx documentation
20,027
0
42
27
13
100,563
14
faceswap
7
lib/gpu_stats/amd.py
Python
19
{ "docstring": " Obtain the amount of VRAM that is available, in Megabytes, for each connected AMD\n GPU.\n\n Notes\n -----\n There is no useful way to get free VRAM on PlaidML. OpenCL loads and unloads VRAM as\n required, so this returns the total memory available per card for AMD GPUs, which is\n not particularly useful.\n\n Returns\n -------\n list\n List of `float`s containing the amount of VRAM available, in Megabytes, for each\n connected GPU as corresponding to the values in :attr:`_handles\n ", "language": "en", "n_whitespaces": 172, "n_words": 77, "vocab_size": 55 }
https://github.com/deepfakes/faceswap.git
3
_mark_tests
def _mark_tests(items):
    if os.environ.get("NETWORKX_GRAPH_CONVERT"):
        plugin_name = os.environ["NETWORKX_GRAPH_CONVERT"]
        backend = plugins[plugin_name].load()
        if hasattr(backend, "on_start_tests"):
            getattr(backend, "on_start_tests")(items)
0f91550007fd3a95261d858b1a6a623ef8bda38a
13
backends.py
91
plugin based backend infrastructure to use multiple computation backends (#6000)

* Wrappers classes to dispatch to a backend

* Rework the backend dispatching

- Use __networkx_plugin__=name to find graph-like objects instead of subclassing
- Add PluginInfo to smooth over differences in importlib.metadata across python versions
- Add dispatch behavior override via environment variable to aid in testing plugins

* Dispatch more algorithms and improve auto-test capabilities

* Allow dispatcher decorator without a name

- Name is taken from the decorated function
- Raise error if backend doesn't implement a decorated function which is called
- Check for duplicate names for dispatching algorithms

* Make sphinx pick up backend docs

* make black happy

* Rename decorator to _dispatch as it's experimental

* A few more dispatched functions

* Make convert to and from methods for auto-testing

- Rename `convert` to `convert_from_nx`
- Add `convert_to_nx` function

These will allow backends to return native objects when dispatching, but provide a mechanism to convert the result to the type expected by NetworkX tests for the auto-test plugin mechanism.

* More dispatching

* Include name with `convert_**_nx` methods

* Remove known plugin names

This check is not needed, as any plugin can register itself in the entry points section. The dispatching and auto-testing explicitly specify the plugin to use, so there is no need to hardcode the options. These were originally included for security, but any malicious actor would simply use one of the valid names, so having a hardcoded list does not actually provide any meaningful security.

* Add `dispatchname` to dispatchable functions

Co-authored-by: Jim Kitchen <[email protected]>
Co-authored-by: Erik Welch <[email protected]>
42,378
0
53
51
13
177,442
15
networkx
11
networkx/classes/backends.py
Python
6
{ "docstring": "Allow backend to mark tests (skip or xfail) if they aren't able to correctly handle them", "language": "en", "n_whitespaces": 15, "n_words": 16, "vocab_size": 15 }
https://github.com/networkx/networkx.git
4
_get_formatter
def _get_formatter(self, **kwargs):
    config = {
        attr: getattr(self, attr)
        for attr in [
            "include_sign",
            "group_with_commas",
            "num_decimal_places",
        ]
    }
    config.update(kwargs)
    return "".join(
        [
            "{",
            config.get("field_name", ""),
            ":",
            "+" if config["include_sign"] else "",
            "," if config["group_with_commas"] else "",
            ".",
            str(config["num_decimal_places"]),
            "f",
            "}",
        ],
    )
902e7eb4f0147b5882a613b67467e38a1d47f01e
12
numbers.py
163
Hide more private methods from the docs. (#2468)

* hide privs from text_mobject.py
* hide privs from tex_mobject.py
* hide privs from code_mobject.py
* hide privs from svg_mobject.py
* remove SVGPath and utils from __init__.py
* don't import string_to_numbers
* hide privs from geometry.py
* hide privs from matrix.py
* hide privs from numbers.py
* hide privs from three_dimensions.py
* forgot underscore under set_stroke_width_from_length
* there were more i missed
* unhidea method that was used in docs
* forgot other text2hash
* remove svg_path from docs
46,054
0
319
92
38
189,440
42
manim
10
manim/mobject/numbers.py
Python
23
{ "docstring": "\n Configuration is based first off instance attributes,\n but overwritten by any kew word argument. Relevant\n key words:\n - include_sign\n - group_with_commas\n - num_decimal_places\n - field_name (e.g. 0 or 0.real)\n ", "language": "en", "n_whitespaces": 87, "n_words": 29, "vocab_size": 26 }
https://github.com/ManimCommunity/manim.git
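The helper above assembles a standard Python format spec; for a sense of the output, with include_sign=True, group_with_commas=True and num_decimal_places=2 (hand-picked values, not from the source) it produces "{:+,.2f}":

# The spec built from the sample config above, applied directly:
print("{:+,.2f}".format(1234.5))  # +1,234.50

# With a field_name of "0.real" the spec becomes "{0.real:+,.2f}":
print("{0.real:+,.2f}".format(complex(1234.5, 1)))  # +1,234.50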
1
parametrize_backend
def parametrize_backend(cls):
    assert not hasattr(cls, "backend")
    cls.backend = SessionsReleaseHealthBackend()
cd803d173c72b64d06c0687170bf9a945d0b503c
9
test_sessions.py
39
fix(snuba): Add appropriate `UseCaseKey` for indexer [TET-146] (#36308)

* fix(snuba): Add appropriate `UseCaseKey` for indexer

Update indexer invocation call to have the appropriate `UseCaseKey` depending on use case.

In `src/sentry/sentry_metrics/indexer/base.py::StringIndexer` when using `resolve` and `reverse_resolve` callers should not rely on the default use_case_id.

Important changes:
- Add required parameter `use_case_id: UseCaseKey` to `get_series` from `src/sentry/snuba/metrics/datasource.py#L612`;
- Add required parameter to `get_metrics` in `src/sentry/snuba/metrics/datasource.py`
- Add required parameter to `get_tags` in `src/sentry/snuba/metrics/datasource.py`
- Add required parameter to `get_tag_values` in `src/sentry/snuba/metrics/datasource.py`
18,965
0
18
104
9
93,002
9
sentry
5
tests/snuba/sessions/test_sessions.py
Python
16
{ "docstring": "\n hack to parametrize test-classes by backend. Ideally we'd move\n over to pytest-style tests so we can use `pytest.mark.parametrize`, but\n hopefully we won't have more than one backend in the future.\n ", "language": "en", "n_whitespaces": 43, "n_words": 30, "vocab_size": 28 }
https://github.com/getsentry/sentry.git
3
handle_error_code
def handle_error_code(requests_obj, error_code_map):
    for error_code, error_msg in error_code_map.items():
        if requests_obj.status_code == error_code:
            console.print(error_msg)
401e4c739a6f9d18944e0ab49c782e97b56fda94
11
helper_funcs.py
53
Output Missing API Key Message to Console (#1357)

* Decorator to output error msg to console of missing API Key
* Refactor FMP & alpha advantage
* Refactor FRED & QUANDL
* Refactor Polygon
* Refactor FRED
* Refactor FRED
* Refactor Finnhub & coinmarketcap & Newsapi
* Allow disabling of check api
* Updating tests : disable check api for tests
* Refactor Finnhub & SI & Binance
* Fix linting
* Fix test & add black formatting
* Fix test failing
* Fix test failing
* Refactor CryptoPanic & Whales alert & Glassnode & Coinglass
* Refactor ETHexplorer & Smartstake & Alpha Advanage & Coinbase
* Add decorators to controllers
* Fix test & Refactor Coinbase, RH, Reddit
* Add contributing guideline
* Update CONTRIBUTING.md
* Update CONTRIBUTING.md
* fix tests
* add decorator to snews cmd

Co-authored-by: Chavithra PARANA <[email protected]>
Co-authored-by: didierlopes.eth <[email protected]>
84,294
0
37
32
13
282,770
13
OpenBBTerminal
9
gamestonk_terminal/helper_funcs.py
Python
4
{ "docstring": "\n Helper function to handle error code of HTTP requests.\n\n Parameters\n ----------\n requests_obj: Object\n Request object\n error_code_map: Dict\n Dictionary mapping of HTTP error code and output message\n\n ", "language": "en", "n_whitespaces": 59, "n_words": 26, "vocab_size": 22 }
https://github.com/OpenBB-finance/OpenBBTerminal.git
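A usage sketch for the helper above; the endpoint URL, status codes, and messages are assumptions for illustration, while handle_error_code (and the rich console it prints to) comes from the module in the record:

import requests

ERROR_CODE_MAP = {
    401: "Missing or invalid API key.",
    429: "Rate limit reached, try again later.",
}

response = requests.get("https://api.example.com/quote")
# Prints the mapped message when the response status matches a known error code.
handle_error_code(response, ERROR_CODE_MAP)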
1
render_landing_page
def render_landing_page(self, request, form_submission=None, *args, **kwargs):
    context = self.get_context(request)
    context["form_submission"] = form_submission
    return TemplateResponse(
        request, self.get_landing_page_template(request), context
    )
d10f15e55806c6944827d801cd9c2d53f5da4186
9
models.py
72
Reformat with black
15,921
0
64
46
15
72,991
18
wagtail
10
wagtail/contrib/forms/models.py
Python
6
{ "docstring": "\n Renders the landing page.\n\n You can override this method to return a different HttpResponse as\n landing page. E.g. you could return a redirect to a separate page.\n ", "language": "en", "n_whitespaces": 56, "n_words": 27, "vocab_size": 20 }
https://github.com/wagtail/wagtail.git
2
get_combiner_conds
def get_combiner_conds():
    combiner_types = sorted(list(combiner_registry.keys()))
    conds = []
    for combiner_type in combiner_types:
        combiner_cls = combiner_registry[combiner_type]
        schema_cls = combiner_cls.get_schema_cls()
        combiner_schema = schema.get_custom_schema_from_marshmallow_class(schema_cls)
        combiner_props = combiner_schema["properties"]
        combiner_cond = schema.create_cond({"type": combiner_type}, combiner_props)
        conds.append(combiner_cond)
    return conds


# super class to house common properties
23a33eef3bc7ea3ba33ec56dc9b56ba38462648a
13
combiners.py
130
feat: Modify Trainer to use marshmallow_dataclass syntax for handling hyperparameters. Add basic scripting for docstring extraction to marshmallow schema. Fix some existing marshmallow issues. (#1606)
1,011
0
95
76
32
6,534
39
ludwig
18
ludwig/combiners/combiners.py
Python
11
{ "docstring": "Returns a list of if-then JSON clauses for each combiner type in `combiner_registry` and its properties'\n constraints.", "language": "en", "n_whitespaces": 19, "n_words": 17, "vocab_size": 17 }
https://github.com/ludwig-ai/ludwig.git
15
ravel_multi_index
def ravel_multi_index(multi_index, dims, mode='raise', order='C'):
    assert len(multi_index) == len(dims), f"len(multi_index)={len(multi_index)} != len(dims)={len(dims)}"
    dims = tuple(core.concrete_or_error(operator.index, d, "in `dims` argument of ravel_multi_index().") for d in dims)
    _check_arraylike("ravel_multi_index", *multi_index)
    for index in multi_index:
        if mode == 'raise':
            core.concrete_or_error(array, index,
                "The error occurred because ravel_multi_index was jit-compiled"
                " with mode='raise'. Use mode='wrap' or mode='clip' instead.")
        if not issubdtype(_dtype(index), integer):
            raise TypeError("only int indices permitted")
    if mode == "raise":
        if _any(any((i < 0) | (i >= d)) for i, d in zip(multi_index, dims)):
            raise ValueError("invalid entry in coordinates array")
    elif mode == "clip":
        multi_index = [clip(i, 0, d - 1) for i, d in zip(multi_index, dims)]
    elif mode == "wrap":
        multi_index = [i % d for i, d in zip(multi_index, dims)]
    else:
        raise ValueError(f"invalid mode={mode!r}. Expected 'raise', 'wrap', or 'clip'")

    if order == "F":
        strides = np.cumprod((1,) + dims[:-1])
    elif order == "C":
        strides = np.cumprod((1,) + dims[1:][::-1])[::-1]
    else:
        raise ValueError(f"invalid order={order!r}. Expected 'C' or 'F'")

    result = array(0, dtype=multi_index[0].dtype)
    for i, s in zip(multi_index, strides):
        result = result + i * int(s)
    return result


_UNRAVEL_INDEX_DOC = """\
Unlike numpy's implementation of unravel_index, negative indices are accepted
and out-of-bounds indices are clipped into the valid range.
"""

@_wraps(np.unravel_index, lax_description=_UNRAVEL_INDEX_DOC)
3ad08543a9d766d8e6b9d7272cebfe4f2c431980
@_wraps(np.unravel_index, lax_description=_UNRAVEL_INDEX_DOC)
16
lax_numpy.py
533
[x64] make jnp.histogram and related functions work with strict promotion
26,928
1
246
297
113
120,708
175
jax
35
jax/_src/numpy/lax_numpy.py
Python
30
{ "docstring": "\\\nUnlike numpy's implementation of unravel_index, negative indices are accepted\nand out-of-bounds indices are clipped into the valid range.\n", "language": "en", "n_whitespaces": 16, "n_words": 19, "vocab_size": 17 }
https://github.com/google/jax.git
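Because the stride arithmetic above mirrors NumPy's row-major/column-major semantics, a small cross-check against np.ravel_multi_index is a useful sanity test (the index values are illustrative):

import numpy as np
import jax.numpy as jnp

multi_index = (jnp.array([0, 1]), jnp.array([2, 0]))
dims = (2, 3)
# Row-major: index (0, 2) -> 0*3 + 2 = 2, index (1, 0) -> 1*3 + 0 = 3.
print(jnp.ravel_multi_index(multi_index, dims))      # [2 3]
print(np.ravel_multi_index(([0, 1], [2, 0]), dims))  # [2 3]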
1
_build_ui
def _build_ui(self) -> None:
    container = ttk.PanedWindow(self, orient=tk.VERTICAL)
    container.pack(fill=tk.BOTH, expand=True)
    setattr(container, "preview_display", self._display)  # TODO subclass not setattr
    self._image_canvas = ImagesCanvas(container, self._tk_vars)
    container.add(self._image_canvas, weight=3)

    options_frame = ttk.Frame(container)
    self._cli_frame = ActionFrame(
        options_frame,
        self._available_masks,
        self._samples.predictor.has_predicted_mask,
        self._patch.converter.cli_arguments.color_adjustment.replace("-", "_"),
        self._patch.converter.cli_arguments.mask_type.replace("-", "_"),
        self._config_tools,
        self._refresh,
        self._samples.generate,
        self._tk_vars)

    self._opts_book = OptionsBook(options_frame, self._config_tools, self._refresh)
    container.add(options_frame, weight=1)
    self.update_idletasks()
    container.sashpos(0, int(400 * get_config().scaling_factor))
1022651eb8a7741014f5d2ec7cbfe882120dfa5f
14
preview.py
306
Bugfix: convert - Gif Writer
  - Fix non-launch error on Gif Writer
- convert plugins
  - linting
- convert/fs_media/preview/queue_manager
  - typing
  - Change convert items from dict to Dataclass
20,851
0
346
198
46
101,438
53
faceswap
43
tools/preview/preview.py
Python
25
{ "docstring": " Build the elements for displaying preview images and options panels. ", "language": "en", "n_whitespaces": 11, "n_words": 10, "vocab_size": 10 }
https://github.com/deepfakes/faceswap.git
5
update_config
def update_config(self) -> None: for section, items in self.tk_vars.items(): for item, value in items.items(): try: new_value = str(value.get()) except tk.TclError as err: # When manually filling in text fields, blank values will # raise an error on numeric data types so return 0 logger.debug("Error getting value. Defaulting to 0. Error: %s", str(err)) new_value = str(0) old_value = self._config.config[section][item] if new_value != old_value: logger.trace("Updating config: %s, %s from %s to %s", # type: ignore section, item, old_value, new_value) self._config.config[section][item] = new_value
1022651eb8a7741014f5d2ec7cbfe882120dfa5f
16
preview.py
184
Bugfix: convert - Gif Writer - Fix non-launch error on Gif Writer - convert plugins - linting - convert/fs_media/preview/queue_manager - typing - Change convert items from dict to Dataclass
20,834
0
331
113
63
101,420
80
faceswap
19
tools/preview/preview.py
Python
14
{ "docstring": " Update :attr:`config` with the currently selected values from the GUI. ", "language": "en", "n_whitespaces": 11, "n_words": 10, "vocab_size": 9 }
https://github.com/deepfakes/faceswap.git
8
readline
def readline(self, size=-1):
    r"""Read and return a line of bytes from the stream.

    If size is specified, at most size bytes will be read.
    Size should be an int.

    The line terminator is always b'\n' for binary files; for text
    files, the newlines argument to open can be used to select the line
    terminator(s) recognized.
    """
    # For backwards compatibility, a (slowish) readline().
    if hasattr(self, "peek"):
8198943edd73a363c266633e1aa5b2a9e9c9f526
8
_pyio.py
35
add python 3.10.4 for windows
55,865
0
34
110
14
219,860
14
XX-Net
4
python3.10.4/Lib/_pyio.py
Python
32
{ "docstring": "Read and return a line of bytes from the stream.\n\n If size is specified, at most size bytes will be read.\n Size should be an int.\n\n The line terminator is always b'\\n' for binary files; for text\n files, the newlines argument to open can be used to select the line\n terminator(s) recognized.\n ", "language": "en", "n_whitespaces": 94, "n_words": 52, "vocab_size": 41 }
https://github.com/XX-net/XX-Net.git
17
_text2settings
def _text2settings(self):
    t2xs = [
        (self.t2f, "font"),
        (self.t2s, "slant"),
        (self.t2w, "weight"),
        (self.t2c, "color"),
    ]
    setting_args = {arg: getattr(self, arg) for _, arg in t2xs}

    settings = self._get_settings_from_t2xs(t2xs)
    settings.extend(self._get_settings_from_gradient(setting_args))

    # Handle overlaps
    settings.sort(key=lambda setting: setting.start)
    for index, setting in enumerate(settings):
        if index + 1 == len(settings):
            break

        next_setting = settings[index + 1]
        if setting.end > next_setting.start:
            new_setting = self._merge_settings(setting, next_setting, setting_args)
            new_index = index + 1
            while (
                new_index < len(settings)
                and settings[new_index].start < new_setting.start
            ):
                new_index += 1
            settings.insert(new_index, new_setting)

    # Set all text settings (default font, slant, weight)
    temp_settings = settings.copy()
    start = 0
    for setting in settings:
        if setting.start != start:
            temp_settings.append(TextSetting(start, setting.start, **setting_args))
        start = setting.end
    if start != len(self.text):
        temp_settings.append(TextSetting(start, len(self.text), **setting_args))
    settings = sorted(temp_settings, key=lambda setting: setting.start)

    if re.search(r"\n", self.text):
        line_num = 0
        for start, end in self._find_indexes("\n", self.text):
            for setting in settings:
                if setting.line_num == -1:
                    setting.line_num = line_num
                if start < setting.end:
                    line_num += 1
                    new_setting = copy.copy(setting)
                    setting.end = end
                    new_setting.start = end
                    new_setting.line_num = line_num
                    settings.append(new_setting)
                    settings.sort(key=lambda setting: setting.start)
                    break

    for setting in settings:
        if setting.line_num == -1:
            setting.line_num = 0

    return settings
902e7eb4f0147b5882a613b67467e38a1d47f01e
18
text_mobject.py
612
Hide more private methods from the docs. (#2468)

* hide privs from text_mobject.py
* hide privs from tex_mobject.py
* hide privs from code_mobject.py
* hide privs from svg_mobject.py
* remove SVGPath and utils from __init__.py
* don't import string_to_numbers
* hide privs from geometry.py
* hide privs from matrix.py
* hide privs from numbers.py
* hide privs from three_dimensions.py
* forgot underscore under set_stroke_width_from_length
* there were more i missed
* unhidea method that was used in docs
* forgot other text2hash
* remove svg_path from docs
46,094
0
888
389
99
189,494
182
manim
38
manim/mobject/svg/text_mobject.py
Python
52
{ "docstring": "Converts the texts and styles to a setting for parsing.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/ManimCommunity/manim.git
7
karate_club_graph
def karate_club_graph():
    # Create the set of all members, and the members of each club.
    all_members = set(range(34))
    club1 = {0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 16, 17, 19, 21}
    # club2 = all_members - club1

    G = nx.Graph()
    G.add_nodes_from(all_members)
    G.name = "Zachary's Karate Club"

    zacharydat = """\
0 4 5 3 3 3 3 2 2 0 2 3 2 3 0 0 0 2 0 2 0 2 0 0 0 0 0 0 0 0 0 2 0 0
4 0 6 3 0 0 0 4 0 0 0 0 0 5 0 0 0 1 0 2 0 2 0 0 0 0 0 0 0 0 2 0 0 0
5 6 0 3 0 0 0 4 5 1 0 0 0 3 0 0 0 0 0 0 0 0 0 0 0 0 0 2 2 0 0 0 3 0
3 3 3 0 0 0 0 3 0 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
3 0 0 0 0 0 2 0 0 0 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
3 0 0 0 0 0 5 0 0 0 3 0 0 0 0 0 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
3 0 0 0 2 5 0 0 0 0 0 0 0 0 0 0 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
2 4 4 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
2 0 5 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 3 0 4 3
0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 2
2 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
1 0 0 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
3 5 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 3
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 3 2
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 3 4
0 0 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
2 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 2
2 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 3 1
2 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 2 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 5 0 4 0 2 0 0 5 4
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 2 0 3 0 0 0 2 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 5 2 0 0 0 0 0 0 7 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 4 0 0 0 2
0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 4 3 0 0 0 0 0 0 0 0 4
0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 2 0 2
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 3 0 0 4 0 0 0 0 0 3 2
0 2 0 0 0 0 0 0 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 3 3
2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 2 7 0 0 2 0 0 0 4 4
0 0 2 0 0 0 0 0 3 0 0 0 0 0 3 3 0 0 1 0 3 0 2 5 0 0 0 0 0 4 3 4 0 5
0 0 0 0 0 0 0 0 4 2 0 0 0 3 2 4 0 0 2 1 1 0 3 4 0 0 2 4 2 2 3 4 5 0
"""

    for row, line in enumerate(zacharydat.split("\n")):
        thisrow = [int(b) for b in line.split()]
        for col, entry in enumerate(thisrow):
            if entry >= 1:
                G.add_edge(row, col, weight=entry)

    # Add the name of each member's club as a node attribute.
    for v in G:
        G.nodes[v]["club"] = "Mr. Hi" if v in club1 else "Officer"
    return G
290ebce534b84f9db20ec58b98cbb170e65a0ba1
14
social.py
241
Add weights to karate club graph (#5285) Add weights to the karate_club_graph. Modifies `non_randomness` and `naive_greedy_modularity_communities` to accept a `weight` parameter and modifies tests that use the kcg accordingly Co-authored-by: Kevin Berry <[email protected]> Co-authored-by: Dan Schult <[email protected]>
41,796
0
193
154
77
176,261
106
networkx
24
networkx/generators/social.py
Python
49
{ "docstring": "Returns Zachary's Karate Club graph.\n\n Each node in the returned graph has a node attribute 'club' that\n indicates the name of the club to which the member represented by that node\n belongs, either 'Mr. Hi' or 'Officer'. Each edge has a weight based on the\n number of contexts in which that edge's incident node members interacted.\n\n Examples\n --------\n To get the name of the club to which a node belongs::\n\n >>> G = nx.karate_club_graph()\n >>> G.nodes[5][\"club\"]\n 'Mr. Hi'\n >>> G.nodes[9][\"club\"]\n 'Officer'\n\n References\n ----------\n .. [1] Zachary, Wayne W.\n \"An Information Flow Model for Conflict and Fission in Small Groups.\"\n *Journal of Anthropological Research*, 33, 452--473, (1977).\n \\\n0 4 5 3 3 3 3 2 2 0 2 3 2 3 0 0 0 2 0 2 0 2 0 0 0 0 0 0 0 0 0 2 0 0\n4 0 6 3 0 0 0 4 0 0 0 0 0 5 0 0 0 1 0 2 0 2 0 0 0 0 0 0 0 0 2 0 0 0\n5 6 0 3 0 0 0 4 5 1 0 0 0 3 0 0 0 0 0 0 0 0 0 0 0 0 0 2 2 0 0 0 3 0\n3 3 3 0 0 0 0 3 0 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n3 0 0 0 0 0 2 0 0 0 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n3 0 0 0 0 0 5 0 0 0 3 0 0 0 0 0 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n3 0 0 0 2 5 0 0 0 0 0 0 0 0 0 0 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n2 4 4 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n2 0 5 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 3 0 4 3\n0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 2\n2 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n1 0 0 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n3 5 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 3\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 3 2\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 3 4\n0 0 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n2 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 2\n2 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 3 1\n2 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 2 0\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 5 0 4 0 2 0 0 5 4\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 2 0 3 0 0 0 2 0 0\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 5 2 0 0 0 0 0 0 7 0 0\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 4 0 0 0 2\n0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 4 3 0 0 0 0 0 0 0 0 4\n0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 2 0 2\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 3 0 0 4 0 0 0 0 0 3 2\n0 2 0 0 0 0 0 0 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 3 3\n2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 2 7 0 0 2 0 0 0 4 4\n0 0 2 0 0 0 0 0 3 0 0 0 0 0 3 3 0 0 1 0 3 0 2 5 0 0 0 0 0 4 3 4 0 5\n0 0 0 0 0 0 0 0 4 2 0 0 0 3 2 4 0 0 2 1 1 0 3 4 0 0 2 4 2 2 3 4 5 0", "language": "en", "n_whitespaces": 1308, "n_words": 1263, "vocab_size": 85 }
https://github.com/networkx/networkx.git